code | apis | extract_api |
---|---|---|
# Official modules
import numpy as np
from mnist import MNIST
from sklearn.linear_model import RidgeClassifier
# Pelenet modules
from ._abstract import Experiment
from ..network import ReservoirNetwork
"""
@desc: Train output on support neuron activity from assemblies
"""
class AssemblyOutputExperiment(Experiment):
def defineParameters(self):
"""
Define parameters for this experiment
"""
return {}
def build(
self,
supportMask=None,
mask=None, weights=None,
assemblyIndex=0,
inputSpikeIndices=[],
targetNeuronIndices=[]
):
"""
Overwrites the build step: builds the reservoir network with the given mask, weights and input
Parameters
----------
supportMask :
The last support mask of the assembly organization
mask :
The connection mask for the reservoir (must be given together with weights)
weights :
The connection weights for the reservoir (must be given together with mask)
assemblyIndex :
The index of the assembly where 0: A, 1: B, etc.
inputSpikeIndices :
The spike times for every input generator (e.g. as returned by getInputIdcs)
targetNeuronIndices :
The indices of the neurons that receive the input; defaults to the neurons of the chosen assembly
"""
# Define variables for the experiment
self.assemblyIndex = assemblyIndex
self.supportMask = supportMask
# Data
self.dataTrain = None
self.dataTest = None
# Instantiate innate network
self.net = ReservoirNetwork(self.p)
# Define mask and weights
if (mask is not None and weights is not None):
# Set mask and weights
self.net.initialMasks = mask
self.net.initialWeights = weights
else:
# Throw an error if only one of mask/weights is defined
raise Exception("It is not possible to define only one of mask and weights, both must be defined.")
# Connect ex-in reservoir
self.net.connectReservoir()
# Add input
if len(targetNeuronIndices) == 0:
targetNeuronIndices = np.arange(self.p.inputNumTargetNeurons) + assemblyIndex*self.p.inputNumTargetNeurons
self.net.addInput(inputSpikeIndices=inputSpikeIndices, targetNeuronIndices=targetNeuronIndices)
# Add background noise
if self.p.isNoise:
self.net.addNoiseGenerator()
# Add Probes
self.net.addProbes()
# Call afterBuild
self.afterBuild()
def getInputIdcs(self, dataset):
"""
Get indices of spikes for input generators
"""
spikeIdcs = []
# Iterate over target neurons
for i in range(dataset.shape[1]):
spikeTimes = []
# Iterate over trials
for k in range(dataset.shape[0]):
off = self.p.inputOffset + self.p.stepsPerTrial*k + self.p.resetOffset*(k+1)
spks = off + np.where(dataset[k,i,:])[0]
spikeTimes.extend(spks.tolist())
spikeIdcs.append(spikeTimes)
return spikeIdcs
def getImagesOfClass(self, images, labels, label=0, threshold=64, size=1000):
"""
Get MNIST images of a specific class
Images are post-processed: each image is reshaped from a flat vector of 784 values to a 28x28 square.
Further, the grey values are thresholded to get binary values (black/white only).
Parameters
----------
images :
Images to be used from the MNIST dataset (train or test)
labels :
Labels to be used from the MNIST dataset (train or test)
label :
The class to filter images for.
threshold :
The threshold to transform grey values to black/white (binary) values (0-255).
size :
The maximum number of images to return per class.
"""
# Transform images and labels to numpy arrays
images = np.array(images)
labels = np.array(labels)
# Get indices where images match label
idc = np.where(labels == label)[0]
# Filter out 'size' images with specific label
img = images[idc[:size]]
# Reshape images from 1D to 2D
s = img.shape
xy = int(np.sqrt(s[1]))
img = img.reshape((s[0], xy, xy))
# Threshold images to remove grey values and transform to black/white (binary) values
img[img <= threshold] = 0
img[img > threshold] = 1
# Return images of requested class
return img
def loadMnistAsInputs(self, nAssemblies=2, nTrain=500, nTest=100):
# Define variables
self.nAssemblies = nAssemblies
self.nTrain = nTrain
self.nTest = nTest
# Load mnist data
mndata = MNIST('data/mnist')
train, trainLabels = mndata.load_training()
test, testLabels = mndata.load_testing()
# Throw error if number of classes exceeds limit
if (nAssemblies != 2):
raise Exception('Currently only two classes are supported')
# Get training set
train0 = self.getImagesOfClass(train, trainLabels, 0, size=nTrain)
train1 = self.getImagesOfClass(train, trainLabels, 1, size=nTrain)
# Get test set
test0 = self.getImagesOfClass(test, testLabels, 0, size=nTest)
test1 = self.getImagesOfClass(test, testLabels, 1, size=nTest)
# Concatenate datasets
inputs = np.concatenate((
train0,
train1,
test0,
test1
), axis=0)
return self.getInputIdcs(inputs)
def loadYinYangAsInputs(self, nAssemblies=2, nTrain=500, nTest=100):
"""
Loads the Yin Yang dataset and transforms it to an input for the reservoir network
"""
# Define variables
self.nAssemblies = nAssemblies
self.nTrain = nTrain
self.nTest = nTest
# Load raw data
yinTrain = np.load('data/yinyang/inputs_yin_train.npy') # 1000
yangTrain = np.load('data/yinyang/inputs_yang_train.npy') # 1000
dotsTrain = np.load('data/yinyang/inputs_dots_train.npy') # 1000
yinTest = np.load('data/yinyang/inputs_yin_test.npy') # 200
yangTest = np.load('data/yinyang/inputs_yang_test.npy') # 200
dotsTest = np.load('data/yinyang/inputs_dots_test.npy') # 200
# Check if the requested amount of data is available
if (nTrain > yinTrain.shape[0]) or (nTrain > yangTrain.shape[0]):
raise Exception('The training dataset has fewer samples than requested.')
if (nTest > yinTest.shape[0]) or (nTest > yangTest.shape[0]):
raise Exception('The test dataset has fewer samples than requested.')
# Check if number of classes is available
if (nAssemblies > 3):
raise Exception('This dataset has a maximum of 3 classes available')
# Compute total length
nTrials = nAssemblies * (nTrain + nTest)
# Concatenate datasets
inputs = np.concatenate((
yinTrain[:nTrain],
yangTrain[:nTrain],
#dotsTrain[:nTrain],
yinTest[:nTest],
yangTest[:nTest],
#dotsTest[:nTest]
), axis=0)
# Transform to input for reservoir and return
return self.getInputIdcs(inputs)
def getDatasetFromSpikes(self):
"""
Get Dataset from raw spiking data
"""
# Get data from spikes
data = []
nIdx = self.p.inputNumTargetNeurons
for i in range(self.p.trials):
fr = i*self.p.totalTrialSteps + self.p.resetOffset + self.p.inputOffset
to = (i+1)*self.p.totalTrialSteps
# Get number of input neurons (all assemblies)
nInput = self.p.inputNumTargetNeurons*len(self.p.inputVaryProbs)
tmp = self.net.exSpikeTrains[nInput:,fr:to]
data.append(tmp)
# Convert to numpy array
data = np.array(data)
# Get support indices
supportIndices = np.where(self.supportMask[self.assemblyIndex])[0]
# Separate between train and test data
dataTrain = data[:self.nAssemblies*self.nTrain,supportIndices,:]
dataTest = data[self.nAssemblies*self.nTrain:,supportIndices,:]
return dataTrain, dataTest
def afterRun(self):
"""
A lifecycle function called after the simulation has successfully finished
Prepares the dataset
"""
# Get dataset from spikes
dataTrain, dataTest = self.getDatasetFromSpikes()
self.dataTrain = dataTrain
self.dataTest = dataTest
# Store 2D spikes dataset
sTra = self.dataTrain.shape
self.spikesTrain = self.dataTrain.reshape((sTra[0], sTra[1]*sTra[2]))
sTst = self.dataTest.shape
self.spikesTest = self.dataTest.reshape((sTst[0], sTst[1]*sTst[2]))
# Store rates dataset
self.ratesTrain = np.sum(self.dataTrain, axis=1)
self.ratesTest = np.sum(self.dataTest, axis=1)
# Store frequencies dataset
self.freqTrain = np.sum(self.dataTrain, axis=2)
self.freqTest = np.sum(self.dataTest, axis=2)
# Store labels
self.labelsTrain = np.concatenate((-np.ones(self.nTrain), np.ones(self.nTrain)))
self.labelsTest = np.concatenate((-np.ones(self.nTest), np.ones(self.nTest)))
def fitRidgeClassifier(self):
"""
Fit ridge classifier and show scores
"""
self.clfSpikes = RidgeClassifier().fit(self.spikesTrain, self.labelsTrain)
print('(spikes) score:', self.clfSpikes.score(self.spikesTest, self.labelsTest))
self.clfRates = RidgeClassifier().fit(self.ratesTrain, self.labelsTrain)
print('(rates) score:', self.clfRates.score(self.ratesTest, self.labelsTest))
self.clfFreq = RidgeClassifier().fit(self.freqTrain, self.labelsTrain)
print('(frequency) score:', self.clfFreq.score(self.freqTest, self.labelsTest))
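# Illustrative workflow sketch (not part of the original module). It outlines the intended
# lifecycle of this experiment; the constructor arguments and the run() call are assumed to
# come from the pelenet base Experiment class and are hypothetical here.
#
#   exp = AssemblyOutputExperiment()                      # parameters from defineParameters()/base class
#   spikeIdcs = exp.loadYinYangAsInputs(nAssemblies=2, nTrain=500, nTest=100)
#   exp.build(supportMask=supportMask, mask=mask, weights=weights,
#             assemblyIndex=0, inputSpikeIndices=spikeIdcs)
#   exp.run()                                             # simulate; afterRun() then builds spikes/rates/freq datasets
#   exp.fitRidgeClassifier()                              # prints scores for spikes, rates and frequencies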
|
[
"numpy.load",
"numpy.sum",
"numpy.ones",
"numpy.where",
"numpy.array",
"numpy.arange",
"sklearn.linear_model.RidgeClassifier",
"numpy.concatenate",
"numpy.sqrt"
] |
[((3724, 3740), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (3732, 3740), True, 'import numpy as np\n'), ((3758, 3774), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (3766, 3774), True, 'import numpy as np\n'), ((5231, 5285), 'numpy.concatenate', 'np.concatenate', (['(train0, train1, test0, test1)'], {'axis': '(0)'}), '((train0, train1, test0, test1), axis=0)\n', (5245, 5285), True, 'import numpy as np\n'), ((5743, 5787), 'numpy.load', 'np.load', (['"""data/yinyang/inputs_yin_train.npy"""'], {}), "('data/yinyang/inputs_yin_train.npy')\n", (5750, 5787), True, 'import numpy as np\n'), ((5816, 5861), 'numpy.load', 'np.load', (['"""data/yinyang/inputs_yang_train.npy"""'], {}), "('data/yinyang/inputs_yang_train.npy')\n", (5823, 5861), True, 'import numpy as np\n'), ((5890, 5935), 'numpy.load', 'np.load', (['"""data/yinyang/inputs_dots_train.npy"""'], {}), "('data/yinyang/inputs_dots_train.npy')\n", (5897, 5935), True, 'import numpy as np\n'), ((5962, 6005), 'numpy.load', 'np.load', (['"""data/yinyang/inputs_yin_test.npy"""'], {}), "('data/yinyang/inputs_yin_test.npy')\n", (5969, 6005), True, 'import numpy as np\n'), ((6032, 6076), 'numpy.load', 'np.load', (['"""data/yinyang/inputs_yang_test.npy"""'], {}), "('data/yinyang/inputs_yang_test.npy')\n", (6039, 6076), True, 'import numpy as np\n'), ((6103, 6147), 'numpy.load', 'np.load', (['"""data/yinyang/inputs_dots_test.npy"""'], {}), "('data/yinyang/inputs_dots_test.npy')\n", (6110, 6147), True, 'import numpy as np\n'), ((6802, 6904), 'numpy.concatenate', 'np.concatenate', (['(yinTrain[:nTrain], yangTrain[:nTrain], yinTest[:nTest], yangTest[:nTest])'], {'axis': '(0)'}), '((yinTrain[:nTrain], yangTrain[:nTrain], yinTest[:nTest],\n yangTest[:nTest]), axis=0)\n', (6816, 6904), True, 'import numpy as np\n'), ((7772, 7786), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (7780, 7786), True, 'import numpy as np\n'), ((8762, 8792), 'numpy.sum', 'np.sum', (['self.dataTrain'], {'axis': '(1)'}), '(self.dataTrain, axis=1)\n', (8768, 8792), True, 'import numpy as np\n'), ((8818, 8847), 'numpy.sum', 'np.sum', (['self.dataTest'], {'axis': '(1)'}), '(self.dataTest, axis=1)\n', (8824, 8847), True, 'import numpy as np\n'), ((8910, 8940), 'numpy.sum', 'np.sum', (['self.dataTrain'], {'axis': '(2)'}), '(self.dataTrain, axis=2)\n', (8916, 8940), True, 'import numpy as np\n'), ((8965, 8994), 'numpy.sum', 'np.sum', (['self.dataTest'], {'axis': '(2)'}), '(self.dataTest, axis=2)\n', (8971, 8994), True, 'import numpy as np\n'), ((3836, 3861), 'numpy.where', 'np.where', (['(labels == label)'], {}), '(labels == label)\n', (3844, 3861), True, 'import numpy as np\n'), ((4031, 4044), 'numpy.sqrt', 'np.sqrt', (['s[1]'], {}), '(s[1])\n', (4038, 4044), True, 'import numpy as np\n'), ((7843, 7889), 'numpy.where', 'np.where', (['self.supportMask[self.assemblyIndex]'], {}), '(self.supportMask[self.assemblyIndex])\n', (7851, 7889), True, 'import numpy as np\n'), ((1937, 1976), 'numpy.arange', 'np.arange', (['self.p.inputNumTargetNeurons'], {}), '(self.p.inputNumTargetNeurons)\n', (1946, 1976), True, 'import numpy as np\n'), ((9085, 9105), 'numpy.ones', 'np.ones', (['self.nTrain'], {}), '(self.nTrain)\n', (9092, 9105), True, 'import numpy as np\n'), ((9172, 9191), 'numpy.ones', 'np.ones', (['self.nTest'], {}), '(self.nTest)\n', (9179, 9191), True, 'import numpy as np\n'), ((9323, 9340), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {}), '()\n', (9338, 9340), False, 'from sklearn.linear_model import RidgeClassifier\n'), ((9495, 9512), 
'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {}), '()\n', (9510, 9512), False, 'from sklearn.linear_model import RidgeClassifier\n'), ((9662, 9679), 'sklearn.linear_model.RidgeClassifier', 'RidgeClassifier', ([], {}), '()\n', (9677, 9679), False, 'from sklearn.linear_model import RidgeClassifier\n'), ((9063, 9083), 'numpy.ones', 'np.ones', (['self.nTrain'], {}), '(self.nTrain)\n', (9070, 9083), True, 'import numpy as np\n'), ((9151, 9170), 'numpy.ones', 'np.ones', (['self.nTest'], {}), '(self.nTest)\n', (9158, 9170), True, 'import numpy as np\n'), ((2777, 2803), 'numpy.where', 'np.where', (['dataset[k, i, :]'], {}), '(dataset[k, i, :])\n', (2785, 2803), True, 'import numpy as np\n')]
|
import numpy as np
from numba import njit
import scipy.optimize as optim
import torch
from stew.utils import create_diff_matrix
import itertools
@njit
def stew_reg(X, y, D, lam):
return np.linalg.inv(X.T @ X + lam * D) @ X.T @ y
def stew_loss(beta, X, y, D, lam):
residuals = y - X @ beta
l = residuals.T.dot(residuals) + lam * beta.T.dot(D).dot(beta)
return l
def stew_grad(beta, X, y, D, lam):
return 2 * np.dot(beta, X.T).dot(X) - 2 * y.T.dot(X) + 2 * lam * beta.dot(D)
def stew_hessian(beta, X, y, D, lam):
return 2 * X.T.dot(X) + 2 * lam * D
def stew_reg_iter(X, y, D, lam, method='Newton-CG'):
op = optim.minimize(fun=stew_loss, x0=np.zeros(X.shape[1]), args=(X, y, D, lam),
jac=stew_grad, hess=stew_hessian, method=method)
return op.x
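# Quick consistency check (not part of the original module): fits the same STEW problem with
# the closed-form solver stew_reg and the iterative stew_reg_iter and returns the largest
# difference between the two solutions. The synthetic data is illustrative only; D is assumed
# to be the (num_features x num_features) difference matrix from stew.utils.create_diff_matrix.
def _stew_solvers_demo(num_samples=200, num_features=5, lam=1.0, seed=0):
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(num_samples, num_features))
    y = X @ np.ones(num_features) + 0.1 * rng.normal(size=num_samples)
    D = create_diff_matrix(num_features=num_features)
    beta_closed = stew_reg(X, y, D, lam)       # (X'X + lam*D)^-1 X'y
    beta_iter = stew_reg_iter(X, y, D, lam)     # Newton-CG on the same loss
    return np.max(np.abs(beta_closed - beta_iter))  # should be small (optimizer tolerance)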
class LinearRegressionTorch:
def __init__(self,
num_features,
learning_rate=0.1,
regularization="none",
positivity_constraint=False,
lam=0,
verbose=False):
self.num_features = num_features
self.model = LinearRegressionModel(self.num_features)
self.loss = torch.nn.MSELoss()
self.learning_rate = learning_rate
self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.learning_rate)
self.regularization = regularization
assert(self.regularization in ["none",
"stem_opt",
"ridge", "lasso",
"stew1", "stew2",
"stem1", "stem2",
"stow", "stnw",
"sted", "sthd",
"peno", "penlex"])
self.positivity_constraint = positivity_constraint
self.positivity_weight_clipper = PositivityClipper()
self.lam = lam
self.D = torch.from_numpy(create_diff_matrix(num_features=num_features)).float()
self.verbose = verbose
def fit(self, X, y, epochs=10):
for epoch in range(epochs):
# Clear gradient buffers so gradients from the previous epoch do not carry over and accumulate
self.optimizer.zero_grad()
if self.lam > 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = np.minimum(self.learning_rate, 1 / (self.lam))
# get output from the model, given the inputs
outputs = self.model(torch.from_numpy(X).float())
# get loss for the predicted output
loss = self.loss(outputs, torch.from_numpy(y).float().reshape((-1, 1)))
if self.regularization == "stew2":
stew2_reg = torch.tensor(0.)
for i in range(self.num_features-1):
for j in range(i+1, self.num_features):
stew2_reg += torch.pow(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][j], 2)
stew2_reg /= self.num_features * (self.num_features + 1) / 2
loss += self.lam * stew2_reg
elif self.regularization == "stew1":
stew1_reg = torch.tensor(0.)
for i in range(self.num_features-1):
for j in range(i+1, self.num_features):
stew1_reg += torch.abs(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][j])
stew1_reg /= self.num_features * (self.num_features + 1) / 2
loss += self.lam * stew1_reg
elif self.regularization == "ridge":
ridge_reg = torch.tensor(0.)
for i in range(self.num_features):
ridge_reg += torch.pow(self.model.input_linear.weight[0][i], 2)
ridge_reg /= self.num_features
loss += self.lam * ridge_reg
elif self.regularization == "lasso":
lasso_reg = torch.tensor(0.)
for i in range(self.num_features):
lasso_reg += torch.abs(self.model.input_linear.weight[0][i])
lasso_reg /= self.num_features
loss += self.lam * lasso_reg
elif self.regularization == "stem2":
stem2_reg = torch.tensor(0.)
for i in range(self.num_features - 1):
for j in range(i + 1, self.num_features):
stem2_reg += torch.pow(torch.abs(self.model.input_linear.weight[0][i]) - torch.abs(self.model.input_linear.weight[0][j]), 2)
stem2_reg /= self.num_features * (self.num_features + 1) / 2
loss += self.lam * stem2_reg
elif self.regularization == "stem1":
stem1_reg = torch.tensor(0.)
for i in range(self.num_features - 1):
for j in range(i + 1, self.num_features):
stem1_reg += torch.abs(torch.abs(self.model.input_linear.weight[0][i]) - torch.abs(self.model.input_linear.weight[0][j]))
stem1_reg /= self.num_features * (self.num_features + 1) / 2
loss += self.lam * stem1_reg
elif self.regularization == "stow":
stow_reg = torch.tensor(0.)
for i in range(1, self.num_features):
stow_reg += torch.clamp(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][i - 1], min=0)
stow_reg /= (self.num_features - 1)
loss += self.lam * stow_reg
elif self.regularization == "peno":
peno = torch.tensor(0.)
for i in range(1, self.num_features):
# boolean comparison counts order violations but is non-differentiable, so it contributes no gradient
peno += self.model.input_linear.weight[0][i] < self.model.input_linear.weight[0][i - 1]
# peno += torch.clamp(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][i - 1], min=0)
peno /= (self.num_features - 1)
loss += self.lam * peno
elif self.regularization == "stnw":
stnw_reg = torch.tensor(0.)
for i in range(self.num_features-1):
stnw_reg += torch.clamp(self.model.input_linear.weight[0][(i+1):].sum() - self.model.input_linear.weight[0][i], min=0)
stnw_reg /= (self.num_features - 1)
loss += self.lam * stnw_reg
elif self.regularization == "sted":
sted_reg = torch.tensor(0.)
for i in range(self.num_features - 1):
sted_reg += torch.pow(self.model.input_linear.weight[0][i] - 2 * self.model.input_linear.weight[0][i+1], 2)
sted_reg /= (self.num_features - 1)
loss += self.lam * sted_reg
elif self.regularization == "sthd":
sthd_reg = torch.tensor(0.)
for i in range(self.num_features - 1):
sthd_reg += torch.pow(self.model.input_linear.weight[0][i] - (i + 2)/(i + 1) * self.model.input_linear.weight[0][i+1], 2)
sthd_reg /= (self.num_features - 1)
loss += self.lam * sthd_reg
if self.verbose:
print('epoch {}, loss {}'.format(epoch, loss.item()))
# get gradients w.r.t to parameters
loss.backward()
# update parameters
self.optimizer.step()
if self.positivity_constraint:
self.model.input_linear.apply(self.positivity_weight_clipper)
return self.model.input_linear.weight[0].detach().numpy() # , loss.item()
def predict(self, X):
"""
Predict on (new) data
:param X: feature matrix as a numpy array (converted to a torch tensor within this method)
:return: predictions as a flat numpy array
"""
with torch.no_grad():
return self.model(torch.from_numpy(X).float()).numpy().flatten()
class STEMopt:
def __init__(self,
train_fraction,
num_features,
learning_rate=0.1,
regularization="none",
positivity_constraint=False,
lam=0,
verbose=False
):
self.train_fraction = train_fraction
self.num_features = num_features
self.learning_rate = learning_rate
self.regularization = regularization
self.positivity_constraint = positivity_constraint
self.lam = lam
self.verbose = verbose
if regularization == "stem_opt":
self.regularization = "stew2"
# self.model = LinearRegressionTorch(self.num_features, self.learning_rate,
# self.regularization, self.positivity_constraint,
# self.lam, self.verbose)
self.D = create_diff_matrix(self.num_features)
self.beta = None
def fit(self, X, y, epochs):
num_samples, num_features = X.shape
# permuted_indices = np.random.permutation(np.arange(num_samples))
# train_set_size = int(np.floor(self.train_fraction * num_samples))
# train_indices = permuted_indices[:train_set_size]
# test_indices = permuted_indices[train_set_size:]
# X_train = X[train_indices, :]
# X_test = X[test_indices, :]
# y_train = y[train_indices]
# y_test = y[test_indices]
configurations = np.array(list(itertools.product([-1, 1], repeat=num_features)))
num_configurations = len(configurations)
betas = np.zeros((num_configurations, num_features))
losses = np.zeros(num_configurations)
for configuration_ix, configuration in enumerate(configurations): # configuration_ix = 0; configuration = configurations[configuration_ix]
# print(configuration_ix, configuration)
confd_X = X * configuration[np.newaxis, :]
# confd_X_test = X_test * configuration[np.newaxis, :]
# self.model = LinearRegressionTorch(self.num_features, self.learning_rate,
# self.regularization, self.positivity_constraint,
# self.lam, self.verbose)
# beta, loss = self.model.fit(confd_X, y, epochs)
beta = stew_reg(confd_X, y, self.D, self.lam)
loss = stew_loss(beta, confd_X, y, self.D, self.lam)
betas[configuration_ix, :] = beta * configuration
losses[configuration_ix] = loss
argmin_loss = np.argmin(losses)
argmin_beta = betas[argmin_loss, :]
self.beta = argmin_beta
return argmin_beta
def predict(self, X):
return X @ self.beta
class LinearRegressionModel(torch.nn.Module):
def __init__(self, num_features):
super(LinearRegressionModel, self).__init__()
self.num_features = num_features
self.input_linear = torch.nn.Linear(in_features=self.num_features, out_features=1, bias=False)
def forward(self, choice_sets):
y_pred = self.input_linear(choice_sets)
return y_pred
class PositivityClipper:
def __init__(self, frequency=5):
self.frequency = frequency
def __call__(self, module):
# filter the variables to get the ones you want
if hasattr(module, 'weight'):
module.weight.data = torch.clamp(module.weight.data, min=0)
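# Illustrative usage sketch (not part of the original module): fits both estimators on small
# synthetic data. The shapes, hyperparameters and the choice of regularizer below are
# assumptions made only for this demo.
def _regression_demo(seed=0):
    rng = np.random.RandomState(seed)
    X = rng.normal(size=(256, 4))
    y = X @ np.array([3.0, 2.0, 1.0, 0.5]) + 0.1 * rng.normal(size=256)
    # Gradient-based fit with a pairwise weight-similarity (stew2) penalty
    lin = LinearRegressionTorch(num_features=4, learning_rate=0.05, regularization="stew2", lam=0.1)
    beta_torch = lin.fit(X, y, epochs=50)
    # Exhaustive sign-configuration search around the closed-form STEW solver
    stem = STEMopt(train_fraction=0.8, num_features=4, regularization="stem_opt", lam=0.1)
    beta_stem = stem.fit(X, y, epochs=None)   # epochs is unused on the closed-form path
    return beta_torch, beta_stem, lin.predict(X), stem.predict(X)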
|
[
"torch.nn.MSELoss",
"numpy.minimum",
"numpy.dot",
"numpy.zeros",
"numpy.argmin",
"torch.clamp",
"numpy.linalg.inv",
"torch.pow",
"torch.nn.Linear",
"stew.utils.create_diff_matrix",
"itertools.product",
"torch.no_grad",
"torch.abs",
"torch.tensor",
"torch.from_numpy"
] |
[((1195, 1213), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (1211, 1213), False, 'import torch\n'), ((9011, 9048), 'stew.utils.create_diff_matrix', 'create_diff_matrix', (['self.num_features'], {}), '(self.num_features)\n', (9029, 9048), False, 'from stew.utils import create_diff_matrix\n'), ((9726, 9770), 'numpy.zeros', 'np.zeros', (['(num_configurations, num_features)'], {}), '((num_configurations, num_features))\n', (9734, 9770), True, 'import numpy as np\n'), ((9788, 9816), 'numpy.zeros', 'np.zeros', (['num_configurations'], {}), '(num_configurations)\n', (9796, 9816), True, 'import numpy as np\n'), ((10713, 10730), 'numpy.argmin', 'np.argmin', (['losses'], {}), '(losses)\n', (10722, 10730), True, 'import numpy as np\n'), ((11099, 11173), 'torch.nn.Linear', 'torch.nn.Linear', ([], {'in_features': 'self.num_features', 'out_features': '(1)', 'bias': '(False)'}), '(in_features=self.num_features, out_features=1, bias=False)\n', (11114, 11173), False, 'import torch\n'), ((192, 224), 'numpy.linalg.inv', 'np.linalg.inv', (['(X.T @ X + lam * D)'], {}), '(X.T @ X + lam * D)\n', (205, 224), True, 'import numpy as np\n'), ((676, 696), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (684, 696), True, 'import numpy as np\n'), ((7937, 7952), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7950, 7952), False, 'import torch\n'), ((11540, 11578), 'torch.clamp', 'torch.clamp', (['module.weight.data'], {'min': '(0)'}), '(module.weight.data, min=0)\n', (11551, 11578), False, 'import torch\n'), ((2856, 2873), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (2868, 2873), False, 'import torch\n'), ((9611, 9658), 'itertools.product', 'itertools.product', (['[-1, 1]'], {'repeat': 'num_features'}), '([-1, 1], repeat=num_features)\n', (9628, 9658), False, 'import itertools\n'), ((2005, 2050), 'stew.utils.create_diff_matrix', 'create_diff_matrix', ([], {'num_features': 'num_features'}), '(num_features=num_features)\n', (2023, 2050), False, 'from stew.utils import create_diff_matrix\n'), ((2480, 2524), 'numpy.minimum', 'np.minimum', (['self.learning_rate', '(1 / self.lam)'], {}), '(self.learning_rate, 1 / self.lam)\n', (2490, 2524), True, 'import numpy as np\n'), ((3312, 3329), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (3324, 3329), False, 'import torch\n'), ((433, 450), 'numpy.dot', 'np.dot', (['beta', 'X.T'], {}), '(beta, X.T)\n', (439, 450), True, 'import numpy as np\n'), ((2619, 2638), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (2635, 2638), False, 'import torch\n'), ((3023, 3117), 'torch.pow', 'torch.pow', (['(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][j])', '(2)'], {}), '(self.model.input_linear.weight[0][i] - self.model.input_linear.\n weight[0][j], 2)\n', (3032, 3117), False, 'import torch\n'), ((3765, 3782), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (3777, 3782), False, 'import torch\n'), ((3479, 3570), 'torch.abs', 'torch.abs', (['(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][j])'], {}), '(self.model.input_linear.weight[0][i] - self.model.input_linear.\n weight[0][j])\n', (3488, 3570), False, 'import torch\n'), ((3866, 3916), 'torch.pow', 'torch.pow', (['self.model.input_linear.weight[0][i]', '(2)'], {}), '(self.model.input_linear.weight[0][i], 2)\n', (3875, 3916), False, 'import torch\n'), ((4086, 4103), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4098, 4103), False, 'import torch\n'), ((2735, 2754), 'torch.from_numpy', 
'torch.from_numpy', (['y'], {}), '(y)\n', (2751, 2754), False, 'import torch\n'), ((4187, 4234), 'torch.abs', 'torch.abs', (['self.model.input_linear.weight[0][i]'], {}), '(self.model.input_linear.weight[0][i])\n', (4196, 4234), False, 'import torch\n'), ((4404, 4421), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4416, 4421), False, 'import torch\n'), ((4886, 4903), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (4898, 4903), False, 'import torch\n'), ((5363, 5380), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5375, 5380), False, 'import torch\n'), ((7984, 8003), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (8000, 8003), False, 'import torch\n'), ((5466, 5570), 'torch.clamp', 'torch.clamp', (['(self.model.input_linear.weight[0][i] - self.model.input_linear.weight[0][i -\n 1])'], {'min': '(0)'}), '(self.model.input_linear.weight[0][i] - self.model.input_linear.\n weight[0][i - 1], min=0)\n', (5477, 5570), False, 'import torch\n'), ((5733, 5750), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (5745, 5750), False, 'import torch\n'), ((4585, 4632), 'torch.abs', 'torch.abs', (['self.model.input_linear.weight[0][i]'], {}), '(self.model.input_linear.weight[0][i])\n', (4594, 4632), False, 'import torch\n'), ((4635, 4682), 'torch.abs', 'torch.abs', (['self.model.input_linear.weight[0][j]'], {}), '(self.model.input_linear.weight[0][j])\n', (4644, 4682), False, 'import torch\n'), ((6205, 6222), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6217, 6222), False, 'import torch\n'), ((5067, 5114), 'torch.abs', 'torch.abs', (['self.model.input_linear.weight[0][i]'], {}), '(self.model.input_linear.weight[0][i])\n', (5076, 5114), False, 'import torch\n'), ((5117, 5164), 'torch.abs', 'torch.abs', (['self.model.input_linear.weight[0][j]'], {}), '(self.model.input_linear.weight[0][j])\n', (5126, 5164), False, 'import torch\n'), ((6585, 6602), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6597, 6602), False, 'import torch\n'), ((6689, 6791), 'torch.pow', 'torch.pow', (['(self.model.input_linear.weight[0][i] - 2 * self.model.input_linear.weight[\n 0][i + 1])', '(2)'], {}), '(self.model.input_linear.weight[0][i] - 2 * self.model.\n input_linear.weight[0][i + 1], 2)\n', (6698, 6791), False, 'import torch\n'), ((6956, 6973), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (6968, 6973), False, 'import torch\n'), ((7060, 7178), 'torch.pow', 'torch.pow', (['(self.model.input_linear.weight[0][i] - (i + 2) / (i + 1) * self.model.\n input_linear.weight[0][i + 1])', '(2)'], {}), '(self.model.input_linear.weight[0][i] - (i + 2) / (i + 1) * self.\n model.input_linear.weight[0][i + 1], 2)\n', (7069, 7178), False, 'import torch\n')]
|
from config import model_name
import pandas as pd
import swifter
import json
import math
from tqdm import tqdm
from os import path
from pathlib import Path
import random
from nltk.tokenize import word_tokenize
import numpy as np
import csv
import importlib
from transformers import RobertaTokenizer, RobertaModel
import torch
try:
config = getattr(importlib.import_module('config'), f"{model_name}Config")
except AttributeError:
print(f"{model_name} not included!")
exit()
def parse_behaviors(source, target, user2int_path):
"""
Parse behaviors file in training set.
Args:
source: source behaviors file
target: target behaviors file
user2int_path: path for saving user2int file
"""
print(f"Parse {source}")
behaviors = pd.read_table(
source,
header=None,
names=['impression_id', 'user', 'time', 'clicked_news', 'impressions'])
behaviors.clicked_news.fillna(' ', inplace=True)
behaviors.impressions = behaviors.impressions.str.split()
user2int = {}
for row in behaviors.itertuples(index=False):
if row.user not in user2int:
user2int[row.user] = len(user2int) + 1
pd.DataFrame(user2int.items(), columns=['user',
'int']).to_csv(user2int_path,
sep='\t',
index=False)
print(
f'Please modify `num_users` in `src/config.py` into 1 + {len(user2int)}'
)
for row in behaviors.itertuples():
behaviors.at[row.Index, 'user'] = user2int[row.user]
for row in tqdm(behaviors.itertuples(), desc="Balancing data"):
positive = iter([x for x in row.impressions if x.endswith('1')])
negative = [x for x in row.impressions if x.endswith('0')]
random.shuffle(negative)
negative = iter(negative)
pairs = []
try:
while True:
pair = [next(positive)]
for _ in range(config.negative_sampling_ratio):
pair.append(next(negative))
pairs.append(pair)
except StopIteration:
pass
behaviors.at[row.Index, 'impressions'] = pairs
behaviors = behaviors.explode('impressions').dropna(
subset=["impressions"]).reset_index(drop=True)
behaviors[['candidate_news', 'clicked']] = pd.DataFrame(
behaviors.impressions.map(
lambda x: (' '.join([e.split('-')[0] for e in x]), ' '.join(
[e.split('-')[1] for e in x]))).tolist())
behaviors.to_csv(
target,
sep='\t',
index=False,
columns=['user', 'clicked_news', 'candidate_news', 'clicked'])
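# Minimal sketch (not part of the original pipeline) of the balancing step above: each clicked
# impression ('-1' suffix) is grouped with `ratio` shuffled non-clicked impressions ('-0' suffix),
# and leftovers on either side are dropped. `ratio` stands in for config.negative_sampling_ratio.
def _negative_sampling_demo(impressions, ratio=2, seed=0):
    random.seed(seed)
    positive = iter([x for x in impressions if x.endswith('1')])
    negative = [x for x in impressions if x.endswith('0')]
    random.shuffle(negative)
    negative = iter(negative)
    pairs = []
    try:
        while True:
            pair = [next(positive)]
            for _ in range(ratio):
                pair.append(next(negative))
            pairs.append(pair)
    except StopIteration:
        pass
    return pairs
# e.g. _negative_sampling_demo(['N1-1', 'N2-0', 'N3-0', 'N4-0', 'N5-1', 'N6-0'])
# -> two groups, each with one clicked and two non-clicked news ids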
def parse_news(source, target, roberta_output_dir, category2int_path,
word2int_path, entity2int_path, mode):
"""
Parse news for training set and test set
Args:
source: source news file
target: target news file
roberta_output_dir: directory in which the precomputed RoBERTa embeddings are saved
if mode == 'train':
category2int_path, word2int_path, entity2int_path: Path to save
elif mode == 'test':
category2int_path, word2int_path, entity2int_path: Path to load from
"""
print(f"Parse {source}")
news = pd.read_table(source,
header=None,
usecols=[0, 1, 2, 3, 4, 6, 7],
quoting=csv.QUOTE_NONE,
names=[
'id', 'category', 'subcategory', 'title',
'abstract', 'title_entities', 'abstract_entities'
]) # TODO try to avoid csv.QUOTE_NONE
news.title_entities.fillna('[]', inplace=True)
news.abstract_entities.fillna('[]', inplace=True)
news.fillna(' ', inplace=True)
tokenizer = RobertaTokenizer.from_pretrained("roberta-base")
title_roberta = tokenizer(news.title.tolist(),
padding='max_length',
truncation=True,
max_length=config.num_words_title)
abstract_roberta = tokenizer(news.abstract.tolist(),
padding='max_length',
truncation=True,
max_length=config.num_words_abstract)
roberta_df = pd.DataFrame(data=[
title_roberta['input_ids'], title_roberta['attention_mask'],
abstract_roberta['input_ids'], abstract_roberta['attention_mask']
]).T
roberta_df.columns = [
'title_roberta', 'title_mask_roberta', 'abstract_roberta',
'abstract_mask_roberta'
]
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
for x in [title_roberta, abstract_roberta]:
for key in x.keys():
x[key] = torch.tensor(x[key]).to(device)
Path(roberta_output_dir).mkdir(parents=True, exist_ok=True)
roberta = RobertaModel.from_pretrained('roberta-base',
return_dict=True).to(device)
with torch.no_grad():
title_last_hidden_state = []
title_pooler_output = []
abstract_last_hidden_state = []
abstract_pooler_output = []
for count in tqdm(range(math.ceil(len(news) / config.batch_size)),
desc="Calculating news embeddings with RoBERTa"):
title_roberta_minibatch = {
k: v[count * config.batch_size:(1 + count) * config.batch_size]
for k, v in title_roberta.items()
}
title_outputs = roberta(**title_roberta_minibatch)
title_last_hidden_state.append(
title_outputs['last_hidden_state'].cpu().numpy())
title_pooler_output.append(
title_outputs['pooler_output'].cpu().numpy())
abstract_roberta_minibatch = {
k: v[count * config.batch_size:(1 + count) * config.batch_size]
for k, v in abstract_roberta.items()
}
abstract_outputs = roberta(**abstract_roberta_minibatch)
abstract_last_hidden_state.append(
abstract_outputs['last_hidden_state'].cpu().numpy())
abstract_pooler_output.append(
abstract_outputs['pooler_output'].cpu().numpy())
np.save(path.join(roberta_output_dir, 'title_last_hidden_state.npy'),
np.concatenate(title_last_hidden_state, axis=0))
np.save(path.join(roberta_output_dir, 'title_pooler_output.npy'),
np.concatenate(title_pooler_output, axis=0))
np.save(
path.join(roberta_output_dir, 'abstract_last_hidden_state.npy'),
np.concatenate(abstract_last_hidden_state, axis=0))
np.save(path.join(roberta_output_dir, 'abstract_pooler_output.npy'),
np.concatenate(abstract_pooler_output, axis=0))
def parse_row(row):
new_row = [
row.id,
category2int[row.category] if row.category in category2int else 0,
category2int[row.subcategory]
if row.subcategory in category2int else 0,
[0] * config.num_words_title, [0] * config.num_words_abstract,
[0] * config.num_words_title, [0] * config.num_words_abstract
]
# Calculate local entity map (map lower single word to entity)
local_entity_map = {}
for e in json.loads(row.title_entities):
if e['Confidence'] > config.entity_confidence_threshold and e[
'WikidataId'] in entity2int:
for x in ' '.join(e['SurfaceForms']).lower().split():
local_entity_map[x] = entity2int[e['WikidataId']]
for e in json.loads(row.abstract_entities):
if e['Confidence'] > config.entity_confidence_threshold and e[
'WikidataId'] in entity2int:
for x in ' '.join(e['SurfaceForms']).lower().split():
local_entity_map[x] = entity2int[e['WikidataId']]
try:
for i, w in enumerate(word_tokenize(row.title.lower())):
if w in word2int:
new_row[3][i] = word2int[w]
if w in local_entity_map:
new_row[5][i] = local_entity_map[w]
except IndexError:
pass
try:
for i, w in enumerate(word_tokenize(row.abstract.lower())):
if w in word2int:
new_row[4][i] = word2int[w]
if w in local_entity_map:
new_row[6][i] = local_entity_map[w]
except IndexError:
pass
return pd.Series(new_row,
index=[
'id', 'category', 'subcategory', 'title',
'abstract', 'title_entities', 'abstract_entities'
])
if mode == 'train':
category2int = {}
word2int = {}
word2freq = {}
entity2int = {}
entity2freq = {}
for row in news.itertuples(index=False):
if row.category not in category2int:
category2int[row.category] = len(category2int) + 1
if row.subcategory not in category2int:
category2int[row.subcategory] = len(category2int) + 1
for w in word_tokenize(row.title.lower()):
if w not in word2freq:
word2freq[w] = 1
else:
word2freq[w] += 1
for w in word_tokenize(row.abstract.lower()):
if w not in word2freq:
word2freq[w] = 1
else:
word2freq[w] += 1
for e in json.loads(row.title_entities):
times = len(e['OccurrenceOffsets']) * e['Confidence']
if times > 0:
if e['WikidataId'] not in entity2freq:
entity2freq[e['WikidataId']] = times
else:
entity2freq[e['WikidataId']] += times
for e in json.loads(row.abstract_entities):
times = len(e['OccurrenceOffsets']) * e['Confidence']
if times > 0:
if e['WikidataId'] not in entity2freq:
entity2freq[e['WikidataId']] = times
else:
entity2freq[e['WikidataId']] += times
for k, v in word2freq.items():
if v >= config.word_freq_threshold:
word2int[k] = len(word2int) + 1
for k, v in entity2freq.items():
if v >= config.entity_freq_threshold:
entity2int[k] = len(entity2int) + 1
parsed_news = news.swifter.apply(parse_row, axis=1)
parsed_news = pd.concat([parsed_news, roberta_df], axis=1)
parsed_news.to_csv(target, sep='\t', index=False)
pd.DataFrame(category2int.items(),
columns=['category', 'int']).to_csv(category2int_path,
sep='\t',
index=False)
print(
f'Please modify `num_categories` in `src/config.py` into 1 + {len(category2int)}'
)
pd.DataFrame(word2int.items(), columns=['word',
'int']).to_csv(word2int_path,
sep='\t',
index=False)
print(
f'Please modify `num_words` in `src/config.py` into 1 + {len(word2int)}'
)
pd.DataFrame(entity2int.items(),
columns=['entity', 'int']).to_csv(entity2int_path,
sep='\t',
index=False)
print(
f'Please modify `num_entities` in `src/config.py` into 1 + {len(entity2int)}'
)
elif mode == 'test':
category2int = dict(pd.read_table(category2int_path).values.tolist())
# na_filter=False is needed since nan is also a valid word
word2int = dict(
pd.read_table(word2int_path, na_filter=False).values.tolist())
entity2int = dict(pd.read_table(entity2int_path).values.tolist())
parsed_news = news.swifter.apply(parse_row, axis=1)
parsed_news = pd.concat([parsed_news, roberta_df], axis=1)
parsed_news.to_csv(target, sep='\t', index=False)
else:
print('Wrong mode!')
def generate_word_embedding(source, target, word2int_path):
"""
Generate from pretrained word embedding file
If a word is not in the embedding file, its embedding is initialized from N(0, 1)
Args:
source: path of pretrained word embedding file, e.g. glove.840B.300d.txt
target: path for saving word embedding. Will be saved in numpy format
word2int_path: vocabulary file whose words are looked up in the pretrained embedding file
"""
# na_filter=False is needed since nan is also a valid word
# word, int
word2int = pd.read_table(word2int_path, na_filter=False, index_col='word')
source_embedding = pd.read_table(source,
index_col=0,
sep=' ',
header=None,
quoting=csv.QUOTE_NONE,
names=range(config.word_embedding_dim))
# word, vector
source_embedding.index.rename('word', inplace=True)
# word, int, vector
merged = word2int.merge(source_embedding,
how='inner',
left_index=True,
right_index=True)
merged.set_index('int', inplace=True)
missed_index = np.setdiff1d(np.arange(len(word2int) + 1),
merged.index.values)
missed_embedding = pd.DataFrame(data=np.random.normal(
size=(len(missed_index), config.word_embedding_dim)))
missed_embedding['int'] = missed_index
missed_embedding.set_index('int', inplace=True)
final_embedding = pd.concat([merged, missed_embedding]).sort_index()
np.save(target, final_embedding.values)
print(
f'Rate of word missed in pretrained embedding: {(len(missed_index)-1)/len(word2int):.4f}'
)
def transform_entity_embedding(source, target, entity2int_path):
"""
Args:
source: path of embedding file
target: path of transformed embedding file in numpy format
entity2int_path: path of the entity2int mapping file
"""
entity_embedding = pd.read_table(source, header=None)
entity_embedding['vector'] = entity_embedding.iloc[:,
1:101].values.tolist()
entity_embedding = entity_embedding[[0, 'vector'
]].rename(columns={0: "entity"})
entity2int = pd.read_table(entity2int_path)
merged_df = pd.merge(entity_embedding, entity2int,
on='entity').sort_values('int')
entity_embedding_transformed = np.random.normal(
size=(len(entity2int) + 1, config.entity_embedding_dim))
for row in merged_df.itertuples(index=False):
entity_embedding_transformed[row.int] = row.vector
np.save(target, entity_embedding_transformed)
if __name__ == '__main__':
train_dir = './data/train'
val_dir = './data/val'
test_dir = './data/test'
print('Process data for training')
print('Parse behaviors')
parse_behaviors(path.join(train_dir, 'behaviors.tsv'),
path.join(train_dir, 'behaviors_parsed.tsv'),
path.join(train_dir, 'user2int.tsv'))
print('Parse news')
parse_news(path.join(train_dir, 'news.tsv'),
path.join(train_dir, 'news_parsed.tsv'),
path.join(train_dir, 'roberta'),
path.join(train_dir, 'category2int.tsv'),
path.join(train_dir, 'word2int.tsv'),
path.join(train_dir, 'entity2int.tsv'),
mode='train')
print('Generate word embedding')
generate_word_embedding(
f'./data/glove/glove.840B.{config.word_embedding_dim}d.txt',
path.join(train_dir, 'pretrained_word_embedding.npy'),
path.join(train_dir, 'word2int.tsv'))
print('Transform entity embeddings')
transform_entity_embedding(
path.join(train_dir, 'entity_embedding.vec'),
path.join(train_dir, 'pretrained_entity_embedding.npy'),
path.join(train_dir, 'entity2int.tsv'))
print('\nProcess data for validation')
print('Parse news')
parse_news(path.join(val_dir, 'news.tsv'),
path.join(val_dir, 'news_parsed.tsv'),
path.join(val_dir, 'roberta'),
path.join(train_dir, 'category2int.tsv'),
path.join(train_dir, 'word2int.tsv'),
path.join(train_dir, 'entity2int.tsv'),
mode='test')
print('\nProcess data for test')
print('Parse news')
parse_news(path.join(test_dir, 'news.tsv'),
path.join(test_dir, 'news_parsed.tsv'),
path.join(test_dir, 'roberta'),
path.join(train_dir, 'category2int.tsv'),
path.join(train_dir, 'word2int.tsv'),
path.join(train_dir, 'entity2int.tsv'),
mode='test')
|
[
"pandas.DataFrame",
"numpy.save",
"json.loads",
"importlib.import_module",
"transformers.RobertaTokenizer.from_pretrained",
"random.shuffle",
"pandas.merge",
"transformers.RobertaModel.from_pretrained",
"pathlib.Path",
"torch.cuda.is_available",
"pandas.Series",
"pandas.read_table",
"torch.tensor",
"torch.no_grad",
"os.path.join",
"pandas.concat",
"numpy.concatenate"
] |
[((783, 893), 'pandas.read_table', 'pd.read_table', (['source'], {'header': 'None', 'names': "['impression_id', 'user', 'time', 'clicked_news', 'impressions']"}), "(source, header=None, names=['impression_id', 'user', 'time',\n 'clicked_news', 'impressions'])\n", (796, 893), True, 'import pandas as pd\n'), ((3280, 3479), 'pandas.read_table', 'pd.read_table', (['source'], {'header': 'None', 'usecols': '[0, 1, 2, 3, 4, 6, 7]', 'quoting': 'csv.QUOTE_NONE', 'names': "['id', 'category', 'subcategory', 'title', 'abstract', 'title_entities',\n 'abstract_entities']"}), "(source, header=None, usecols=[0, 1, 2, 3, 4, 6, 7], quoting=\n csv.QUOTE_NONE, names=['id', 'category', 'subcategory', 'title',\n 'abstract', 'title_entities', 'abstract_entities'])\n", (3293, 3479), True, 'import pandas as pd\n'), ((3849, 3897), 'transformers.RobertaTokenizer.from_pretrained', 'RobertaTokenizer.from_pretrained', (['"""roberta-base"""'], {}), "('roberta-base')\n", (3881, 3897), False, 'from transformers import RobertaTokenizer, RobertaModel\n'), ((13201, 13264), 'pandas.read_table', 'pd.read_table', (['word2int_path'], {'na_filter': '(False)', 'index_col': '"""word"""'}), "(word2int_path, na_filter=False, index_col='word')\n", (13214, 13264), True, 'import pandas as pd\n'), ((14323, 14362), 'numpy.save', 'np.save', (['target', 'final_embedding.values'], {}), '(target, final_embedding.values)\n', (14330, 14362), True, 'import numpy as np\n'), ((14725, 14759), 'pandas.read_table', 'pd.read_table', (['source'], {'header': 'None'}), '(source, header=None)\n', (14738, 14759), True, 'import pandas as pd\n'), ((15041, 15071), 'pandas.read_table', 'pd.read_table', (['entity2int_path'], {}), '(entity2int_path)\n', (15054, 15071), True, 'import pandas as pd\n'), ((15415, 15460), 'numpy.save', 'np.save', (['target', 'entity_embedding_transformed'], {}), '(target, entity_embedding_transformed)\n', (15422, 15460), True, 'import numpy as np\n'), ((353, 386), 'importlib.import_module', 'importlib.import_module', (['"""config"""'], {}), "('config')\n", (376, 386), False, 'import importlib\n'), ((1871, 1895), 'random.shuffle', 'random.shuffle', (['negative'], {}), '(negative)\n', (1885, 1895), False, 'import random\n'), ((4364, 4521), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': "[title_roberta['input_ids'], title_roberta['attention_mask'],\n abstract_roberta['input_ids'], abstract_roberta['attention_mask']]"}), "(data=[title_roberta['input_ids'], title_roberta[\n 'attention_mask'], abstract_roberta['input_ids'], abstract_roberta[\n 'attention_mask']])\n", (4376, 4521), True, 'import pandas as pd\n'), ((5079, 5094), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5092, 5094), False, 'import torch\n'), ((7433, 7463), 'json.loads', 'json.loads', (['row.title_entities'], {}), '(row.title_entities)\n', (7443, 7463), False, 'import json\n'), ((7746, 7779), 'json.loads', 'json.loads', (['row.abstract_entities'], {}), '(row.abstract_entities)\n', (7756, 7779), False, 'import json\n'), ((8694, 8817), 'pandas.Series', 'pd.Series', (['new_row'], {'index': "['id', 'category', 'subcategory', 'title', 'abstract', 'title_entities',\n 'abstract_entities']"}), "(new_row, index=['id', 'category', 'subcategory', 'title',\n 'abstract', 'title_entities', 'abstract_entities'])\n", (8703, 8817), True, 'import pandas as pd\n'), ((10833, 10877), 'pandas.concat', 'pd.concat', (['[parsed_news, roberta_df]'], {'axis': '(1)'}), '([parsed_news, roberta_df], axis=1)\n', (10842, 10877), True, 'import pandas as pd\n'), ((15667, 15704), 'os.path.join', 
'path.join', (['train_dir', '"""behaviors.tsv"""'], {}), "(train_dir, 'behaviors.tsv')\n", (15676, 15704), False, 'from os import path\n'), ((15726, 15770), 'os.path.join', 'path.join', (['train_dir', '"""behaviors_parsed.tsv"""'], {}), "(train_dir, 'behaviors_parsed.tsv')\n", (15735, 15770), False, 'from os import path\n'), ((15792, 15828), 'os.path.join', 'path.join', (['train_dir', '"""user2int.tsv"""'], {}), "(train_dir, 'user2int.tsv')\n", (15801, 15828), False, 'from os import path\n'), ((15870, 15902), 'os.path.join', 'path.join', (['train_dir', '"""news.tsv"""'], {}), "(train_dir, 'news.tsv')\n", (15879, 15902), False, 'from os import path\n'), ((15919, 15958), 'os.path.join', 'path.join', (['train_dir', '"""news_parsed.tsv"""'], {}), "(train_dir, 'news_parsed.tsv')\n", (15928, 15958), False, 'from os import path\n'), ((15975, 16006), 'os.path.join', 'path.join', (['train_dir', '"""roberta"""'], {}), "(train_dir, 'roberta')\n", (15984, 16006), False, 'from os import path\n'), ((16023, 16063), 'os.path.join', 'path.join', (['train_dir', '"""category2int.tsv"""'], {}), "(train_dir, 'category2int.tsv')\n", (16032, 16063), False, 'from os import path\n'), ((16080, 16116), 'os.path.join', 'path.join', (['train_dir', '"""word2int.tsv"""'], {}), "(train_dir, 'word2int.tsv')\n", (16089, 16116), False, 'from os import path\n'), ((16133, 16171), 'os.path.join', 'path.join', (['train_dir', '"""entity2int.tsv"""'], {}), "(train_dir, 'entity2int.tsv')\n", (16142, 16171), False, 'from os import path\n'), ((16346, 16399), 'os.path.join', 'path.join', (['train_dir', '"""pretrained_word_embedding.npy"""'], {}), "(train_dir, 'pretrained_word_embedding.npy')\n", (16355, 16399), False, 'from os import path\n'), ((16409, 16445), 'os.path.join', 'path.join', (['train_dir', '"""word2int.tsv"""'], {}), "(train_dir, 'word2int.tsv')\n", (16418, 16445), False, 'from os import path\n'), ((16529, 16573), 'os.path.join', 'path.join', (['train_dir', '"""entity_embedding.vec"""'], {}), "(train_dir, 'entity_embedding.vec')\n", (16538, 16573), False, 'from os import path\n'), ((16583, 16638), 'os.path.join', 'path.join', (['train_dir', '"""pretrained_entity_embedding.npy"""'], {}), "(train_dir, 'pretrained_entity_embedding.npy')\n", (16592, 16638), False, 'from os import path\n'), ((16648, 16686), 'os.path.join', 'path.join', (['train_dir', '"""entity2int.tsv"""'], {}), "(train_dir, 'entity2int.tsv')\n", (16657, 16686), False, 'from os import path\n'), ((16772, 16802), 'os.path.join', 'path.join', (['val_dir', '"""news.tsv"""'], {}), "(val_dir, 'news.tsv')\n", (16781, 16802), False, 'from os import path\n'), ((16819, 16856), 'os.path.join', 'path.join', (['val_dir', '"""news_parsed.tsv"""'], {}), "(val_dir, 'news_parsed.tsv')\n", (16828, 16856), False, 'from os import path\n'), ((16873, 16902), 'os.path.join', 'path.join', (['val_dir', '"""roberta"""'], {}), "(val_dir, 'roberta')\n", (16882, 16902), False, 'from os import path\n'), ((16919, 16959), 'os.path.join', 'path.join', (['train_dir', '"""category2int.tsv"""'], {}), "(train_dir, 'category2int.tsv')\n", (16928, 16959), False, 'from os import path\n'), ((16976, 17012), 'os.path.join', 'path.join', (['train_dir', '"""word2int.tsv"""'], {}), "(train_dir, 'word2int.tsv')\n", (16985, 17012), False, 'from os import path\n'), ((17029, 17067), 'os.path.join', 'path.join', (['train_dir', '"""entity2int.tsv"""'], {}), "(train_dir, 'entity2int.tsv')\n", (17038, 17067), False, 'from os import path\n'), ((17175, 17206), 'os.path.join', 'path.join', (['test_dir', 
'"""news.tsv"""'], {}), "(test_dir, 'news.tsv')\n", (17184, 17206), False, 'from os import path\n'), ((17223, 17261), 'os.path.join', 'path.join', (['test_dir', '"""news_parsed.tsv"""'], {}), "(test_dir, 'news_parsed.tsv')\n", (17232, 17261), False, 'from os import path\n'), ((17278, 17308), 'os.path.join', 'path.join', (['test_dir', '"""roberta"""'], {}), "(test_dir, 'roberta')\n", (17287, 17308), False, 'from os import path\n'), ((17325, 17365), 'os.path.join', 'path.join', (['train_dir', '"""category2int.tsv"""'], {}), "(train_dir, 'category2int.tsv')\n", (17334, 17365), False, 'from os import path\n'), ((17382, 17418), 'os.path.join', 'path.join', (['train_dir', '"""word2int.tsv"""'], {}), "(train_dir, 'word2int.tsv')\n", (17391, 17418), False, 'from os import path\n'), ((17435, 17473), 'os.path.join', 'path.join', (['train_dir', '"""entity2int.tsv"""'], {}), "(train_dir, 'entity2int.tsv')\n", (17444, 17473), False, 'from os import path\n'), ((4707, 4732), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4730, 4732), False, 'import torch\n'), ((4879, 4903), 'pathlib.Path', 'Path', (['roberta_output_dir'], {}), '(roberta_output_dir)\n', (4883, 4903), False, 'from pathlib import Path\n'), ((4953, 5015), 'transformers.RobertaModel.from_pretrained', 'RobertaModel.from_pretrained', (['"""roberta-base"""'], {'return_dict': '(True)'}), "('roberta-base', return_dict=True)\n", (4981, 5015), False, 'from transformers import RobertaTokenizer, RobertaModel\n'), ((6353, 6413), 'os.path.join', 'path.join', (['roberta_output_dir', '"""title_last_hidden_state.npy"""'], {}), "(roberta_output_dir, 'title_last_hidden_state.npy')\n", (6362, 6413), False, 'from os import path\n'), ((6431, 6478), 'numpy.concatenate', 'np.concatenate', (['title_last_hidden_state'], {'axis': '(0)'}), '(title_last_hidden_state, axis=0)\n', (6445, 6478), True, 'import numpy as np\n'), ((6496, 6552), 'os.path.join', 'path.join', (['roberta_output_dir', '"""title_pooler_output.npy"""'], {}), "(roberta_output_dir, 'title_pooler_output.npy')\n", (6505, 6552), False, 'from os import path\n'), ((6570, 6613), 'numpy.concatenate', 'np.concatenate', (['title_pooler_output'], {'axis': '(0)'}), '(title_pooler_output, axis=0)\n', (6584, 6613), True, 'import numpy as np\n'), ((6644, 6707), 'os.path.join', 'path.join', (['roberta_output_dir', '"""abstract_last_hidden_state.npy"""'], {}), "(roberta_output_dir, 'abstract_last_hidden_state.npy')\n", (6653, 6707), False, 'from os import path\n'), ((6721, 6771), 'numpy.concatenate', 'np.concatenate', (['abstract_last_hidden_state'], {'axis': '(0)'}), '(abstract_last_hidden_state, axis=0)\n', (6735, 6771), True, 'import numpy as np\n'), ((6789, 6848), 'os.path.join', 'path.join', (['roberta_output_dir', '"""abstract_pooler_output.npy"""'], {}), "(roberta_output_dir, 'abstract_pooler_output.npy')\n", (6798, 6848), False, 'from os import path\n'), ((6866, 6912), 'numpy.concatenate', 'np.concatenate', (['abstract_pooler_output'], {'axis': '(0)'}), '(abstract_pooler_output, axis=0)\n', (6880, 6912), True, 'import numpy as np\n'), ((9765, 9795), 'json.loads', 'json.loads', (['row.title_entities'], {}), '(row.title_entities)\n', (9775, 9795), False, 'import json\n'), ((10127, 10160), 'json.loads', 'json.loads', (['row.abstract_entities'], {}), '(row.abstract_entities)\n', (10137, 10160), False, 'import json\n'), ((12496, 12540), 'pandas.concat', 'pd.concat', (['[parsed_news, roberta_df]'], {'axis': '(1)'}), '([parsed_news, roberta_df], axis=1)\n', (12505, 12540), True, 'import 
pandas as pd\n'), ((14268, 14305), 'pandas.concat', 'pd.concat', (['[merged, missed_embedding]'], {}), '([merged, missed_embedding])\n', (14277, 14305), True, 'import pandas as pd\n'), ((15088, 15139), 'pandas.merge', 'pd.merge', (['entity_embedding', 'entity2int'], {'on': '"""entity"""'}), "(entity_embedding, entity2int, on='entity')\n", (15096, 15139), True, 'import pandas as pd\n'), ((4843, 4863), 'torch.tensor', 'torch.tensor', (['x[key]'], {}), '(x[key])\n', (4855, 4863), False, 'import torch\n'), ((12122, 12154), 'pandas.read_table', 'pd.read_table', (['category2int_path'], {}), '(category2int_path)\n', (12135, 12154), True, 'import pandas as pd\n'), ((12276, 12321), 'pandas.read_table', 'pd.read_table', (['word2int_path'], {'na_filter': '(False)'}), '(word2int_path, na_filter=False)\n', (12289, 12321), True, 'import pandas as pd\n'), ((12365, 12395), 'pandas.read_table', 'pd.read_table', (['entity2int_path'], {}), '(entity2int_path)\n', (12378, 12395), True, 'import pandas as pd\n')]
|
import numpy as np
import cv2
import scipy
import random
import tensorflow as tf
import keras
from keras import models
from keras import layers
from keras.layers import convolutional
from keras.layers import core
# tflearn is needed by convnet_alexnet_lion_tflearn below
import tflearn
from tflearn.data_augmentation import ImageAugmentation
from tflearn.data_preprocessing import ImagePreprocessing
import modules.utils as utils
from modules.logging import logger
CLASS_LABELS = ['0-adult_male', '1-subadult_male', '2-adult_female', '3-juvenile', '4-pup', '5-non lion']
#each index is a min/max color for a class mark
C_MIN = [
np.array([0, 0, 160]),
np.array([200, 0, 200]),
np.array([10, 40, 75]),
np.array([150, 40, 0]),
np.array([25, 140, 40])
]
C_MAX = [
np.array([50, 50, 255]),
np.array([255, 55, 255]),
np.array([20, 55, 130]),
np.array([255, 80, 40]),
np.array([50, 255, 65])
]
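# Minimal sketch (not part of the original module) of how the colour ranges above can be used:
# cv2.inRange keeps the pixels of a dotted training image whose colour lies in [C_MIN[i], C_MAX[i]],
# yielding one binary mask per marked class (class 5, non lion, has no mark). The ranges are
# assumed to be in the BGR channel order produced by cv2.imread.
def class_mark_masks(dotted_image):
    return [cv2.inRange(dotted_image, C_MIN[i], C_MAX[i]) for i in range(len(C_MIN))]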
#adapted from alexnet
def convnet_alexnet_lion_keras(image_dims):
# model = Sequential()
# model.add(Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
NR_CLASSES = 6
input = layers.Input(shape=image_dims, name="Input")
conv_1 = convolutional.Convolution2D(96, 11, 11, border_mode='valid', name="conv_1", activation='relu', init='glorot_uniform')(input)
pool_1 = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_1")(conv_1)
zero_padding_1 = convolutional.ZeroPadding2D(padding=(1, 1), name="zero_padding_1")(pool_1)
conv_2 = convolutional.Convolution2D(256, 3, 3, border_mode='valid', name="conv_2", activation='relu', init='glorot_uniform')(zero_padding_1)
pool_2 = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_2")(conv_2)
zero_padding_2 = keras.layers.convolutional.ZeroPadding2D(padding=(1, 1), name="zero_padding_2")(pool_2)
conv_3 = convolutional.Convolution2D(384, 3, 3, border_mode='valid', name="conv_3", activation='relu', init='glorot_uniform')(zero_padding_2)
conv_4 = convolutional.Convolution2D(384, 3, 3, border_mode='valid', name="conv_4", activation='relu', init='glorot_uniform')(conv_3)
conv_5 = convolutional.Convolution2D(256, 3, 3, border_mode='valid', name="conv_5", activation='relu', init='glorot_uniform')(conv_4)
pool_3 = convolutional.MaxPooling2D(pool_size=(3, 3), name="pool_3")(conv_5)
flatten = core.Flatten(name="flatten")(pool_3)
fc_1 = core.Dense(4096, name="fc_1", activation='relu', init='glorot_uniform')(flatten)
fc_1 = core.Dropout(0.5, name="fc_1_dropout")(fc_1)
output = core.Dense(4096, name="Output", activation='relu', init='glorot_uniform')(fc_1)
output = core.Dropout(0.5, name="Output_dropout")(output)
fc_2 = core.Dense(NR_CLASSES, name="fc_2", activation='softmax', init='glorot_uniform')(output)
return models.Model([input], [fc_2])
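# Note: unlike the Sequential builders below, convnet_alexnet_lion_keras returns an uncompiled
# Model. A minimal compile helper, assuming the same loss/optimizer/metric the other builders use:
def compile_alexnet_lion_keras(image_dims):
    model = convnet_alexnet_lion_keras(image_dims)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
    return model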
def convnet_medium1_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax', init='glorot_uniform'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
def convnet_medium2_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(2048, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax', init='glorot_uniform'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
def convnet_medium3_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(2048, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(2048, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax', init='glorot_uniform'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
# don't change: there is already a good trained model for this net (72% acc)
def convnet_simple_lion_keras(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(32, (3, 3), activation='relu', padding='same'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(512, activation='relu'))
model.add(core.Dropout(0.5))
model.add(core.Dense(1024, activation='relu'))
model.add(core.Dropout(0.5))
model.add(core.Dense(6, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
def convnet_medium1_boolean(image_dims):
model = keras.models.Sequential()
model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))
model.add(convolutional.Conv2D(64, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(128, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(convolutional.Conv2D(256, (3, 3), activation='relu', padding='same', init='glorot_uniform'))
model.add(convolutional.MaxPooling2D(pool_size=(2,2)))
model.add(core.Flatten())
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(1024, activation='relu', init='glorot_uniform'))
model.add(core.Dropout(0.5))
model.add(core.Dense(2, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
return model
#adapted from alexnet
def convnet_alexnet_lion_tflearn(image_dims):
#image augmentation
img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_rotation(max_angle=360.)
img_aug.add_random_blur(sigma_max=5.)
#image pre-processing
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
#AlexNet
network = layers.core.input_data(shape=[None, image_dims[0], image_dims[1], image_dims[2]], dtype=tf.float32, data_preprocessing=img_prep, data_augmentation=img_aug)
network = layers.conv.conv_2d(network, 96, 11, strides=4, activation='relu')
network = layers.conv.max_pool_2d(network, 3, strides=2)
network = layers.normalization.local_response_normalization(network)
network = layers.conv.conv_2d(network, 256, 5, activation='relu')
network = layers.conv.max_pool_2d(network, 3, strides=2)
network = layers.normalization.local_response_normalization(network)
network = layers.conv.conv_2d(network, 384, 3, activation='relu')
network = layers.conv.conv_2d(network, 384, 3, activation='relu')
network = layers.conv.conv_2d(network, 256, 3, activation='relu')
network = layers.conv.max_pool_2d(network, 3, strides=2)
network = layers.normalization.local_response_normalization(network)
network = layers.core.fully_connected(network, 4096, activation='tanh')
network = layers.core.dropout(network, 0.5)
network = layers.core.fully_connected(network, 4096, activation='tanh')
network = layers.core.dropout(network, 0.5)
network = layers.core.fully_connected(network, 5, activation='softmax')
network = layers.estimator.regression(network, optimizer='momentum',
loss='categorical_crossentropy', learning_rate=0.001)
return network
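# Note: unlike the Keras builders above, this function uses tflearn's layer API
# (input_data, conv_2d, local_response_normalization, fully_connected, regression)
# together with tflearn's ImageAugmentation/ImagePreprocessing, so it assumes tflearn
# (and a `tf` handle to tensorflow) is importable under those names.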
def find_class(image, point):
    # Identify the sea lion class from the colour of the dot mark: take a small
    # patch around the annotated point and pick the colour range with the most pixels.
    image = image[point[1]-3:point[1]+3, point[0]-3:point[0]+3]
    result = -1
    best = 0
    for col in range(5):
        cmsk = cv2.inRange(image, C_MIN[col], C_MAX[col])
        sm = np.sum(cmsk)  # np.sum always returns a scalar, never None
        if sm > best:
            best = sm
            result = col
    return result
def export_lions(image_raw, image_dotted, target_x_ds, target_y_ds, image_dims, debug=False, min_distance_others=50, non_lion_distance=150, export_non_lion=True):
NR_CLASSES = 6
#BLACKOUT PORTIONS OF IMAGE IN RAW PICTURE
image_dotted_bw = cv2.cvtColor(image_dotted, cv2.COLOR_BGR2GRAY)
#utils.show_image(image_dotted_bw, size=8)
mask = cv2.threshold(image_dotted_bw, 5, 255, cv2.THRESH_BINARY)[1]
#utils.show_image(mask, size=8)
image_raw_bw = cv2.cvtColor(image_raw, cv2.COLOR_BGR2GRAY)
image_raw = cv2.bitwise_and(image_raw, image_raw, mask=mask)
#utils.show_image(image_raw, size=8, is_bgr=True)
#ISOLATE HUMAN MARKS ON DOTTED PICTURE
diff_color = cv2.absdiff(image_dotted, image_raw)
diff = cv2.cvtColor(diff_color, cv2.COLOR_BGR2GRAY)
kernel = np.ones((2,2),np.uint8)
diff = cv2.morphologyEx(diff, cv2.MORPH_OPEN, kernel)
ret,diff = cv2.threshold(diff,10,255,cv2.THRESH_TOZERO)
ret,diff = cv2.threshold(diff,0,255,cv2.THRESH_BINARY)
#debug data
debug_image = image_dotted.copy()
images = []
#find all dotted sea lions
count1 = 0
count_class = np.zeros(NR_CLASSES)
lion_positions = []
lion_classes = []
im2, contours, hierarchy = cv2.findContours(diff, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
for c in contours:
x,y,w,h = cv2.boundingRect(c)
if(w>4 and h>4):
count1 = count1 + 1
center = (x+round(w/3),y+round(h/3))
clazz = find_class(image_dotted, center)
if(clazz==-1):
logger.warning('could not detect sea lion class at ' + str(center))
continue
lion_positions.append(center)
count_class[clazz] = count_class[clazz] + 1
lion_classes.append(clazz)
if(debug):
cv2.circle(debug_image,center,round(w/2),(255,0,0),1)
count_class_added = np.zeros(NR_CLASSES)
#add found sea lions to training dataset
#filter out lions that are too near each other to minimize noise on training set
count2 = 0
for i, lion_pos in enumerate(lion_positions):
lion_class = lion_classes[i]
is_far = True
if(min_distance_others>0):
is_far = utils.is_far_from_others(lion_pos, lion_positions, min_distance_others)
if(is_far):
#export patch to train dataset
count2 = count2 + 1
pw = round(image_dims[1]/2)
ph = image_dims[1] - pw
#trainX = image_raw[lion_pos[1]-pw:lion_pos[1]+ph,lion_pos[0]-pw:lion_pos[0]+ph]
trainX = utils.crop_image_fill(image_raw, (lion_pos[1]-pw,lion_pos[0]-pw), (lion_pos[1]+ph,lion_pos[0]+ph))
m = np.mean(trainX)
if(m>30 and m<225 and m!=127):
if(debug):
images.append(trainX)
cv2.circle(debug_image,lion_pos,round(w/2),(0,0,255),2)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(debug_image,str(lion_class),lion_pos, font, 1.1,(255,255,255),2,cv2.LINE_AA)
#normalize between 0-1
#trainX = trainX/255
trainY = keras.utils.np_utils.to_categorical([lion_class], NR_CLASSES)[0]
if(target_x_ds!=None and target_y_ds!=None):
utils.add_sample_to_dataset(target_x_ds, target_y_ds, trainX, trainY)
count_class_added[lion_class] = count_class_added[lion_class] + 1
#identify non sea lion patches
count3 = 0
if(export_non_lion):
s = np.shape(image_raw)
for i in range(int(count2*1.1)):
patch_pos = (random.randint(image_dims[1]*2, s[1]-image_dims[1]*2), random.randint(image_dims[0]*2, s[0]-image_dims[0]*2))
is_far = utils.is_far_from_others(patch_pos, lion_positions, non_lion_distance)
if(is_far):
#export patch to train dataset
pw = round(image_dims[1]/2)
ph = image_dims[1] - pw
#trainX = image_raw[lion_pos[1]-pw:lion_pos[1]+ph,lion_pos[0]-pw:lion_pos[0]+ph]
trainX = utils.crop_image_fill(image_raw, (patch_pos[1]-pw,patch_pos[0]-pw), (patch_pos[1]+ph,patch_pos[0]+ph))
m = np.mean(trainX)
if(m>50 and m<200):
count3 = count3 + 1
if(debug):
images.append(trainX)
cv2.circle(debug_image,patch_pos,round(w/2),(0,255,0),3)
#normalize between 0-1
#trainX = trainX/255
trainY = keras.utils.np_utils.to_categorical([5], NR_CLASSES)[0]
if(target_x_ds!=None and target_y_ds!=None):
utils.add_sample_to_dataset(target_x_ds, target_y_ds, trainX, trainY)
count_class[5] = count_class[5] + 1
count_class_added[5] = count_class_added[5] + 1
logger.info('sea lions found: ' + str(count1))
logger.info('sea lions added to dataset: ' + str(count2))
logger.info('non sea lions added to dataset: ' + str(count3))
if(target_x_ds!=None and target_y_ds!=None):
logger.info('dataset size: ' + str(len(target_x_ds)))
if(debug):
utils.show_image(debug_image, size=40, is_bgr=True)
utils.show_images(images, cols=10, is_bgr=True, size=1.5)
return count_class, count_class_added, lion_positions, lion_classes
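# Illustrative only: a minimal sketch of driving export_lions for one image pair.
# The file names are placeholders and no HDF5 target datasets are passed, so patches
# are only counted, not persisted; pass real target_x_ds/target_y_ds to store them.
if __name__ == "__main__":
    raw = cv2.imread("Train/41.jpg")
    dotted = cv2.imread("TrainDotted/41.jpg")
    counts, added, positions, classes = export_lions(
        raw, dotted, None, None, (84, 84, 3), debug=False)
    logger.info("counts per class: " + str(counts))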
|
[
"modules.utils.show_image",
"numpy.sum",
"cv2.bitwise_and",
"numpy.ones",
"keras.models.Model",
"modules.utils.is_far_from_others",
"numpy.shape",
"modules.utils.crop_image_fill",
"numpy.mean",
"modules.utils.add_sample_to_dataset",
"keras.layers.core.Flatten",
"keras.layers.Input",
"cv2.absdiff",
"cv2.inRange",
"random.randint",
"cv2.cvtColor",
"keras.layers.conv.conv_2d",
"keras.utils.np_utils.to_categorical",
"keras.layers.core.Dropout",
"keras.layers.estimator.regression",
"keras.layers.core.fully_connected",
"cv2.boundingRect",
"keras.layers.core.Lambda",
"keras.layers.core.Dense",
"keras.layers.conv.max_pool_2d",
"cv2.morphologyEx",
"keras.layers.convolutional.MaxPooling2D",
"cv2.threshold",
"numpy.zeros",
"keras.layers.core.dropout",
"modules.utils.show_images",
"keras.layers.convolutional.Conv2D",
"keras.layers.core.input_data",
"numpy.array",
"keras.layers.convolutional.Convolution2D",
"keras.layers.normalization.local_response_normalization",
"keras.layers.convolutional.ZeroPadding2D",
"keras.models.Sequential",
"cv2.findContours"
] |
[((500, 521), 'numpy.array', 'np.array', (['[0, 0, 160]'], {}), '([0, 0, 160])\n', (508, 521), True, 'import numpy as np\n'), ((535, 558), 'numpy.array', 'np.array', (['[200, 0, 200]'], {}), '([200, 0, 200])\n', (543, 558), True, 'import numpy as np\n'), ((572, 594), 'numpy.array', 'np.array', (['[10, 40, 75]'], {}), '([10, 40, 75])\n', (580, 594), True, 'import numpy as np\n'), ((608, 630), 'numpy.array', 'np.array', (['[150, 40, 0]'], {}), '([150, 40, 0])\n', (616, 630), True, 'import numpy as np\n'), ((644, 667), 'numpy.array', 'np.array', (['[25, 140, 40]'], {}), '([25, 140, 40])\n', (652, 667), True, 'import numpy as np\n'), ((701, 724), 'numpy.array', 'np.array', (['[50, 50, 255]'], {}), '([50, 50, 255])\n', (709, 724), True, 'import numpy as np\n'), ((738, 762), 'numpy.array', 'np.array', (['[255, 55, 255]'], {}), '([255, 55, 255])\n', (746, 762), True, 'import numpy as np\n'), ((776, 799), 'numpy.array', 'np.array', (['[20, 55, 130]'], {}), '([20, 55, 130])\n', (784, 799), True, 'import numpy as np\n'), ((813, 836), 'numpy.array', 'np.array', (['[255, 80, 40]'], {}), '([255, 80, 40])\n', (821, 836), True, 'import numpy as np\n'), ((850, 873), 'numpy.array', 'np.array', (['[50, 255, 65]'], {}), '([50, 255, 65])\n', (858, 873), True, 'import numpy as np\n'), ((1087, 1131), 'keras.layers.Input', 'layers.Input', ([], {'shape': 'image_dims', 'name': '"""Input"""'}), "(shape=image_dims, name='Input')\n", (1099, 1131), False, 'from keras import layers\n'), ((2752, 2781), 'keras.models.Model', 'models.Model', (['[input]', '[fc_2]'], {}), '([input], [fc_2])\n', (2764, 2781), False, 'from keras import models\n'), ((2839, 2864), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (2862, 2864), False, 'import keras\n'), ((3934, 3959), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (3957, 3959), False, 'import keras\n'), ((4971, 4996), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (4994, 4996), False, 'import keras\n'), ((6073, 6098), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (6096, 6098), False, 'import keras\n'), ((7025, 7050), 'keras.models.Sequential', 'keras.models.Sequential', ([], {}), '()\n', (7048, 7050), False, 'import keras\n'), ((8516, 8679), 'keras.layers.core.input_data', 'layers.core.input_data', ([], {'shape': '[None, image_dims[0], image_dims[1], image_dims[2]]', 'dtype': 'tf.float32', 'data_preprocessing': 'img_prep', 'data_augmentation': 'img_aug'}), '(shape=[None, image_dims[0], image_dims[1],\n image_dims[2]], dtype=tf.float32, data_preprocessing=img_prep,\n data_augmentation=img_aug)\n', (8538, 8679), False, 'from keras import layers\n'), ((8686, 8752), 'keras.layers.conv.conv_2d', 'layers.conv.conv_2d', (['network', '(96)', '(11)'], {'strides': '(4)', 'activation': '"""relu"""'}), "(network, 96, 11, strides=4, activation='relu')\n", (8705, 8752), False, 'from keras import layers\n'), ((8767, 8813), 'keras.layers.conv.max_pool_2d', 'layers.conv.max_pool_2d', (['network', '(3)'], {'strides': '(2)'}), '(network, 3, strides=2)\n', (8790, 8813), False, 'from keras import layers\n'), ((8828, 8886), 'keras.layers.normalization.local_response_normalization', 'layers.normalization.local_response_normalization', (['network'], {}), '(network)\n', (8877, 8886), False, 'from keras import layers\n'), ((8901, 8956), 'keras.layers.conv.conv_2d', 'layers.conv.conv_2d', (['network', '(256)', '(5)'], {'activation': '"""relu"""'}), "(network, 256, 5, activation='relu')\n", (8920, 8956), 
False, 'from keras import layers\n'), ((8971, 9017), 'keras.layers.conv.max_pool_2d', 'layers.conv.max_pool_2d', (['network', '(3)'], {'strides': '(2)'}), '(network, 3, strides=2)\n', (8994, 9017), False, 'from keras import layers\n'), ((9032, 9090), 'keras.layers.normalization.local_response_normalization', 'layers.normalization.local_response_normalization', (['network'], {}), '(network)\n', (9081, 9090), False, 'from keras import layers\n'), ((9105, 9160), 'keras.layers.conv.conv_2d', 'layers.conv.conv_2d', (['network', '(384)', '(3)'], {'activation': '"""relu"""'}), "(network, 384, 3, activation='relu')\n", (9124, 9160), False, 'from keras import layers\n'), ((9175, 9230), 'keras.layers.conv.conv_2d', 'layers.conv.conv_2d', (['network', '(384)', '(3)'], {'activation': '"""relu"""'}), "(network, 384, 3, activation='relu')\n", (9194, 9230), False, 'from keras import layers\n'), ((9245, 9300), 'keras.layers.conv.conv_2d', 'layers.conv.conv_2d', (['network', '(256)', '(3)'], {'activation': '"""relu"""'}), "(network, 256, 3, activation='relu')\n", (9264, 9300), False, 'from keras import layers\n'), ((9315, 9361), 'keras.layers.conv.max_pool_2d', 'layers.conv.max_pool_2d', (['network', '(3)'], {'strides': '(2)'}), '(network, 3, strides=2)\n', (9338, 9361), False, 'from keras import layers\n'), ((9376, 9434), 'keras.layers.normalization.local_response_normalization', 'layers.normalization.local_response_normalization', (['network'], {}), '(network)\n', (9425, 9434), False, 'from keras import layers\n'), ((9449, 9510), 'keras.layers.core.fully_connected', 'layers.core.fully_connected', (['network', '(4096)'], {'activation': '"""tanh"""'}), "(network, 4096, activation='tanh')\n", (9476, 9510), False, 'from keras import layers\n'), ((9525, 9558), 'keras.layers.core.dropout', 'layers.core.dropout', (['network', '(0.5)'], {}), '(network, 0.5)\n', (9544, 9558), False, 'from keras import layers\n'), ((9573, 9634), 'keras.layers.core.fully_connected', 'layers.core.fully_connected', (['network', '(4096)'], {'activation': '"""tanh"""'}), "(network, 4096, activation='tanh')\n", (9600, 9634), False, 'from keras import layers\n'), ((9649, 9682), 'keras.layers.core.dropout', 'layers.core.dropout', (['network', '(0.5)'], {}), '(network, 0.5)\n', (9668, 9682), False, 'from keras import layers\n'), ((9697, 9758), 'keras.layers.core.fully_connected', 'layers.core.fully_connected', (['network', '(5)'], {'activation': '"""softmax"""'}), "(network, 5, activation='softmax')\n", (9724, 9758), False, 'from keras import layers\n'), ((9773, 9890), 'keras.layers.estimator.regression', 'layers.estimator.regression', (['network'], {'optimizer': '"""momentum"""', 'loss': '"""categorical_crossentropy"""', 'learning_rate': '(0.001)'}), "(network, optimizer='momentum', loss=\n 'categorical_crossentropy', learning_rate=0.001)\n", (9800, 9890), False, 'from keras import layers\n'), ((10544, 10590), 'cv2.cvtColor', 'cv2.cvtColor', (['image_dotted', 'cv2.COLOR_BGR2GRAY'], {}), '(image_dotted, cv2.COLOR_BGR2GRAY)\n', (10556, 10590), False, 'import cv2\n'), ((10767, 10810), 'cv2.cvtColor', 'cv2.cvtColor', (['image_raw', 'cv2.COLOR_BGR2GRAY'], {}), '(image_raw, cv2.COLOR_BGR2GRAY)\n', (10779, 10810), False, 'import cv2\n'), ((10827, 10875), 'cv2.bitwise_and', 'cv2.bitwise_and', (['image_raw', 'image_raw'], {'mask': 'mask'}), '(image_raw, image_raw, mask=mask)\n', (10842, 10875), False, 'import cv2\n'), ((10996, 11032), 'cv2.absdiff', 'cv2.absdiff', (['image_dotted', 'image_raw'], {}), '(image_dotted, image_raw)\n', (11007, 11032), 
False, 'import cv2\n'), ((11044, 11088), 'cv2.cvtColor', 'cv2.cvtColor', (['diff_color', 'cv2.COLOR_BGR2GRAY'], {}), '(diff_color, cv2.COLOR_BGR2GRAY)\n', (11056, 11088), False, 'import cv2\n'), ((11103, 11128), 'numpy.ones', 'np.ones', (['(2, 2)', 'np.uint8'], {}), '((2, 2), np.uint8)\n', (11110, 11128), True, 'import numpy as np\n'), ((11138, 11184), 'cv2.morphologyEx', 'cv2.morphologyEx', (['diff', 'cv2.MORPH_OPEN', 'kernel'], {}), '(diff, cv2.MORPH_OPEN, kernel)\n', (11154, 11184), False, 'import cv2\n'), ((11200, 11247), 'cv2.threshold', 'cv2.threshold', (['diff', '(10)', '(255)', 'cv2.THRESH_TOZERO'], {}), '(diff, 10, 255, cv2.THRESH_TOZERO)\n', (11213, 11247), False, 'import cv2\n'), ((11260, 11306), 'cv2.threshold', 'cv2.threshold', (['diff', '(0)', '(255)', 'cv2.THRESH_BINARY'], {}), '(diff, 0, 255, cv2.THRESH_BINARY)\n', (11273, 11306), False, 'import cv2\n'), ((11444, 11464), 'numpy.zeros', 'np.zeros', (['NR_CLASSES'], {}), '(NR_CLASSES)\n', (11452, 11464), True, 'import numpy as np\n'), ((11542, 11604), 'cv2.findContours', 'cv2.findContours', (['diff', 'cv2.RETR_LIST', 'cv2.CHAIN_APPROX_SIMPLE'], {}), '(diff, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n', (11558, 11604), False, 'import cv2\n'), ((12249, 12269), 'numpy.zeros', 'np.zeros', (['NR_CLASSES'], {}), '(NR_CLASSES)\n', (12257, 12269), True, 'import numpy as np\n'), ((1145, 1266), 'keras.layers.convolutional.Convolution2D', 'convolutional.Convolution2D', (['(96)', '(11)', '(11)'], {'border_mode': '"""valid"""', 'name': '"""conv_1"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(96, 11, 11, border_mode='valid', name='conv_1',\n activation='relu', init='glorot_uniform')\n", (1172, 1266), False, 'from keras.layers import convolutional\n'), ((1283, 1342), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'name': '"""pool_1"""'}), "(pool_size=(3, 3), name='pool_1')\n", (1309, 1342), False, 'from keras.layers import convolutional\n'), ((1372, 1438), 'keras.layers.convolutional.ZeroPadding2D', 'convolutional.ZeroPadding2D', ([], {'padding': '(1, 1)', 'name': '"""zero_padding_1"""'}), "(padding=(1, 1), name='zero_padding_1')\n", (1399, 1438), False, 'from keras.layers import convolutional\n'), ((1460, 1580), 'keras.layers.convolutional.Convolution2D', 'convolutional.Convolution2D', (['(256)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'name': '"""conv_2"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(256, 3, 3, border_mode='valid', name='conv_2',\n activation='relu', init='glorot_uniform')\n", (1487, 1580), False, 'from keras.layers import convolutional\n'), ((1606, 1665), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'name': '"""pool_2"""'}), "(pool_size=(3, 3), name='pool_2')\n", (1632, 1665), False, 'from keras.layers import convolutional\n'), ((1695, 1774), 'keras.layers.convolutional.ZeroPadding2D', 'keras.layers.convolutional.ZeroPadding2D', ([], {'padding': '(1, 1)', 'name': '"""zero_padding_2"""'}), "(padding=(1, 1), name='zero_padding_2')\n", (1735, 1774), False, 'import keras\n'), ((1796, 1916), 'keras.layers.convolutional.Convolution2D', 'convolutional.Convolution2D', (['(384)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'name': '"""conv_3"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(384, 3, 3, border_mode='valid', name='conv_3',\n activation='relu', init='glorot_uniform')\n", (1823, 1916), False, 'from keras.layers import convolutional\n'), 
((1942, 2062), 'keras.layers.convolutional.Convolution2D', 'convolutional.Convolution2D', (['(384)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'name': '"""conv_4"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(384, 3, 3, border_mode='valid', name='conv_4',\n activation='relu', init='glorot_uniform')\n", (1969, 2062), False, 'from keras.layers import convolutional\n'), ((2080, 2200), 'keras.layers.convolutional.Convolution2D', 'convolutional.Convolution2D', (['(256)', '(3)', '(3)'], {'border_mode': '"""valid"""', 'name': '"""conv_5"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(256, 3, 3, border_mode='valid', name='conv_5',\n activation='relu', init='glorot_uniform')\n", (2107, 2200), False, 'from keras.layers import convolutional\n'), ((2218, 2277), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'name': '"""pool_3"""'}), "(pool_size=(3, 3), name='pool_3')\n", (2244, 2277), False, 'from keras.layers import convolutional\n'), ((2300, 2328), 'keras.layers.core.Flatten', 'core.Flatten', ([], {'name': '"""flatten"""'}), "(name='flatten')\n", (2312, 2328), False, 'from keras.layers import core\n'), ((2348, 2419), 'keras.layers.core.Dense', 'core.Dense', (['(4096)'], {'name': '"""fc_1"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(4096, name='fc_1', activation='relu', init='glorot_uniform')\n", (2358, 2419), False, 'from keras.layers import core\n'), ((2440, 2478), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {'name': '"""fc_1_dropout"""'}), "(0.5, name='fc_1_dropout')\n", (2452, 2478), False, 'from keras.layers import core\n'), ((2498, 2571), 'keras.layers.core.Dense', 'core.Dense', (['(4096)'], {'name': '"""Output"""', 'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(4096, name='Output', activation='relu', init='glorot_uniform')\n", (2508, 2571), False, 'from keras.layers import core\n'), ((2591, 2631), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {'name': '"""Output_dropout"""'}), "(0.5, name='Output_dropout')\n", (2603, 2631), False, 'from keras.layers import core\n'), ((2651, 2736), 'keras.layers.core.Dense', 'core.Dense', (['NR_CLASSES'], {'name': '"""fc_2"""', 'activation': '"""softmax"""', 'init': '"""glorot_uniform"""'}), "(NR_CLASSES, name='fc_2', activation='softmax', init='glorot_uniform'\n )\n", (2661, 2736), False, 'from keras.layers import core\n'), ((2880, 2942), 'keras.layers.core.Lambda', 'core.Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'image_dims'}), '(lambda x: x / 255.0 - 0.5, input_shape=image_dims)\n', (2891, 2942), False, 'from keras.layers import core\n'), ((2961, 3056), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(64, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (2981, 3056), False, 'from keras.layers import convolutional\n'), ((3067, 3111), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3093, 3111), False, 'from keras.layers import convolutional\n'), ((3126, 3222), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(128, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (3146, 3222), False, 'from keras.layers import 
convolutional\n'), ((3233, 3277), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3259, 3277), False, 'from keras.layers import convolutional\n'), ((3292, 3388), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(256, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (3312, 3388), False, 'from keras.layers import convolutional\n'), ((3399, 3443), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (3425, 3443), False, 'from keras.layers import convolutional\n'), ((3459, 3473), 'keras.layers.core.Flatten', 'core.Flatten', ([], {}), '()\n', (3471, 3473), False, 'from keras.layers import core\n'), ((3490, 3548), 'keras.layers.core.Dense', 'core.Dense', (['(1024)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(1024, activation='relu', init='glorot_uniform')\n", (3500, 3548), False, 'from keras.layers import core\n'), ((3564, 3581), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (3576, 3581), False, 'from keras.layers import core\n'), ((3597, 3655), 'keras.layers.core.Dense', 'core.Dense', (['(1024)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(1024, activation='relu', init='glorot_uniform')\n", (3607, 3655), False, 'from keras.layers import core\n'), ((3671, 3688), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (3683, 3688), False, 'from keras.layers import core\n'), ((3704, 3762), 'keras.layers.core.Dense', 'core.Dense', (['(6)'], {'activation': '"""softmax"""', 'init': '"""glorot_uniform"""'}), "(6, activation='softmax', init='glorot_uniform')\n", (3714, 3762), False, 'from keras.layers import core\n'), ((3975, 4037), 'keras.layers.core.Lambda', 'core.Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'image_dims'}), '(lambda x: x / 255.0 - 0.5, input_shape=image_dims)\n', (3986, 4037), False, 'from keras.layers import core\n'), ((4056, 4152), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(128, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (4076, 4152), False, 'from keras.layers import convolutional\n'), ((4163, 4207), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4189, 4207), False, 'from keras.layers import convolutional\n'), ((4222, 4318), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(256, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (4242, 4318), False, 'from keras.layers import convolutional\n'), ((4329, 4425), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(256, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (4349, 4425), False, 'from keras.layers import convolutional\n'), ((4436, 4480), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (4462, 4480), False, 'from keras.layers 
import convolutional\n'), ((4496, 4510), 'keras.layers.core.Flatten', 'core.Flatten', ([], {}), '()\n', (4508, 4510), False, 'from keras.layers import core\n'), ((4527, 4585), 'keras.layers.core.Dense', 'core.Dense', (['(1024)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(1024, activation='relu', init='glorot_uniform')\n", (4537, 4585), False, 'from keras.layers import core\n'), ((4601, 4618), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (4613, 4618), False, 'from keras.layers import core\n'), ((4634, 4692), 'keras.layers.core.Dense', 'core.Dense', (['(2048)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(2048, activation='relu', init='glorot_uniform')\n", (4644, 4692), False, 'from keras.layers import core\n'), ((4708, 4725), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (4720, 4725), False, 'from keras.layers import core\n'), ((4741, 4799), 'keras.layers.core.Dense', 'core.Dense', (['(6)'], {'activation': '"""softmax"""', 'init': '"""glorot_uniform"""'}), "(6, activation='softmax', init='glorot_uniform')\n", (4751, 4799), False, 'from keras.layers import core\n'), ((5012, 5074), 'keras.layers.core.Lambda', 'core.Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'image_dims'}), '(lambda x: x / 255.0 - 0.5, input_shape=image_dims)\n', (5023, 5074), False, 'from keras.layers import core\n'), ((5093, 5189), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(128, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (5113, 5189), False, 'from keras.layers import convolutional\n'), ((5200, 5244), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5226, 5244), False, 'from keras.layers import convolutional\n'), ((5259, 5354), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(64, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (5279, 5354), False, 'from keras.layers import convolutional\n'), ((5365, 5461), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(256, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (5385, 5461), False, 'from keras.layers import convolutional\n'), ((5472, 5516), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (5498, 5516), False, 'from keras.layers import convolutional\n'), ((5532, 5546), 'keras.layers.core.Flatten', 'core.Flatten', ([], {}), '()\n', (5544, 5546), False, 'from keras.layers import core\n'), ((5563, 5621), 'keras.layers.core.Dense', 'core.Dense', (['(2048)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(2048, activation='relu', init='glorot_uniform')\n", (5573, 5621), False, 'from keras.layers import core\n'), ((5637, 5654), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (5649, 5654), False, 'from keras.layers import core\n'), ((5670, 5728), 'keras.layers.core.Dense', 'core.Dense', (['(2048)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(2048, activation='relu', init='glorot_uniform')\n", (5680, 5728), 
False, 'from keras.layers import core\n'), ((5744, 5761), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (5756, 5761), False, 'from keras.layers import core\n'), ((5777, 5835), 'keras.layers.core.Dense', 'core.Dense', (['(6)'], {'activation': '"""softmax"""', 'init': '"""glorot_uniform"""'}), "(6, activation='softmax', init='glorot_uniform')\n", (5787, 5835), False, 'from keras.layers import core\n'), ((6114, 6176), 'keras.layers.core.Lambda', 'core.Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'image_dims'}), '(lambda x: x / 255.0 - 0.5, input_shape=image_dims)\n', (6125, 6176), False, 'from keras.layers import core\n'), ((6195, 6262), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(32, (3, 3), activation='relu', padding='same')\n", (6215, 6262), False, 'from keras.layers import convolutional\n'), ((6278, 6322), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6304, 6322), False, 'from keras.layers import convolutional\n'), ((6337, 6404), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, (3, 3), activation='relu', padding='same')\n", (6357, 6404), False, 'from keras.layers import convolutional\n'), ((6420, 6464), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6446, 6464), False, 'from keras.layers import convolutional\n'), ((6479, 6547), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(128, (3, 3), activation='relu', padding='same')\n", (6499, 6547), False, 'from keras.layers import convolutional\n'), ((6563, 6607), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (6589, 6607), False, 'from keras.layers import convolutional\n'), ((6623, 6637), 'keras.layers.core.Flatten', 'core.Flatten', ([], {}), '()\n', (6635, 6637), False, 'from keras.layers import core\n'), ((6654, 6688), 'keras.layers.core.Dense', 'core.Dense', (['(512)'], {'activation': '"""relu"""'}), "(512, activation='relu')\n", (6664, 6688), False, 'from keras.layers import core\n'), ((6704, 6721), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (6716, 6721), False, 'from keras.layers import core\n'), ((6737, 6772), 'keras.layers.core.Dense', 'core.Dense', (['(1024)'], {'activation': '"""relu"""'}), "(1024, activation='relu')\n", (6747, 6772), False, 'from keras.layers import core\n'), ((6788, 6805), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (6800, 6805), False, 'from keras.layers import core\n'), ((6821, 6856), 'keras.layers.core.Dense', 'core.Dense', (['(6)'], {'activation': '"""softmax"""'}), "(6, activation='softmax')\n", (6831, 6856), False, 'from keras.layers import core\n'), ((7066, 7128), 'keras.layers.core.Lambda', 'core.Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': 'image_dims'}), '(lambda x: x / 255.0 - 0.5, input_shape=image_dims)\n', (7077, 7128), False, 'from keras.layers import core\n'), ((7147, 7242), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(64, (3, 3), 
activation='relu', padding='same', init=\n 'glorot_uniform')\n", (7167, 7242), False, 'from keras.layers import convolutional\n'), ((7253, 7297), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7279, 7297), False, 'from keras.layers import convolutional\n'), ((7312, 7408), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(128, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (7332, 7408), False, 'from keras.layers import convolutional\n'), ((7419, 7463), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7445, 7463), False, 'from keras.layers import convolutional\n'), ((7478, 7574), 'keras.layers.convolutional.Conv2D', 'convolutional.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'init': '"""glorot_uniform"""'}), "(256, (3, 3), activation='relu', padding='same', init=\n 'glorot_uniform')\n", (7498, 7574), False, 'from keras.layers import convolutional\n'), ((7585, 7629), 'keras.layers.convolutional.MaxPooling2D', 'convolutional.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (7611, 7629), False, 'from keras.layers import convolutional\n'), ((7645, 7659), 'keras.layers.core.Flatten', 'core.Flatten', ([], {}), '()\n', (7657, 7659), False, 'from keras.layers import core\n'), ((7676, 7734), 'keras.layers.core.Dense', 'core.Dense', (['(1024)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(1024, activation='relu', init='glorot_uniform')\n", (7686, 7734), False, 'from keras.layers import core\n'), ((7750, 7767), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (7762, 7767), False, 'from keras.layers import core\n'), ((7783, 7841), 'keras.layers.core.Dense', 'core.Dense', (['(1024)'], {'activation': '"""relu"""', 'init': '"""glorot_uniform"""'}), "(1024, activation='relu', init='glorot_uniform')\n", (7793, 7841), False, 'from keras.layers import core\n'), ((7857, 7874), 'keras.layers.core.Dropout', 'core.Dropout', (['(0.5)'], {}), '(0.5)\n', (7869, 7874), False, 'from keras.layers import core\n'), ((7890, 7925), 'keras.layers.core.Dense', 'core.Dense', (['(2)'], {'activation': '"""softmax"""'}), "(2, activation='softmax')\n", (7900, 7925), False, 'from keras.layers import core\n'), ((10116, 10158), 'cv2.inRange', 'cv2.inRange', (['image', 'C_MIN[col]', 'C_MAX[col]'], {}), '(image, C_MIN[col], C_MAX[col])\n', (10127, 10158), False, 'import cv2\n'), ((10172, 10184), 'numpy.sum', 'np.sum', (['cmsk'], {}), '(cmsk)\n', (10178, 10184), True, 'import numpy as np\n'), ((10650, 10707), 'cv2.threshold', 'cv2.threshold', (['image_dotted_bw', '(5)', '(255)', 'cv2.THRESH_BINARY'], {}), '(image_dotted_bw, 5, 255, cv2.THRESH_BINARY)\n', (10663, 10707), False, 'import cv2\n'), ((11646, 11665), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (11662, 11665), False, 'import cv2\n'), ((13932, 13951), 'numpy.shape', 'np.shape', (['image_raw'], {}), '(image_raw)\n', (13940, 13951), True, 'import numpy as np\n'), ((15657, 15708), 'modules.utils.show_image', 'utils.show_image', (['debug_image'], {'size': '(40)', 'is_bgr': '(True)'}), '(debug_image, size=40, is_bgr=True)\n', (15673, 15708), True, 'import modules.utils as utils\n'), ((15717, 15774), 'modules.utils.show_images', 'utils.show_images', 
(['images'], {'cols': '(10)', 'is_bgr': '(True)', 'size': '(1.5)'}), '(images, cols=10, is_bgr=True, size=1.5)\n', (15734, 15774), True, 'import modules.utils as utils\n'), ((12583, 12654), 'modules.utils.is_far_from_others', 'utils.is_far_from_others', (['lion_pos', 'lion_positions', 'min_distance_others'], {}), '(lion_pos, lion_positions, min_distance_others)\n', (12607, 12654), True, 'import modules.utils as utils\n'), ((12941, 13054), 'modules.utils.crop_image_fill', 'utils.crop_image_fill', (['image_raw', '(lion_pos[1] - pw, lion_pos[0] - pw)', '(lion_pos[1] + ph, lion_pos[0] + ph)'], {}), '(image_raw, (lion_pos[1] - pw, lion_pos[0] - pw), (\n lion_pos[1] + ph, lion_pos[0] + ph))\n', (12962, 13054), True, 'import modules.utils as utils\n'), ((13057, 13072), 'numpy.mean', 'np.mean', (['trainX'], {}), '(trainX)\n', (13064, 13072), True, 'import numpy as np\n'), ((14149, 14219), 'modules.utils.is_far_from_others', 'utils.is_far_from_others', (['patch_pos', 'lion_positions', 'non_lion_distance'], {}), '(patch_pos, lion_positions, non_lion_distance)\n', (14173, 14219), True, 'import modules.utils as utils\n'), ((14018, 14077), 'random.randint', 'random.randint', (['(image_dims[1] * 2)', '(s[1] - image_dims[1] * 2)'], {}), '(image_dims[1] * 2, s[1] - image_dims[1] * 2)\n', (14032, 14077), False, 'import random\n'), ((14073, 14132), 'random.randint', 'random.randint', (['(image_dims[0] * 2)', '(s[0] - image_dims[0] * 2)'], {}), '(image_dims[0] * 2, s[0] - image_dims[0] * 2)\n', (14087, 14132), False, 'import random\n'), ((14498, 14615), 'modules.utils.crop_image_fill', 'utils.crop_image_fill', (['image_raw', '(patch_pos[1] - pw, patch_pos[0] - pw)', '(patch_pos[1] + ph, patch_pos[0] + ph)'], {}), '(image_raw, (patch_pos[1] - pw, patch_pos[0] - pw), (\n patch_pos[1] + ph, patch_pos[0] + ph))\n', (14519, 14615), True, 'import modules.utils as utils\n'), ((14622, 14637), 'numpy.mean', 'np.mean', (['trainX'], {}), '(trainX)\n', (14629, 14637), True, 'import numpy as np\n'), ((13538, 13599), 'keras.utils.np_utils.to_categorical', 'keras.utils.np_utils.to_categorical', (['[lion_class]', 'NR_CLASSES'], {}), '([lion_class], NR_CLASSES)\n', (13573, 13599), False, 'import keras\n'), ((13684, 13753), 'modules.utils.add_sample_to_dataset', 'utils.add_sample_to_dataset', (['target_x_ds', 'target_y_ds', 'trainX', 'trainY'], {}), '(target_x_ds, target_y_ds, trainX, trainY)\n', (13711, 13753), True, 'import modules.utils as utils\n'), ((14987, 15039), 'keras.utils.np_utils.to_categorical', 'keras.utils.np_utils.to_categorical', (['[5]', 'NR_CLASSES'], {}), '([5], NR_CLASSES)\n', (15022, 15039), False, 'import keras\n'), ((15132, 15201), 'modules.utils.add_sample_to_dataset', 'utils.add_sample_to_dataset', (['target_x_ds', 'target_y_ds', 'trainX', 'trainY'], {}), '(target_x_ds, target_y_ds, trainX, trainY)\n', (15159, 15201), True, 'import modules.utils as utils\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
IEMOCAP Speech 2D Spectrograms
Quadrant
->data.py
Created on Sat May 9 15:32:48 2020
@author: <NAME>
@email:<EMAIL>
"""
import torch
from torch.utils import data
from torch.utils.data import Dataset
import os
import numpy as np
import pandas as pd
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from scipy.io.wavfile import read,write #read and write audios
from sklearn.utils import class_weight
from skimage.transform import resize
from Spectrum import Spectrum_3D_Tensors
import torchvision as tv
import random
from itertools import chain
import librosa
#%%
class SpeechDataset(Dataset):
def __init__(self, files, norm_c1, norm_c2,norm_c3):
self.norm_c1=norm_c1
self.norm_c2=norm_c2
self.norm_c3=norm_c3
self.files=files
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.tolist()
input_tensor = torch.Tensor(self.files[index])
#Min Max Normalization
input_tensor=tensor_min_max(input_tensor,self.norm_c1,self.norm_c2,self.norm_c3)
#spec=spec.unsqueeze(0)
return (input_tensor)
def __len__(self):
return len(self.files)
def tensor_min_max(input_tensor,norm_c1,norm_c2,norm_c3):
tensor = input_tensor
tensor[0,:,:] = (input_tensor[0,:,:]-np.float(norm_c1['min']))/(np.float(norm_c1['max'])-np.float(norm_c1['min']))
tensor[1,:,:] = (input_tensor[1,:,:]-np.float(norm_c2['min']))/(np.float(norm_c2['max'])-np.float(norm_c2['min']))
tensor[2,:,:] = (input_tensor[2,:,:]-np.float(norm_c3['min']))/(np.float(norm_c3['max'])-np.float(norm_c3['min']))
return tensor
def get_dataset(sig, norm_c1,norm_c2,norm_c3):
#print('%-----Test tensors-----%')
spec=Spectrum_3D_Tensors(sig,16000)
tensors=spec.get_spectrogram_tensors()
speechData=SpeechDataset(tensors,norm_c1, norm_c2,norm_c3)
return speechData
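# Illustrative only: one way to batch the spectrogram tensors, assuming `sig` is a
# 16 kHz waveform and the norm_c* dicts hold the 'min'/'max' statistics expected by
# tensor_min_max (names and values below are placeholders, not part of this module):
#   sig, _ = librosa.load("example.wav", sr=16000)
#   norm_c = {"min": 0.0, "max": 1.0}
#   loader = DataLoader(get_dataset(sig, norm_c, norm_c, norm_c), batch_size=1)
#   for batch in loader:
#       print(batch.shape)  # [1, 3, H, W] min-max normalised spectrogram tensor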
|
[
"torch.Tensor",
"torch.is_tensor",
"numpy.float",
"Spectrum.Spectrum_3D_Tensors"
] |
[((2026, 2057), 'Spectrum.Spectrum_3D_Tensors', 'Spectrum_3D_Tensors', (['sig', '(16000)'], {}), '(sig, 16000)\n', (2045, 2057), False, 'from Spectrum import Spectrum_3D_Tensors\n'), ((981, 1003), 'torch.is_tensor', 'torch.is_tensor', (['index'], {}), '(index)\n', (996, 1003), False, 'import torch\n'), ((1072, 1103), 'torch.Tensor', 'torch.Tensor', (['self.files[index]'], {}), '(self.files[index])\n', (1084, 1103), False, 'import torch\n'), ((1559, 1583), 'numpy.float', 'np.float', (["norm_c1['min']"], {}), "(norm_c1['min'])\n", (1567, 1583), True, 'import numpy as np\n'), ((1586, 1610), 'numpy.float', 'np.float', (["norm_c1['max']"], {}), "(norm_c1['max'])\n", (1594, 1610), True, 'import numpy as np\n'), ((1611, 1635), 'numpy.float', 'np.float', (["norm_c1['min']"], {}), "(norm_c1['min'])\n", (1619, 1635), True, 'import numpy as np\n'), ((1678, 1702), 'numpy.float', 'np.float', (["norm_c2['min']"], {}), "(norm_c2['min'])\n", (1686, 1702), True, 'import numpy as np\n'), ((1705, 1729), 'numpy.float', 'np.float', (["norm_c2['max']"], {}), "(norm_c2['max'])\n", (1713, 1729), True, 'import numpy as np\n'), ((1730, 1754), 'numpy.float', 'np.float', (["norm_c2['min']"], {}), "(norm_c2['min'])\n", (1738, 1754), True, 'import numpy as np\n'), ((1797, 1821), 'numpy.float', 'np.float', (["norm_c3['min']"], {}), "(norm_c3['min'])\n", (1805, 1821), True, 'import numpy as np\n'), ((1824, 1848), 'numpy.float', 'np.float', (["norm_c3['max']"], {}), "(norm_c3['max'])\n", (1832, 1848), True, 'import numpy as np\n'), ((1849, 1873), 'numpy.float', 'np.float', (["norm_c3['min']"], {}), "(norm_c3['min'])\n", (1857, 1873), True, 'import numpy as np\n')]
|
import numpy as np
def get_matrix():
A = np.random.random((20, 3))
while np.linalg.matrix_rank(A) < 3:
A = np.random.random((20, 3))
B = np.random.random((3, 5))
while np.linalg.matrix_rank(B) < 3:
        B = np.random.random((3, 5))
origin_matrix = np.dot(A, B)
if np.linalg.matrix_rank(origin_matrix) == 3:
return origin_matrix
else:
print("error")
# test
'''
origin_matrix = np.array([[1,0,0,0,2],
[0,0,3,0,0],
[0,0,0,0,0],
[0,2,0,0,0]])
'''
return origin_matrix
def svd_factor(M):
# SVD
U, S, Vh = np.linalg.svd(M)
# print(U.shape, S.shape, Vh.shape)
# print(U,"\n",S,"\n",Vh)
# construct matrix sigma
# sigma = np.zeros([U.shape[1], Vh.shape[0]])
sigma = np.zeros([U.shape[1], Vh.shape[0]])
for i in range(S.shape[0]):
sigma[i, i] = S[i]
A = np.dot(np.dot(U, sigma), Vh)
return np.linalg.matrix_rank(A)
def reconstruct_matrix(M, k):
# SVD
U, S, Vh = np.linalg.svd(M)
# print(U.shape, S.shape, Vh.shape)
# print(U,"\n",S,"\n",Vh)
# construct matrix sigma
# sigma = np.zeros([U.shape[1], Vh.shape[0]])
sigma = np.zeros([U.shape[1], Vh.shape[0]])
for i in range(S.shape[0]):
sigma[i, i] = S[i]
A = np.dot(np.dot(U[:, :k], sigma[:k, :k]), Vh[:k, :])
return A
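# Illustrative only: reconstruct_matrix keeps the k largest singular values, which
# gives the best rank-k approximation in the Frobenius norm (Eckart-Young), e.g.:
#   M = get_matrix()              # rank-3, 20x5
#   M2 = reconstruct_matrix(M, 2)
#   np.linalg.matrix_rank(M2)     # -> 2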
if __name__ == "__main__":
x = np.random.randint(0, 20, size=5)
y = np.random.randint(0, 5, size=5)
print(x, y)
max_rank = [0, 0, 0, 0, 0, 0]
max_svd_rank = [0, 0, 0, 0, 0, 0]
min_rank = [0, 0, 3, 3, 3, 3]
min_svd_rank = [0, 0, 3, 3, 3, 3]
dist_list = [0, 0, 0, 0, 0, 0]
for _ in range(10000000):
A = get_matrix()
temp = np.array(A)
temp[x[0], y[0]] = 0
for i in range(1, 5):
temp[x[i], y[i]] = 0
rank = np.linalg.matrix_rank(temp)
rank_svd = svd_factor(temp)
if rank != rank_svd:
print(_, i)
if rank < 3:
print("rank", _, i)
if rank_svd < 3:
print("rank_svd", _, i)
max_rank[i+1] = max(rank, max_rank[i+1])
max_svd_rank[i+1] = max(rank_svd, max_svd_rank[i+1])
min_rank[i+1] = min(rank, min_rank[i+1])
            min_svd_rank[i+1] = min(rank_svd, min_svd_rank[i+1])
# reconstruct
recons = reconstruct_matrix(temp, 3)
dist = np.linalg.norm(A - recons)
dist_list[i+1] += dist
print(max_rank)
print(max_svd_rank)
print(min_rank)
print(min_svd_rank)
print(np.array(dist_list) / 10000000)
'''
A = np.zeros([20, 5])
A[0,0],A[0,1],A[1,2],A[1,3],A[2,4] = 1,1,1,1,1
print(np.linalg.matrix_rank(A))
A[1,2],A[1,3],A[2,4] = 0,0,0
print(np.linalg.matrix_rank(A))
'''
|
[
"numpy.zeros",
"numpy.linalg.svd",
"numpy.random.random",
"numpy.linalg.matrix_rank",
"numpy.random.randint",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot"
] |
[((46, 71), 'numpy.random.random', 'np.random.random', (['(20, 3)'], {}), '((20, 3))\n', (62, 71), True, 'import numpy as np\n'), ((159, 183), 'numpy.random.random', 'np.random.random', (['(3, 5)'], {}), '((3, 5))\n', (175, 183), True, 'import numpy as np\n'), ((287, 299), 'numpy.dot', 'np.dot', (['A', 'B'], {}), '(A, B)\n', (293, 299), True, 'import numpy as np\n'), ((679, 695), 'numpy.linalg.svd', 'np.linalg.svd', (['M'], {}), '(M)\n', (692, 695), True, 'import numpy as np\n'), ((858, 893), 'numpy.zeros', 'np.zeros', (['[U.shape[1], Vh.shape[0]]'], {}), '([U.shape[1], Vh.shape[0]])\n', (866, 893), True, 'import numpy as np\n'), ((1003, 1027), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['A'], {}), '(A)\n', (1024, 1027), True, 'import numpy as np\n'), ((1085, 1101), 'numpy.linalg.svd', 'np.linalg.svd', (['M'], {}), '(M)\n', (1098, 1101), True, 'import numpy as np\n'), ((1264, 1299), 'numpy.zeros', 'np.zeros', (['[U.shape[1], Vh.shape[0]]'], {}), '([U.shape[1], Vh.shape[0]])\n', (1272, 1299), True, 'import numpy as np\n'), ((1470, 1502), 'numpy.random.randint', 'np.random.randint', (['(0)', '(20)'], {'size': '(5)'}), '(0, 20, size=5)\n', (1487, 1502), True, 'import numpy as np\n'), ((1511, 1542), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': '(5)'}), '(0, 5, size=5)\n', (1528, 1542), True, 'import numpy as np\n'), ((82, 106), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['A'], {}), '(A)\n', (103, 106), True, 'import numpy as np\n'), ((124, 149), 'numpy.random.random', 'np.random.random', (['(20, 3)'], {}), '((20, 3))\n', (140, 149), True, 'import numpy as np\n'), ((194, 218), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['B'], {}), '(B)\n', (215, 218), True, 'import numpy as np\n'), ((236, 261), 'numpy.random.random', 'np.random.random', (['(20, 3)'], {}), '((20, 3))\n', (252, 261), True, 'import numpy as np\n'), ((307, 343), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['origin_matrix'], {}), '(origin_matrix)\n', (328, 343), True, 'import numpy as np\n'), ((969, 985), 'numpy.dot', 'np.dot', (['U', 'sigma'], {}), '(U, sigma)\n', (975, 985), True, 'import numpy as np\n'), ((1375, 1406), 'numpy.dot', 'np.dot', (['U[:, :k]', 'sigma[:k, :k]'], {}), '(U[:, :k], sigma[:k, :k])\n', (1381, 1406), True, 'import numpy as np\n'), ((1812, 1823), 'numpy.array', 'np.array', (['A'], {}), '(A)\n', (1820, 1823), True, 'import numpy as np\n'), ((1937, 1964), 'numpy.linalg.matrix_rank', 'np.linalg.matrix_rank', (['temp'], {}), '(temp)\n', (1958, 1964), True, 'import numpy as np\n'), ((2529, 2555), 'numpy.linalg.norm', 'np.linalg.norm', (['(A - recons)'], {}), '(A - recons)\n', (2543, 2555), True, 'import numpy as np\n'), ((2691, 2710), 'numpy.array', 'np.array', (['dist_list'], {}), '(dist_list)\n', (2699, 2710), True, 'import numpy as np\n')]
|
import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
import math
from utils.tools import make_positions
def Embedding(num_embeddings, embedding_dim, padding_idx=None):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
return m
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
""" Sinusoid position encoding table """
def cal_angle(position, hid_idx):
return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)
def get_posi_angle_vec(position):
return [cal_angle(position, hid_j) for hid_j in range(d_hid)]
sinusoid_table = np.array(
[get_posi_angle_vec(pos_i) for pos_i in range(n_position)]
)
sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i
sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1
if padding_idx is not None:
# zero vector for padding dimension
sinusoid_table[padding_idx] = 0.0
return torch.FloatTensor(sinusoid_table)
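# Illustrative only: the table is commonly used as a frozen embedding lookup
# (n_position/d_hid below stand for the caller's sizes):
#   table = get_sinusoid_encoding_table(n_position, d_hid, padding_idx=0)
#   pos_emb = nn.Embedding.from_pretrained(table, freeze=True)
#   x = word_emb(tokens) + pos_emb(positions)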
class SinusoidalPositionalEmbedding(nn.Module):
"""This module produces sinusoidal positional embeddings of any length.
Padding symbols are ignored.
"""
def __init__(self, embedding_dim, padding_idx, init_size=1024):
super().__init__()
self.embedding_dim = embedding_dim
self.padding_idx = padding_idx
self.weights = SinusoidalPositionalEmbedding.get_embedding(
init_size,
embedding_dim,
padding_idx,
)
self.register_buffer("_float_tensor", torch.FloatTensor(1))
@staticmethod
def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
"""Build sinusoidal embeddings.
This matches the implementation in tensor2tensor, but differs slightly
from the description in Section 3.5 of "Attention Is All You Need".
"""
half_dim = embedding_dim // 2
emb = math.log(10000) / (half_dim - 1)
emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
if embedding_dim % 2 == 1:
# zero pad
emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
if padding_idx is not None:
emb[padding_idx, :] = 0
return emb
def forward(self, input, incremental_state=None, timestep=None, positions=None, **kwargs):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input.shape[:2]
max_pos = self.padding_idx + 1 + seq_len
if self.weights is None or max_pos > self.weights.size(0):
# recompute/expand embeddings if needed
self.weights = SinusoidalPositionalEmbedding.get_embedding(
max_pos,
self.embedding_dim,
self.padding_idx,
)
self.weights = self.weights.to(self._float_tensor)
if incremental_state is not None:
# positions is the same for every token when decoding a single step
pos = timestep.view(-1)[0] + 1 if timestep is not None else seq_len
return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)
positions = make_positions(input, self.padding_idx) if positions is None else positions
return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()
def max_positions(self):
"""Maximum number of supported positions."""
return int(1e5) # an arbitrary large number
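# Illustrative only: given padded token ids of shape [bsz, seq_len] (placeholder
# names), the module returns matching position vectors:
#   pos_emb = SinusoidalPositionalEmbedding(embedding_dim, padding_idx=0)
#   pos = pos_emb(tokens)   # [bsz, seq_len, embedding_dim], detached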
class Swish(nn.Module):
"""
Swish is a smooth, non-monotonic function that consistently matches or outperforms ReLU on deep networks applied
to a variety of challenging domains such as Image classification and Machine translation.
"""
def __init__(self):
super(Swish, self).__init__()
def forward(self, inputs):
return inputs * inputs.sigmoid()
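# Note: x * sigmoid(x) is the same function that recent PyTorch releases expose as
# torch.nn.SiLU / torch.nn.functional.silu.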
class GLU(nn.Module):
"""
The gating mechanism is called Gated Linear Units (GLU), which was first introduced for natural language processing
in the paper “Language Modeling with Gated Convolutional Networks”
"""
def __init__(self, dim: int) -> None:
super(GLU, self).__init__()
self.dim = dim
def forward(self, inputs):
outputs, gate = inputs.chunk(2, dim=self.dim)
return outputs * gate.sigmoid()
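# Note: this is equivalent to torch.nn.functional.glu(inputs, dim=self.dim), which
# also splits the input in half along `dim` and gates one half with the sigmoid of
# the other.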
class LayerNorm(torch.nn.LayerNorm):
"""Layer normalization module.
:param int nout: output dim size
:param int dim: dimension to be normalized
"""
def __init__(self, nout, dim=-1):
"""Construct an LayerNorm object."""
super(LayerNorm, self).__init__(nout, eps=1e-12)
self.dim = dim
def forward(self, x):
"""Apply layer normalization.
:param torch.Tensor x: input tensor
:return: layer normalized tensor
:rtype torch.Tensor
"""
if self.dim == -1:
return super(LayerNorm, self).forward(x)
return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)
class LinearNorm(nn.Module):
""" LinearNorm Projection """
def __init__(self, in_features, out_features, bias=False):
super(LinearNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(self.linear.weight)
if bias:
nn.init.constant_(self.linear.bias, 0.0)
def forward(self, x):
x = self.linear(x)
return x
class ConvBlock(nn.Module):
""" 1D Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, dropout=None, normalization=nn.BatchNorm1d, activation=nn.ReLU, transpose=False):
super(ConvBlock, self).__init__()
self.conv_layer = nn.Sequential(
ConvNorm(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
transpose=transpose
),
normalization(out_channels),
activation(),
)
self.dropout = dropout if dropout is not None else None
self.transpose = transpose
def forward(self, enc_input, mask=None):
if not self.transpose:
enc_input = enc_input.contiguous().transpose(1, 2)
enc_output = self.conv_layer(enc_input)
if self.dropout is not None:
            enc_output = F.dropout(enc_output, self.dropout, training=True)  # dropout kept active even in eval mode (self.training intentionally unused)
if not self.transpose:
enc_output = enc_output.contiguous().transpose(1, 2)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output
class ConvBlock2D(nn.Module):
""" 2D Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, dropout=None, normalization=nn.BatchNorm2d, activation=nn.ReLU, transpose=False):
super(ConvBlock2D, self).__init__()
self.conv_layer = nn.Sequential(
ConvNorm2D(
in_channels,
out_channels,
kernel_size=(1, kernel_size),
stride=1,
padding=(0, int((kernel_size - 1) / 2)),
bias=False,
w_init_gain="tanh",
transpose=transpose,
),
normalization(out_channels),
activation(),
)
        self.dropout = dropout
self.transpose = transpose
def forward(self, enc_input, mask=None):
"""
enc_input -- [B, H, W, C_in]
mask -- [B, H]
"""
if not self.transpose:
enc_input = enc_input.contiguous().permute(0, 3, 1, 2) # [B, C_in, H, W]
enc_output = self.conv_layer(enc_input)
if self.dropout is not None:
enc_output = F.dropout(enc_output, self.dropout, self.training)
if not self.transpose:
enc_output = enc_output.contiguous().permute(0, 2, 3, 1) # [B, H, W, C_out]
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1).unsqueeze(-1), 0)
return enc_output
class ConvNorm(nn.Module):
""" 1D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
transpose=False,
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
self.transpose = transpose
def forward(self, x):
if self.transpose:
x = x.contiguous().transpose(1, 2)
x = self.conv(x)
if self.transpose:
x = x.contiguous().transpose(1, 2)
return x
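# --- Illustrative sketch (not part of the original module) ---
# With the default padding of dilation * (kernel_size - 1) / 2 and stride 1, ConvNorm acts as a
# "same-length" 1D convolution: the time dimension is preserved. transpose=True lets it consume
# (B, T, C) tensors directly. The helper name and shapes are ours, for illustration only.
def _convnorm_demo():
    import torch  # already imported at the top of this module
    conv = ConvNorm(in_channels=80, out_channels=256, kernel_size=5, transpose=True)
    x = torch.randn(4, 120, 80)             # (batch, time, channels)
    y = conv(x)
    assert y.shape == (4, 120, 256)         # same time length, new channel count
    return y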
class ConvNorm2D(nn.Module):
""" 2D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
transpose=False,
):
super(ConvNorm2D, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
torch.nn.init.xavier_uniform_(
self.conv.weight, gain=torch.nn.init.calculate_gain(w_init_gain)
)
self.transpose = transpose
def forward(self, x):
"""
x -- [B, H, W, C] or [B, C, H, W]
"""
if self.transpose:
x = x.contiguous().permute(0, 3, 1, 2) # [B, C, H, W]
x = self.conv(x)
if self.transpose:
x = x.contiguous().permute(0, 2, 3, 1) # [B, H, W, C]
return x
|
[
"torch.nn.Embedding",
"torch.nn.functional.dropout",
"torch.cos",
"numpy.sin",
"torch.nn.init.constant_",
"torch.arange",
"torch.nn.init.calculate_gain",
"numpy.power",
"torch.nn.Conv1d",
"torch.FloatTensor",
"torch.nn.Linear",
"torch.zeros",
"math.log",
"torch.nn.init.xavier_uniform_",
"torch.nn.Conv2d",
"utils.tools.make_positions",
"numpy.cos",
"torch.nn.init.normal_",
"torch.sin"
] |
[((217, 285), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim'], {'padding_idx': 'padding_idx'}), '(num_embeddings, embedding_dim, padding_idx=padding_idx)\n', (229, 285), True, 'import torch.nn as nn\n'), ((290, 350), 'torch.nn.init.normal_', 'nn.init.normal_', (['m.weight'], {'mean': '(0)', 'std': '(embedding_dim ** -0.5)'}), '(m.weight, mean=0, std=embedding_dim ** -0.5)\n', (305, 350), True, 'import torch.nn as nn\n'), ((508, 550), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features', 'bias'], {}), '(in_features, out_features, bias)\n', (517, 550), True, 'import torch.nn as nn\n'), ((555, 588), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (578, 588), True, 'import torch.nn as nn\n'), ((1124, 1155), 'numpy.sin', 'np.sin', (['sinusoid_table[:, 0::2]'], {}), '(sinusoid_table[:, 0::2])\n', (1130, 1155), True, 'import numpy as np\n'), ((1196, 1227), 'numpy.cos', 'np.cos', (['sinusoid_table[:, 1::2]'], {}), '(sinusoid_table[:, 1::2])\n', (1202, 1227), True, 'import numpy as np\n'), ((1371, 1404), 'torch.FloatTensor', 'torch.FloatTensor', (['sinusoid_table'], {}), '(sinusoid_table)\n', (1388, 1404), False, 'import torch\n'), ((391, 434), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.weight[padding_idx]', '(0)'], {}), '(m.weight[padding_idx], 0)\n', (408, 434), True, 'import torch.nn as nn\n'), ((610, 640), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0.0)'], {}), '(m.bias, 0.0)\n', (627, 640), True, 'import torch.nn as nn\n'), ((5779, 5821), 'torch.nn.Linear', 'nn.Linear', (['in_features', 'out_features', 'bias'], {}), '(in_features, out_features, bias)\n', (5788, 5821), True, 'import torch.nn as nn\n'), ((5831, 5874), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['self.linear.weight'], {}), '(self.linear.weight)\n', (5854, 5874), True, 'import torch.nn as nn\n'), ((9306, 9433), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=bias)\n', (9315, 9433), True, 'import torch.nn as nn\n'), ((10396, 10523), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'out_channels'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation', 'bias': 'bias'}), '(in_channels, out_channels, kernel_size=kernel_size, stride=stride,\n padding=padding, dilation=dilation, bias=bias)\n', (10405, 10523), True, 'import torch.nn as nn\n'), ((835, 878), 'numpy.power', 'np.power', (['(10000)', '(2 * (hid_idx // 2) / d_hid)'], {}), '(10000, 2 * (hid_idx // 2) / d_hid)\n', (843, 878), True, 'import numpy as np\n'), ((1950, 1970), 'torch.FloatTensor', 'torch.FloatTensor', (['(1)'], {}), '(1)\n', (1967, 1970), False, 'import torch\n'), ((2323, 2338), 'math.log', 'math.log', (['(10000)'], {}), '(10000)\n', (2331, 2338), False, 'import math\n'), ((3744, 3783), 'utils.tools.make_positions', 'make_positions', (['input', 'self.padding_idx'], {}), '(input, self.padding_idx)\n', (3758, 3783), False, 'from utils.tools import make_positions\n'), ((5904, 5944), 'torch.nn.init.constant_', 'nn.init.constant_', (['self.linear.bias', '(0.0)'], {}), '(self.linear.bias, 0.0)\n', (5921, 5944), True, 'import torch.nn as nn\n'), ((7056, 7106), 'torch.nn.functional.dropout', 'F.dropout', (['enc_output', 'self.dropout'], {'training': '(True)'}), 
'(enc_output, self.dropout, training=True)\n', (7065, 7106), True, 'from torch.nn import functional as F\n'), ((8507, 8557), 'torch.nn.functional.dropout', 'F.dropout', (['enc_output', 'self.dropout', 'self.training'], {}), '(enc_output, self.dropout, self.training)\n', (8516, 8557), True, 'from torch.nn import functional as F\n'), ((2380, 2421), 'torch.arange', 'torch.arange', (['half_dim'], {'dtype': 'torch.float'}), '(half_dim, dtype=torch.float)\n', (2392, 2421), False, 'import torch\n'), ((9600, 9641), 'torch.nn.init.calculate_gain', 'torch.nn.init.calculate_gain', (['w_init_gain'], {}), '(w_init_gain)\n', (9628, 9641), False, 'import torch\n'), ((10690, 10731), 'torch.nn.init.calculate_gain', 'torch.nn.init.calculate_gain', (['w_init_gain'], {}), '(w_init_gain)\n', (10718, 10731), False, 'import torch\n'), ((2444, 2491), 'torch.arange', 'torch.arange', (['num_embeddings'], {'dtype': 'torch.float'}), '(num_embeddings, dtype=torch.float)\n', (2456, 2491), False, 'import torch\n'), ((2706, 2736), 'torch.zeros', 'torch.zeros', (['num_embeddings', '(1)'], {}), '(num_embeddings, 1)\n', (2717, 2736), False, 'import torch\n'), ((2549, 2563), 'torch.sin', 'torch.sin', (['emb'], {}), '(emb)\n', (2558, 2563), False, 'import torch\n'), ((2565, 2579), 'torch.cos', 'torch.cos', (['emb'], {}), '(emb)\n', (2574, 2579), False, 'import torch\n')]
|
import os.path
import unittest
import numpy
from openquake.hazardlib import geo, imt
from openquake.hazardlib.shakemap import (
get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking,
spatial_correlation_array, spatial_covariance_array,
cross_correlation_matrix, cholesky)
aae = numpy.testing.assert_almost_equal
F64 = numpy.float64
imts = [imt.from_string(x) for x in ['PGA', 'SA(0.3)', 'SA(1.0)', 'SA(3.0)']]
imt_dt = numpy.dtype([(str(imt), float) for imt in imts])
shakemap_dt = numpy.dtype([('lon', float), ('lat', float), ('val', imt_dt),
('std', imt_dt), ('vs30', float)])
CDIR = os.path.dirname(__file__)
def mean_gmf(shakemap):
gmfs = to_gmfs(
shakemap, crosscorr='cross', site_effects=True, trunclevel=3,
num_gmfs=10, seed=42)
return [gmfs[..., i].mean() for i in range(len(imts))]
class ShakemapTestCase(unittest.TestCase):
def test_gmfs(self):
f1 = os.path.join(CDIR, 'ghorka_grid.xml')
f2 = os.path.join(CDIR, 'ghorka_uncertainty.xml')
array = get_shakemap_array(f1, f2)
sitecol, shakemap = get_sitecol_shakemap(array)
n = 4 # number of sites
self.assertEqual(len(sitecol), n)
gmf_by_imt = mean_gmf(shakemap)
aae(gmf_by_imt, [0.0047045, 0.0184625, 0.0346171, 0.0175625])
def test_amplify(self):
res = amplify_ground_shaking(T=3.0, vs30=780, gmvs=[0.1, 0.2, 0.3])
aae(res, [0.09832577, 0.19690711, 0.2958982])
res = amplify_ground_shaking(T=0.3, vs30=780, gmvs=[0.1, 0.2, 0.3])
aae(res, [0.09909498, 0.19870543, 0.29922175])
def test_matrices(self):
# distance matrix
lons = numpy.array([84., 84., 84., 85.5, 85.5, 85.5, 87., 87., 87.])
lats = numpy.array([26., 27.5, 29., 26., 27.5, 29., 26., 27.5, 29.])
dmatrix = geo.geodetic.distance_matrix(lons, lats)
aae(dmatrix.sum(), 18539.6131407)
# spatial correlation
sca = spatial_correlation_array(dmatrix, imts, 'spatial')
aae(sca.sum(), 36.000370229)
# spatial covariance
std = numpy.array([(0.5, 0.52, 0.64, 0.73)] * 9, imt_dt) # 9 sites
scov = spatial_covariance_array([std[n] for n in imt_dt.names], sca)
aae(scov.sum(), 13.166200147)
# cross correlation
ccor = cross_correlation_matrix(imts, 'cross')
aae(ccor.sum(), 10.49124788)
# cholesky decomposition
L = cholesky(scov, ccor)
self.assertEqual(L.shape, (36, 36))
aae(L.sum(), 30.5121263)
# intensity
val = numpy.array(
[(5.38409665, 3.9383686, 3.55435415, 4.37692394)] * 9, imt_dt)
shakemap = numpy.zeros(9, shakemap_dt) # 9 sites
shakemap['lon'] = lons
shakemap['lat'] = lats
shakemap['vs30'] = numpy.array([301.17] * 9)
shakemap['val'] = val
shakemap['std'] = std
gmfs = to_gmfs(
shakemap, crosscorr='corr', site_effects=False, trunclevel=3,
num_gmfs=2, seed=42)
# shape (R, N, E, M)
aae(gmfs[..., 0].sum(axis=1), [[0.3708301, 0.5671011]]) # PGA
gmfs = to_gmfs(
shakemap, crosscorr='cross', site_effects=True, trunclevel=3,
num_gmfs=2, seed=42)
aae(gmfs[..., 0].sum(axis=1), [[0.4101717, 0.6240185]]) # PGA
aae(gmfs[..., 2].sum(axis=1), [[0.3946015, 0.5385107]]) # SA(1.0)
|
[
"openquake.hazardlib.shakemap.cholesky",
"openquake.hazardlib.shakemap.to_gmfs",
"openquake.hazardlib.shakemap.cross_correlation_matrix",
"openquake.hazardlib.shakemap.spatial_correlation_array",
"openquake.hazardlib.imt.from_string",
"numpy.dtype",
"numpy.zeros",
"openquake.hazardlib.geo.geodetic.distance_matrix",
"openquake.hazardlib.shakemap.get_shakemap_array",
"numpy.array",
"openquake.hazardlib.shakemap.get_sitecol_shakemap",
"openquake.hazardlib.shakemap.spatial_covariance_array",
"openquake.hazardlib.shakemap.amplify_ground_shaking"
] |
[((515, 615), 'numpy.dtype', 'numpy.dtype', (["[('lon', float), ('lat', float), ('val', imt_dt), ('std', imt_dt), ('vs30',\n float)]"], {}), "([('lon', float), ('lat', float), ('val', imt_dt), ('std',\n imt_dt), ('vs30', float)])\n", (526, 615), False, 'import numpy\n'), ((373, 391), 'openquake.hazardlib.imt.from_string', 'imt.from_string', (['x'], {}), '(x)\n', (388, 391), False, 'from openquake.hazardlib import geo, imt\n'), ((709, 804), 'openquake.hazardlib.shakemap.to_gmfs', 'to_gmfs', (['shakemap'], {'crosscorr': '"""cross"""', 'site_effects': '(True)', 'trunclevel': '(3)', 'num_gmfs': '(10)', 'seed': '(42)'}), "(shakemap, crosscorr='cross', site_effects=True, trunclevel=3,\n num_gmfs=10, seed=42)\n", (716, 804), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((1073, 1099), 'openquake.hazardlib.shakemap.get_shakemap_array', 'get_shakemap_array', (['f1', 'f2'], {}), '(f1, f2)\n', (1091, 1099), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((1128, 1155), 'openquake.hazardlib.shakemap.get_sitecol_shakemap', 'get_sitecol_shakemap', (['array'], {}), '(array)\n', (1148, 1155), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((1384, 1445), 'openquake.hazardlib.shakemap.amplify_ground_shaking', 'amplify_ground_shaking', ([], {'T': '(3.0)', 'vs30': '(780)', 'gmvs': '[0.1, 0.2, 0.3]'}), '(T=3.0, vs30=780, gmvs=[0.1, 0.2, 0.3])\n', (1406, 1445), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((1515, 1576), 'openquake.hazardlib.shakemap.amplify_ground_shaking', 'amplify_ground_shaking', ([], {'T': '(0.3)', 'vs30': '(780)', 'gmvs': '[0.1, 0.2, 0.3]'}), '(T=0.3, vs30=780, gmvs=[0.1, 0.2, 0.3])\n', (1537, 1576), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((1704, 1771), 'numpy.array', 'numpy.array', (['[84.0, 84.0, 84.0, 85.5, 85.5, 85.5, 87.0, 87.0, 87.0]'], {}), '([84.0, 84.0, 84.0, 85.5, 85.5, 85.5, 87.0, 87.0, 87.0])\n', (1715, 1771), False, 'import numpy\n'), ((1781, 1848), 'numpy.array', 'numpy.array', (['[26.0, 27.5, 29.0, 26.0, 27.5, 29.0, 26.0, 27.5, 29.0]'], {}), '([26.0, 27.5, 29.0, 26.0, 27.5, 29.0, 26.0, 27.5, 29.0])\n', (1792, 1848), False, 'import numpy\n'), ((1861, 1901), 'openquake.hazardlib.geo.geodetic.distance_matrix', 'geo.geodetic.distance_matrix', (['lons', 'lats'], {}), '(lons, lats)\n', (1889, 1901), False, 'from openquake.hazardlib import geo, imt\n'), ((1989, 2040), 'openquake.hazardlib.shakemap.spatial_correlation_array', 'spatial_correlation_array', (['dmatrix', 'imts', '"""spatial"""'], {}), "(dmatrix, imts, 'spatial')\n", (2014, 2040), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, 
cross_correlation_matrix, cholesky\n'), ((2122, 2172), 'numpy.array', 'numpy.array', (['([(0.5, 0.52, 0.64, 0.73)] * 9)', 'imt_dt'], {}), '([(0.5, 0.52, 0.64, 0.73)] * 9, imt_dt)\n', (2133, 2172), False, 'import numpy\n'), ((2199, 2260), 'openquake.hazardlib.shakemap.spatial_covariance_array', 'spatial_covariance_array', (['[std[n] for n in imt_dt.names]', 'sca'], {}), '([std[n] for n in imt_dt.names], sca)\n', (2223, 2260), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((2343, 2382), 'openquake.hazardlib.shakemap.cross_correlation_matrix', 'cross_correlation_matrix', (['imts', '"""cross"""'], {}), "(imts, 'cross')\n", (2367, 2382), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((2466, 2486), 'openquake.hazardlib.shakemap.cholesky', 'cholesky', (['scov', 'ccor'], {}), '(scov, ccor)\n', (2474, 2486), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((2599, 2673), 'numpy.array', 'numpy.array', (['([(5.38409665, 3.9383686, 3.55435415, 4.37692394)] * 9)', 'imt_dt'], {}), '([(5.38409665, 3.9383686, 3.55435415, 4.37692394)] * 9, imt_dt)\n', (2610, 2673), False, 'import numpy\n'), ((2707, 2734), 'numpy.zeros', 'numpy.zeros', (['(9)', 'shakemap_dt'], {}), '(9, shakemap_dt)\n', (2718, 2734), False, 'import numpy\n'), ((2835, 2860), 'numpy.array', 'numpy.array', (['([301.17] * 9)'], {}), '([301.17] * 9)\n', (2846, 2860), False, 'import numpy\n'), ((2936, 3030), 'openquake.hazardlib.shakemap.to_gmfs', 'to_gmfs', (['shakemap'], {'crosscorr': '"""corr"""', 'site_effects': '(False)', 'trunclevel': '(3)', 'num_gmfs': '(2)', 'seed': '(42)'}), "(shakemap, crosscorr='corr', site_effects=False, trunclevel=3,\n num_gmfs=2, seed=42)\n", (2943, 3030), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n'), ((3168, 3262), 'openquake.hazardlib.shakemap.to_gmfs', 'to_gmfs', (['shakemap'], {'crosscorr': '"""cross"""', 'site_effects': '(True)', 'trunclevel': '(3)', 'num_gmfs': '(2)', 'seed': '(42)'}), "(shakemap, crosscorr='cross', site_effects=True, trunclevel=3,\n num_gmfs=2, seed=42)\n", (3175, 3262), False, 'from openquake.hazardlib.shakemap import get_shakemap_array, get_sitecol_shakemap, to_gmfs, amplify_ground_shaking, spatial_correlation_array, spatial_covariance_array, cross_correlation_matrix, cholesky\n')]
|
# importing numpy, pandas, and matplotlib
import numpy as np
import pandas as pd
import matplotlib
import multiprocessing
matplotlib.use('agg')
import matplotlib.pyplot as plt
# importing sklearn
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.decomposition import PCA
from sklearn.random_projection import GaussianRandomProjection
from sklearn import cluster
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
# importing keras
import keras
import keras.backend as K
from keras.wrappers.scikit_learn import KerasClassifier
from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback
from keras.models import Model, load_model
# importing util libraries
import datetime
import time
import math
import os
import importlib
# importing custom library
import DNN_models
import exception_handle
# fix numpy random seed for reproducibility
np.random.seed(7)
class DeepMicrobiome(object):
def __init__(self, data, seed, data_dir):
self.t_start = time.time()
self.filename = str(data)
self.data = self.filename.split('.')[0]
self.seed = seed
self.data_dir = data_dir
self.prefix = ''
self.representation_only = False
def loadData(self, feature_string, label_string, label_dict, dtype=None):
# read file
filename = self.data_dir + "data/" + self.filename
if os.path.isfile(filename):
raw = pd.read_csv(filename, sep='\t', index_col=0, header=None)
else:
print("FileNotFoundError: File {} does not exist".format(filename))
exit()
# select rows having feature index identifier string
X = raw.loc[raw.index.str.contains(feature_string, regex=False)].T
self.X = X
# get class labels
Y = raw.loc[label_string] #'disease'
self.Y = Y.replace(label_dict)
# indices
self.sample_ids = raw.iloc[1]
# train and test split
self.X_train, self.X_test, self.y_train, self.y_test, self.train_indices, self.test_indices = train_test_split(self.X.values.astype(dtype), self.Y.values.astype('int'), self.sample_ids, test_size=0.2, random_state=self.seed, stratify=Y.values)
self.printDataShapes()
def setIndices(self, train_indices, test_indices):
self.X_train = self.X.iloc[train_indices]
self.X_test = self.X.iloc[test_indices]
self.y_train = self.Y.iloc[train_indices]
self.y_test = self.Y.iloc[test_indices]
self.train_indices = self.sample_ids.iloc[train_indices]
self.test_indices = self.sample_ids.iloc[test_indices]
def loadCustomData(self, dtype=None):
# read file
filename = self.data_dir + "data/" + self.filename
if os.path.isfile(filename):
raw = pd.read_csv(filename, sep=',', index_col=False, header=None)
else:
print("FileNotFoundError: File {} does not exist".format(filename))
exit()
# load data
self.X_train = raw.values.astype(dtype)
# put nothing or zeros for y_train, y_test, and X_test
self.y_train = np.zeros(shape=(self.X_train.shape[0])).astype(dtype)
self.X_test = np.zeros(shape=(1,self.X_train.shape[1])).astype(dtype)
self.y_test = np.zeros(shape=(1,)).astype(dtype)
self.printDataShapes(train_only=True)
def loadCustomDataWithLabels(self, label_data, dtype=None):
# read file
filename = self.data_dir + "data/" + self.filename
label_filename = self.data_dir + "data/" + label_data
if os.path.isfile(filename) and os.path.isfile(label_filename):
raw = pd.read_csv(filename, sep=',', index_col=0, header=None)
label = pd.read_csv(label_filename, sep=',', index_col=0, header=None)
assert (raw.index == label.index).all()
else:
if not os.path.isfile(filename):
print("FileNotFoundError: File {} does not exist".format(filename))
if not os.path.isfile(label_filename):
print("FileNotFoundError: File {} does not exist".format(label_filename))
exit()
        # label data validity check: the label file must contain exactly one column
        if label.values.shape[1] > 1:
            print('FileSpecificationError: The label file contains more than 1 column.')
            exit()
# train and test split
self.X = raw.astype(dtype)
self.sample_ids = raw.index.to_series()
self.Y = label.astype(int)
self.X_train, self.X_test, self.y_train, self.y_test, self.train_indices, self.test_indices = train_test_split(raw.values.astype(dtype),
label.astype('int'), self.sample_ids, test_size=0.2,
random_state=self.seed,
stratify=label)
self.printDataShapes()
#Principal Component Analysis
def pca(self, ratio=0.99):
# manipulating an experiment identifier in the output file
self.prefix = self.prefix + 'PCA_'
# PCA
pca = PCA()
pca.fit(self.X_train)
n_comp = 0
ratio_sum = 0.0
for comp in pca.explained_variance_ratio_:
ratio_sum += comp
n_comp += 1
if ratio_sum >= ratio: # Selecting components explaining 99% of variance
break
pca = PCA(n_components=n_comp)
pca.fit(self.X_train)
X_train = pca.transform(self.X_train)
X_test = pca.transform(self.X_test)
# applying the eigenvectors to the whole training and the test set.
self.X_train = X_train
self.X_test = X_test
self.printDataShapes()
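    # --- Illustrative sketch (not part of the original class) ---
    # The loop above counts components until the cumulative explained-variance ratio reaches
    # `ratio`. scikit-learn can perform essentially the same selection directly when
    # 0 < n_components < 1 is combined with svd_solver='full'. This hypothetical helper shows
    # that equivalent call without touching self.X_train / self.X_test.
    def _pca_by_variance_sketch(self, ratio=0.99):
        pca = PCA(n_components=ratio, svd_solver='full')
        pca.fit(self.X_train)
        return pca.transform(self.X_train), pca.transform(self.X_test)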
    #Gaussian Random Projection
def rp(self):
# manipulating an experiment identifier in the output file
self.prefix = self.prefix + 'RandP_'
# GRP
rf = GaussianRandomProjection(eps=0.5)
rf.fit(self.X_train)
# applying GRP to the whole training and the test set.
self.X_train = rf.transform(self.X_train)
self.X_test = rf.transform(self.X_test)
self.printDataShapes()
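    # --- Illustrative sketch (not part of the original class) ---
    # GaussianRandomProjection(eps=0.5) with the default n_components='auto' derives the target
    # dimensionality from the Johnson-Lindenstrauss lemma, based on the number of samples and
    # the allowed distortion eps. This hypothetical helper just reports that dimension.
    def _rp_target_dim_sketch(self, eps=0.5):
        from sklearn.random_projection import johnson_lindenstrauss_min_dim
        return johnson_lindenstrauss_min_dim(n_samples=self.X_train.shape[0], eps=eps)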
#Shallow Autoencoder & Deep Autoencoder
def ae(self, dims = [50], epochs= 2000, batch_size=100, verbose=2, loss='mean_squared_error', latent_act=False, output_act=False, act='relu', patience=20, val_rate=0.2, no_trn=False):
# manipulating an experiment identifier in the output file
if patience != 20:
self.prefix += 'p' + str(patience) + '_'
if len(dims) == 1:
self.prefix += 'AE'
else:
self.prefix += 'DAE'
if loss == 'binary_crossentropy':
self.prefix += 'b'
if latent_act:
self.prefix += 't'
if output_act:
self.prefix += 'T'
self.prefix += str(dims).replace(", ", "-") + '_'
if act == 'sigmoid':
self.prefix = self.prefix + 's'
# filename for temporary model checkpoint
modelName = self.prefix + self.data + '.h5'
# clean up model checkpoint before use
if os.path.isfile(modelName):
os.remove(modelName)
# callbacks for each epoch
callbacks = [EarlyStopping(monitor='val_loss', patience=patience, mode='min', verbose=1),
ModelCheckpoint(modelName, monitor='val_loss', mode='min', verbose=1, save_best_only=True)]
# spliting the training set into the inner-train and the inner-test set (validation set)
X_inner_train, X_inner_test, y_inner_train, y_inner_test = train_test_split(self.X_train, self.y_train, test_size=val_rate, random_state=self.seed, stratify=self.y_train)
        # insert input shape into dimension list
        # (copy first so the mutable default argument is not modified across calls)
        dims = list(dims)
        dims.insert(0, X_inner_train.shape[1])
# create autoencoder model
self.autoencoder, self.encoder = DNN_models.autoencoder(dims, act=act, latent_act=latent_act, output_act=output_act)
self.autoencoder.summary()
if no_trn:
return
# compile model
self.autoencoder.compile(optimizer='adam', loss=loss)
# fit model
self.history = self.autoencoder.fit(X_inner_train, X_inner_train, epochs=epochs, batch_size=batch_size, callbacks=callbacks,
verbose=verbose, validation_data=(X_inner_test, X_inner_test))
# save loss progress
self.saveLossProgress()
# load best model
self.autoencoder = load_model(modelName)
layer_idx = int((len(self.autoencoder.layers) - 1) / 2)
self.encoder = Model(self.autoencoder.layers[0].input, self.autoencoder.layers[layer_idx].output)
# applying the learned encoder into the whole training and the test set.
self.X_train = self.encoder.predict(self.X_train)
self.X_test = self.encoder.predict(self.X_test)
# Variational Autoencoder
def vae(self, dims = [10], epochs=2000, batch_size=100, verbose=2, loss='mse', output_act=False, act='relu', patience=25, beta=1.0, warmup=True, warmup_rate=0.01, val_rate=0.2, no_trn=False):
# manipulating an experiment identifier in the output file
if patience != 25:
self.prefix += 'p' + str(patience) + '_'
if warmup:
self.prefix += 'w' + str(warmup_rate) + '_'
self.prefix += 'VAE'
if loss == 'binary_crossentropy':
self.prefix += 'b'
if output_act:
self.prefix += 'T'
if beta != 1:
self.prefix += 'B' + str(beta)
self.prefix += str(dims).replace(", ", "-") + '_'
if act == 'sigmoid':
self.prefix += 'sig_'
# filename for temporary model checkpoint
modelName = self.prefix + self.data + '.h5'
# clean up model checkpoint before use
if os.path.isfile(modelName):
os.remove(modelName)
# callbacks for each epoch
callbacks = [EarlyStopping(monitor='val_loss', patience=patience, mode='min', verbose=1),
ModelCheckpoint(modelName, monitor='val_loss', mode='min', verbose=1, save_best_only=True,save_weights_only=True)]
# warm-up callback
warm_up_cb = LambdaCallback(on_epoch_end=lambda epoch, logs: [warm_up(epoch)]) # , print(epoch), print(K.get_value(beta))])
# warm-up implementation
def warm_up(epoch):
val = epoch * warmup_rate
if val <= 1.0:
K.set_value(beta, val)
# add warm-up callback if requested
if warmup:
beta = K.variable(value=0.0)
callbacks.append(warm_up_cb)
# spliting the training set into the inner-train and the inner-test set (validation set)
X_inner_train, X_inner_test, y_inner_train, y_inner_test = train_test_split(self.X_train, self.y_train,
test_size=val_rate,
random_state=self.seed,
stratify=self.y_train)
        # insert input shape into dimension list
        # (copy first so the mutable default argument is not modified across calls)
        dims = list(dims)
        dims.insert(0, X_inner_train.shape[1])
# create vae model
self.vae, self.encoder, self.decoder = DNN_models.variational_AE(dims, act=act, recon_loss=loss, output_act=output_act, beta=beta)
self.vae.summary()
if no_trn:
return
# fit
self.history = self.vae.fit(X_inner_train, epochs=epochs, batch_size=batch_size, callbacks=callbacks, verbose=verbose, validation_data=(X_inner_test, None))
# save loss progress
self.saveLossProgress()
# load best model
self.vae.load_weights(modelName)
self.encoder = self.vae.layers[1]
# applying the learned encoder into the whole training and the test set.
_, _, self.X_train = self.encoder.predict(self.X_train)
_, _, self.X_test = self.encoder.predict(self.X_test)
# Convolutional Autoencoder
def cae(self, dims = [32], epochs=2000, batch_size=100, verbose=2, loss='mse', output_act=False, act='relu', patience=25, val_rate=0.2, rf_rate = 0.1, st_rate = 0.25, no_trn=False):
# manipulating an experiment identifier in the output file
self.prefix += 'CAE'
if loss == 'binary_crossentropy':
self.prefix += 'b'
if output_act:
self.prefix += 'T'
self.prefix += str(dims).replace(", ", "-") + '_'
if act == 'sigmoid':
self.prefix += 'sig_'
# filename for temporary model checkpoint
modelName = self.prefix + self.data + '.h5'
# clean up model checkpoint before use
if os.path.isfile(modelName):
os.remove(modelName)
# callbacks for each epoch
callbacks = [EarlyStopping(monitor='val_loss', patience=patience, mode='min', verbose=1),
ModelCheckpoint(modelName, monitor='val_loss', mode='min', verbose=1, save_best_only=True,save_weights_only=True)]
# fill out blank
onesideDim = int(math.sqrt(self.X_train.shape[1])) + 1
enlargedDim = onesideDim ** 2
self.X_train = np.column_stack((self.X_train, np.zeros((self.X_train.shape[0], enlargedDim - self.X_train.shape[1]))))
self.X_test = np.column_stack((self.X_test, np.zeros((self.X_test.shape[0], enlargedDim - self.X_test.shape[1]))))
# reshape
self.X_train = np.reshape(self.X_train, (len(self.X_train), onesideDim, onesideDim, 1))
self.X_test = np.reshape(self.X_test, (len(self.X_test), onesideDim, onesideDim, 1))
self.printDataShapes()
# spliting the training set into the inner-train and the inner-test set (validation set)
X_inner_train, X_inner_test, y_inner_train, y_inner_test = train_test_split(self.X_train, self.y_train,
test_size=val_rate,
random_state=self.seed,
stratify=self.y_train)
        # insert input shape into dimension list
        # (copy first so the mutable default argument is not modified across calls)
        dims = list(dims)
        dims.insert(0, (onesideDim, onesideDim, 1))
# create cae model
self.cae, self.encoder = DNN_models.conv_autoencoder(dims, act=act, output_act=output_act, rf_rate = rf_rate, st_rate = st_rate)
self.cae.summary()
if no_trn:
return
# compile
self.cae.compile(optimizer='adam', loss=loss)
# fit
self.history = self.cae.fit(X_inner_train, X_inner_train, epochs=epochs, batch_size=batch_size, callbacks=callbacks, verbose=verbose, validation_data=(X_inner_test, X_inner_test, None))
# save loss progress
self.saveLossProgress()
# load best model
self.cae.load_weights(modelName)
if len(self.cae.layers) % 2 == 0:
layer_idx = int((len(self.cae.layers) - 2) / 2)
else:
layer_idx = int((len(self.cae.layers) - 1) / 2)
self.encoder = Model(self.cae.layers[0].input, self.cae.layers[layer_idx].output)
# applying the learned encoder into the whole training and the test set.
self.X_train = self.encoder.predict(self.X_train)
self.X_test = self.encoder.predict(self.X_test)
self.printDataShapes()
# Classification
def classification(self, hyper_parameters, method='svm', cv=5, scoring='roc_auc', n_jobs=1, cache_size=10000):
clf_start_time = time.time()
print("# Tuning hyper-parameters")
print(self.X_train.shape, self.y_train.shape)
# Support Vector Machine
if method == 'svm':
clf = GridSearchCV(SVC(probability=True, cache_size=cache_size), hyper_parameters, cv=StratifiedKFold(cv, shuffle=True), scoring=scoring, n_jobs=n_jobs, verbose=100, )
clf.fit(self.X_train, self.y_train)
# Random Forest
if method == 'rf':
clf = GridSearchCV(RandomForestClassifier(n_jobs=-1, random_state=0), hyper_parameters, cv=StratifiedKFold(cv, shuffle=True), scoring=scoring, n_jobs=n_jobs, verbose=100)
clf.fit(self.X_train, self.y_train)
# Multi-layer Perceptron
if method == 'mlp':
model = KerasClassifier(build_fn=DNN_models.mlp_model, input_dim=self.X_train.shape[1], verbose=0, )
clf = GridSearchCV(estimator=model, param_grid=hyper_parameters, cv=StratifiedKFold(cv, shuffle=True), scoring=scoring, n_jobs=n_jobs, verbose=100)
clf.fit(self.X_train, self.y_train, batch_size=32)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
# Evaluate performance of the best model on test set
y_true, y_pred = self.y_test, clf.predict(self.X_test)
y_prob = clf.predict_proba(self.X_test)
# Performance Metrics: AUC, ACC, Recall, Precision, F1_score
metrics = [round(roc_auc_score(y_true, y_prob[:, 1]), 4),
round(accuracy_score(y_true, y_pred), 4),
round(recall_score(y_true, y_pred), 4),
round(precision_score(y_true, y_pred), 4),
round(f1_score(y_true, y_pred), 4), ]
# time stamp
metrics.append(str(datetime.datetime.now()))
# running time
metrics.append(round( (time.time() - self.t_start), 2))
# classification time
metrics.append(round( (time.time() - clf_start_time), 2))
# best hyper-parameter append
metrics.append(str(clf.best_params_))
# Write performance metrics as a file
res = pd.DataFrame([metrics], index=[self.prefix + method])
with open(self.data_dir + "results/" + self.data + "_result.txt", 'a') as f:
res.to_csv(f, header=None)
print('Accuracy metrics')
        print('AUC, ACC, Recall, Precision, F1_score, time-end, runtime(sec), classification time(sec), best hyper-parameter')
print(metrics)
def printDataShapes(self, train_only=False):
print("X_train.shape: ", self.X_train.shape)
if not train_only:
print("y_train.shape: ", self.y_train.shape)
print("X_test.shape: ", self.X_test.shape)
print("y_test.shape: ", self.y_test.shape)
    # plotting loss progress over epochs
def saveLossProgress(self):
#print(self.history.history.keys())
#print(type(self.history.history['loss']))
#print(min(self.history.history['loss']))
loss_collector, loss_max_atTheEnd = self.saveLossProgress_ylim()
# save loss progress - train and val loss only
figureName = self.prefix + self.data + '_' + str(self.seed)
plt.ylim(min(loss_collector)*0.9, loss_max_atTheEnd * 2.0)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train loss', 'val loss'],
loc='upper right')
plt.savefig(self.data_dir + "results/" + figureName + '.png')
plt.close()
if 'recon_loss' in self.history.history:
figureName = self.prefix + self.data + '_' + str(self.seed) + '_detailed'
plt.ylim(min(loss_collector) * 0.9, loss_max_atTheEnd * 2.0)
plt.plot(self.history.history['loss'])
plt.plot(self.history.history['val_loss'])
plt.plot(self.history.history['recon_loss'])
plt.plot(self.history.history['val_recon_loss'])
plt.plot(self.history.history['kl_loss'])
plt.plot(self.history.history['val_kl_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train loss', 'val loss', 'recon_loss', 'val recon_loss', 'kl_loss', 'val kl_loss'], loc='upper right')
plt.savefig(self.data_dir + "results/" + figureName + '.png')
plt.close()
# supporting loss plot
def saveLossProgress_ylim(self):
loss_collector = []
loss_max_atTheEnd = 0.0
for hist in self.history.history:
current = self.history.history[hist]
loss_collector += current
if current[-1] >= loss_max_atTheEnd:
loss_max_atTheEnd = current[-1]
return loss_collector, loss_max_atTheEnd
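# --- Illustrative sketch (not part of the original script) ---
# Minimal end-to-end usage of DeepMicrobiome outside the argparse-driven __main__ block below:
# load one of the provided datasets, reduce it with PCA, then run an SVM grid search. The file
# name, label dictionary and hyper-parameter grid here are assumptions chosen for illustration;
# the real entry point below builds them from command-line arguments. Defined only, never called.
def _example_pipeline(data_dir=''):
    dm = DeepMicrobiome(data='abundance_Cirrhosis.txt', seed=0, data_dir=data_dir)
    dm.loadData(feature_string='k__', label_string='disease',
                label_dict={'n': 0, 'cirrhosis': 1}, dtype=np.float64)
    dm.pca()
    dm.classification(hyper_parameters=[{'C': [1, 10], 'kernel': ['linear']}],
                      method='svm', cv=5, scoring='roc_auc', n_jobs=1)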
if __name__ == '__main__':
# argparse
import argparse
parser = argparse.ArgumentParser()
parser._action_groups.pop()
# load data
load_data = parser.add_argument_group('Loading data')
load_data.add_argument("-d", "--data", help="prefix of dataset to open (e.g. abundance_Cirrhosis)", type=str,
choices=["abundance_Cirrhosis", "abundance_Colorectal", "abundance_IBD",
"abundance_Obesity", "abundance_T2D", "abundance_WT2D",
"marker_Cirrhosis", "marker_Colorectal", "marker_IBD",
"marker_Obesity", "marker_T2D", "marker_WT2D",
])
load_data.add_argument("-cd", "--custom_data", help="filename for custom input data under the 'data' folder", type=str,)
load_data.add_argument("-cl", "--custom_data_labels", help="filename for custom input labels under the 'data' folder", type=str,)
load_data.add_argument("-p", "--data_dir", help="custom path for both '/data' and '/results' folders", default="")
load_data.add_argument("-dt", "--dataType", help="Specify data type for numerical values (float16, float32, float64)",
default="float64", type=str, choices=["float16", "float32", "float64"])
dtypeDict = {"float16": np.float16, "float32": np.float32, "float64": np.float64}
# experiment design
exp_design = parser.add_argument_group('Experiment design')
exp_design.add_argument("-s", "--seed", help="random seed for train and test split", type=int, default=0)
exp_design.add_argument("-k", "--kfold", help="Number of stratified folds to perform", type=int, default=5)
exp_design.add_argument("-r", "--repeat", help="repeat experiment x times by changing random seed for splitting data",
default=5, type=int)
# classification
classification = parser.add_argument_group('Classification')
classification.add_argument("-f", "--numFolds", help="The number of folds for cross-validation in the tranining set",
default=5, type=int)
classification.add_argument("-m", "--method", help="classifier(s) to use", type=str, default="all",
choices=["all", "svm", "rf", "mlp", "svm_rf"])
classification.add_argument("-sc", "--svm_cache", help="cache size for svm run", type=int, default=1000)
classification.add_argument("-t", "--numJobs",
help="The number of jobs used in parallel GridSearch. (-1: utilize all possible cores; -2: utilize all possible cores except one.)",
default=-2, type=int)
parser.add_argument("--scoring", help="Metrics used to optimize method", type=str, default='roc_auc',
choices=['roc_auc', 'accuracy', 'f1', 'recall', 'precision'])
# representation learning & dimensionality reduction algorithms
rl = parser.add_argument_group('Representation learning')
rl.add_argument("--pca", help="run PCA", action='store_true')
rl.add_argument("--rp", help="run Random Projection", action='store_true')
rl.add_argument("--ae", help="run Autoencoder or Deep Autoencoder", action='store_true')
rl.add_argument("--vae", help="run Variational Autoencoder", action='store_true')
rl.add_argument("--cae", help="run Convolutional Autoencoder", action='store_true')
rl.add_argument("--save_rep", help="write the learned representation of the training set as a file", action='store_true')
# detailed options for representation learning
## common options
common = parser.add_argument_group('Common options for representation learning (SAE,DAE,VAE,CAE)')
common.add_argument("--aeloss", help="set autoencoder reconstruction loss function", type=str,
choices=['mse', 'binary_crossentropy'], default='mse')
common.add_argument("--ae_oact", help="output layer sigmoid activation function on/off", action='store_true')
common.add_argument("-a", "--act", help="activation function for hidden layers", type=str, default='relu',
choices=['relu', 'sigmoid'])
common.add_argument("-dm", "--dims",
help="Comma-separated dimensions for deep representation learning e.g. (-dm 50,30,20)",
type=str, default='50')
common.add_argument("-e", "--max_epochs", help="Maximum epochs when training autoencoder", type=int, default=2000)
common.add_argument("-pt", "--patience",
help="The number of epochs which can be executed without the improvement in validation loss, right after the last improvement.",
type=int, default=20)
## AE & DAE only
AE = parser.add_argument_group('SAE & DAE-specific arguments')
AE.add_argument("--ae_lact", help="latent layer activation function on/off", action='store_true')
## VAE only
VAE = parser.add_argument_group('VAE-specific arguments')
VAE.add_argument("--vae_beta", help="weight of KL term", type=float, default=1.0)
VAE.add_argument("--vae_warmup", help="turn on warm up", action='store_true')
VAE.add_argument("--vae_warmup_rate", help="warm-up rate which will be multiplied by current epoch to calculate current beta", default=0.01, type=float)
## CAE only
CAE = parser.add_argument_group('CAE-specific arguments')
CAE.add_argument("--rf_rate", help="What percentage of input size will be the receptive field (kernel) size? [0,1]", type=float, default=0.1)
CAE.add_argument("--st_rate", help="What percentage of receptive field (kernel) size will be the stride size? [0,1]", type=float, default=0.25)
# other options
others = parser.add_argument_group('other optional arguments')
others.add_argument("--no_trn", help="stop before learning representation to see specified autoencoder structure", action='store_true')
others.add_argument("--no_clf", help="skip classification tasks", action='store_true')
args = parser.parse_args()
print(args)
# set labels for diseases and controls
label_dict = {
# Controls
'n': 0,
# Chirrhosis
'cirrhosis': 1,
# Colorectal Cancer
'cancer': 1, 'small_adenoma': 0,
# IBD
'ibd_ulcerative_colitis': 1, 'ibd_crohn_disease': 1,
# T2D and WT2D
't2d': 1,
# Obesity
'leaness': 0, 'obesity': 1,
}
# hyper-parameter grids for classifiers
rf_hyper_parameters = [{'n_estimators': [s for s in range(100, 1001, 200)],
'max_features': ['sqrt', 'log2'],
'min_samples_leaf': [1, 2, 3, 4, 5],
'criterion': ['gini', 'entropy']
}, ]
#svm_hyper_parameters_pasolli = [{'C': [2 ** s for s in range(-5, 16, 2)], 'kernel': ['linear']},
# {'C': [2 ** s for s in range(-5, 16, 2)], 'gamma': [2 ** s for s in range(3, -15, -2)],
# 'kernel': ['rbf']}]
svm_hyper_parameters = [{'C': [2 ** s for s in range(-5, 6, 2)], 'kernel': ['linear']},
{'C': [2 ** s for s in range(-5, 6, 2)], 'gamma': [2 ** s for s in range(3, -15, -2)],'kernel': ['rbf']}]
mlp_hyper_parameters = [{'numHiddenLayers': [1, 2, 3],
'epochs': [30, 50, 100, 200, 300],
'numUnits': [10, 30, 50, 100],
'dropout_rate': [0.1, 0.3],
},]
def loadData():
dm = None
if args.data == None and args.custom_data == None:
print("[Error] Please specify an input file. (use -h option for help)")
exit()
## provided data
elif args.data != None:
dm = DeepMicrobiome(data=args.data + '.txt', seed=args.seed, data_dir=args.data_dir)
## specify feature string
feature_string = ''
data_string = str(args.data)
if data_string.split('_')[0] == 'abundance':
feature_string = "k__"
if data_string.split('_')[0] == 'marker':
feature_string = "gi|"
## load data into the object
dm.loadData(feature_string=feature_string, label_string='disease', label_dict=label_dict,
dtype=dtypeDict[args.dataType])
## user data
elif args.custom_data != None:
### without labels - only conducting representation learning
if args.custom_data_labels == None:
dm = DeepMicrobiome(data=args.custom_data, seed=args.seed, data_dir=args.data_dir)
dm.loadCustomData(dtype=dtypeDict[args.dataType])
### with labels - conducting representation learning + classification
else:
dm = DeepMicrobiome(data=args.custom_data, seed=args.seed, data_dir=args.data_dir)
dm.loadCustomDataWithLabels(label_data=args.custom_data_labels, dtype=dtypeDict[args.dataType])
else:
exit()
return dm
# run exp function
def run_exp(seed):
# create an object and load data
## no argument founded
def run_fold(train_indices, test_indices, k: int = 1):
dm = loadData()
numRLrequired = args.pca + args.ae + args.rp + args.vae + args.cae
if numRLrequired > 1:
                raise ValueError('Only one dimensionality-reduction method can be selected at a time.')
# time check after data has been loaded
dm.t_start = time.time()
# Representation learning (Dimensionality reduction)
dm.setIndices(train_indices, test_indices)
if args.pca:
dm.pca()
if args.ae:
dm.ae(dims=[int(i) for i in args.dims.split(',')], act=args.act, epochs=args.max_epochs,
loss=args.aeloss,
latent_act=args.ae_lact, output_act=args.ae_oact, patience=args.patience, no_trn=args.no_trn)
if args.vae:
dm.vae(dims=[int(i) for i in args.dims.split(',')], act=args.act, epochs=args.max_epochs,
loss=args.aeloss, output_act=args.ae_oact,
patience=25 if args.patience == 20 else args.patience, beta=args.vae_beta,
warmup=args.vae_warmup, warmup_rate=args.vae_warmup_rate, no_trn=args.no_trn)
if args.cae:
dm.cae(dims=[int(i) for i in args.dims.split(',')], act=args.act, epochs=args.max_epochs,
loss=args.aeloss, output_act=args.ae_oact,
patience=args.patience, rf_rate=args.rf_rate, st_rate=args.st_rate, no_trn=args.no_trn)
if args.rp:
dm.rp()
# write the learned representation of the training set as a file
if args.save_rep:
if numRLrequired == 1:
fold_dir = os.path.join(dm.data_dir, dm.data, "results", str(k))
if not os.path.isdir(fold_dir):
os.makedirs(fold_dir)
rep_file = os.path.join(fold_dir, dm.prefix + dm.data + f".train.csv")
pd.DataFrame(dm.X_train, index=dm.train_indices).to_csv(rep_file, header=False, index=True)
print("The learned representation of the training set has been saved in '{}'".format(rep_file))
rep_file = os.path.join(fold_dir, dm.prefix + dm.data + f".test.csv")
pd.DataFrame(dm.X_test, index=dm.test_indices).to_csv(rep_file, header=False, index=True)
print("The learned representation of the training set has been saved in '{}'".format(rep_file))
else:
print(
"Warning: Command option '--save_rep' is not applied as no representation learning or dimensionality reduction has been conducted.")
# Classification
if args.no_clf or (args.data == None and args.custom_data_labels == None):
print("Classification task has been skipped.")
else:
# turn off GPU
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
importlib.reload(keras)
# training classification models
if args.method == "svm":
dm.classification(hyper_parameters=svm_hyper_parameters, method='svm', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring, cache_size=args.svm_cache)
elif args.method == "rf":
dm.classification(hyper_parameters=rf_hyper_parameters, method='rf', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring)
elif args.method == "mlp":
dm.classification(hyper_parameters=mlp_hyper_parameters, method='mlp', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring)
elif args.method == "svm_rf":
dm.classification(hyper_parameters=svm_hyper_parameters, method='svm', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring, cache_size=args.svm_cache)
dm.classification(hyper_parameters=rf_hyper_parameters, method='rf', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring)
else:
dm.classification(hyper_parameters=svm_hyper_parameters, method='svm', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring, cache_size=args.svm_cache)
dm.classification(hyper_parameters=rf_hyper_parameters, method='rf', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring)
dm.classification(hyper_parameters=mlp_hyper_parameters, method='mlp', cv=args.numFolds,
n_jobs=args.numJobs, scoring=args.scoring)
dm = loadData()
if args.kfold:
            kf = StratifiedKFold(n_splits=args.kfold, shuffle=True, random_state=seed)  # shuffle must be enabled for random_state to take effect
i = 0
for train_indices, test_indices in kf.split(dm.X.values, dm.Y.values.astype('int')):
fold_process = multiprocessing.Process(target=run_fold, args=(train_indices, test_indices, i))
fold_process.start()
fold_process.join()
i += 1
else:
run_fold(dm.train_indices, dm.test_indices)
# run experiments
try:
if args.repeat > 1:
for i in range(args.repeat):
run_exp(i)
else:
run_exp(args.seed)
except OSError as error:
exception_handle.log_exception(error)
|
[
"keras.models.load_model",
"matplotlib.pyplot.title",
"os.remove",
"numpy.random.seed",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"keras.backend.set_value",
"sklearn.metrics.accuracy_score",
"keras.models.Model",
"os.path.isfile",
"sklearn.metrics.f1_score",
"sklearn.svm.SVC",
"os.path.join",
"DNN_models.variational_AE",
"pandas.DataFrame",
"keras.wrappers.scikit_learn.KerasClassifier",
"sklearn.random_projection.GaussianRandomProjection",
"matplotlib.pyplot.close",
"DNN_models.conv_autoencoder",
"datetime.datetime.now",
"sklearn.ensemble.RandomForestClassifier",
"math.sqrt",
"keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"sklearn.metrics.recall_score",
"sklearn.metrics.roc_auc_score",
"matplotlib.use",
"matplotlib.pyplot.ylabel",
"os.makedirs",
"matplotlib.pyplot.plot",
"os.path.isdir",
"numpy.zeros",
"time.time",
"importlib.reload",
"keras.callbacks.EarlyStopping",
"sklearn.decomposition.PCA",
"sklearn.model_selection.StratifiedKFold",
"DNN_models.autoencoder",
"sklearn.metrics.precision_score",
"multiprocessing.Process",
"matplotlib.pyplot.xlabel",
"keras.backend.variable",
"matplotlib.pyplot.savefig",
"exception_handle.log_exception"
] |
[((122, 143), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (136, 143), False, 'import matplotlib\n'), ((1225, 1242), 'numpy.random.seed', 'np.random.seed', (['(7)'], {}), '(7)\n', (1239, 1242), True, 'import numpy as np\n'), ((21201, 21226), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (21224, 21226), False, 'import argparse\n'), ((1343, 1354), 'time.time', 'time.time', ([], {}), '()\n', (1352, 1354), False, 'import time\n'), ((1730, 1754), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (1744, 1754), False, 'import os\n'), ((3099, 3123), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3113, 3123), False, 'import os\n'), ((5650, 5655), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (5653, 5655), False, 'from sklearn.decomposition import PCA\n'), ((5958, 5982), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_comp'}), '(n_components=n_comp)\n', (5961, 5982), False, 'from sklearn.decomposition import PCA\n'), ((6461, 6494), 'sklearn.random_projection.GaussianRandomProjection', 'GaussianRandomProjection', ([], {'eps': '(0.5)'}), '(eps=0.5)\n', (6485, 6494), False, 'from sklearn.random_projection import GaussianRandomProjection\n'), ((7678, 7703), 'os.path.isfile', 'os.path.isfile', (['modelName'], {}), '(modelName)\n', (7692, 7703), False, 'import os\n'), ((8150, 8265), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X_train', 'self.y_train'], {'test_size': 'val_rate', 'random_state': 'self.seed', 'stratify': 'self.y_train'}), '(self.X_train, self.y_train, test_size=val_rate,\n random_state=self.seed, stratify=self.y_train)\n', (8166, 8265), False, 'from sklearn.model_selection import train_test_split\n'), ((8436, 8524), 'DNN_models.autoencoder', 'DNN_models.autoencoder', (['dims'], {'act': 'act', 'latent_act': 'latent_act', 'output_act': 'output_act'}), '(dims, act=act, latent_act=latent_act, output_act=\n output_act)\n', (8458, 8524), False, 'import DNN_models\n'), ((9042, 9063), 'keras.models.load_model', 'load_model', (['modelName'], {}), '(modelName)\n', (9052, 9063), False, 'from keras.models import Model, load_model\n'), ((9151, 9238), 'keras.models.Model', 'Model', (['self.autoencoder.layers[0].input', 'self.autoencoder.layers[layer_idx].output'], {}), '(self.autoencoder.layers[0].input, self.autoencoder.layers[layer_idx].\n output)\n', (9156, 9238), False, 'from keras.models import Model, load_model\n'), ((10384, 10409), 'os.path.isfile', 'os.path.isfile', (['modelName'], {}), '(modelName)\n', (10398, 10409), False, 'import os\n'), ((11351, 11466), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X_train', 'self.y_train'], {'test_size': 'val_rate', 'random_state': 'self.seed', 'stratify': 'self.y_train'}), '(self.X_train, self.y_train, test_size=val_rate,\n random_state=self.seed, stratify=self.y_train)\n', (11367, 11466), False, 'from sklearn.model_selection import train_test_split\n'), ((11887, 11983), 'DNN_models.variational_AE', 'DNN_models.variational_AE', (['dims'], {'act': 'act', 'recon_loss': 'loss', 'output_act': 'output_act', 'beta': 'beta'}), '(dims, act=act, recon_loss=loss, output_act=\n output_act, beta=beta)\n', (11912, 11983), False, 'import DNN_models\n'), ((13331, 13356), 'os.path.isfile', 'os.path.isfile', (['modelName'], {}), '(modelName)\n', (13345, 13356), False, 'import os\n'), ((14443, 14558), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X_train', 'self.y_train'], 
{'test_size': 'val_rate', 'random_state': 'self.seed', 'stratify': 'self.y_train'}), '(self.X_train, self.y_train, test_size=val_rate,\n random_state=self.seed, stratify=self.y_train)\n', (14459, 14558), False, 'from sklearn.model_selection import train_test_split\n'), ((14970, 15074), 'DNN_models.conv_autoencoder', 'DNN_models.conv_autoencoder', (['dims'], {'act': 'act', 'output_act': 'output_act', 'rf_rate': 'rf_rate', 'st_rate': 'st_rate'}), '(dims, act=act, output_act=output_act, rf_rate=\n rf_rate, st_rate=st_rate)\n', (14997, 15074), False, 'import DNN_models\n'), ((15750, 15816), 'keras.models.Model', 'Model', (['self.cae.layers[0].input', 'self.cae.layers[layer_idx].output'], {}), '(self.cae.layers[0].input, self.cae.layers[layer_idx].output)\n', (15755, 15816), False, 'from keras.models import Model, load_model\n'), ((16206, 16217), 'time.time', 'time.time', ([], {}), '()\n', (16215, 16217), False, 'import time\n'), ((18353, 18406), 'pandas.DataFrame', 'pd.DataFrame', (['[metrics]'], {'index': '[self.prefix + method]'}), '([metrics], index=[self.prefix + method])\n', (18365, 18406), True, 'import pandas as pd\n'), ((19503, 19541), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['loss']"], {}), "(self.history.history['loss'])\n", (19511, 19541), True, 'import matplotlib.pyplot as plt\n'), ((19550, 19592), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_loss']"], {}), "(self.history.history['val_loss'])\n", (19558, 19592), True, 'import matplotlib.pyplot as plt\n'), ((19601, 19624), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (19610, 19624), True, 'import matplotlib.pyplot as plt\n'), ((19633, 19651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (19643, 19651), True, 'import matplotlib.pyplot as plt\n'), ((19660, 19679), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (19670, 19679), True, 'import matplotlib.pyplot as plt\n'), ((19688, 19745), 'matplotlib.pyplot.legend', 'plt.legend', (["['train loss', 'val loss']"], {'loc': '"""upper right"""'}), "(['train loss', 'val loss'], loc='upper right')\n", (19698, 19745), True, 'import matplotlib.pyplot as plt\n'), ((19773, 19834), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.data_dir + 'results/' + figureName + '.png')"], {}), "(self.data_dir + 'results/' + figureName + '.png')\n", (19784, 19834), True, 'import matplotlib.pyplot as plt\n'), ((19843, 19854), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19852, 19854), True, 'import matplotlib.pyplot as plt\n'), ((1774, 1831), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '"""\t"""', 'index_col': '(0)', 'header': 'None'}), "(filename, sep='\\t', index_col=0, header=None)\n", (1785, 1831), True, 'import pandas as pd\n'), ((3143, 3203), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""","""', 'index_col': '(False)', 'header': 'None'}), "(filename, sep=',', index_col=False, header=None)\n", (3154, 3203), True, 'import pandas as pd\n'), ((3925, 3949), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (3939, 3949), False, 'import os\n'), ((3954, 3984), 'os.path.isfile', 'os.path.isfile', (['label_filename'], {}), '(label_filename)\n', (3968, 3984), False, 'import os\n'), ((4004, 4060), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'sep': '""","""', 'index_col': '(0)', 'header': 'None'}), "(filename, sep=',', index_col=0, header=None)\n", (4015, 4060), True, 'import pandas as pd\n'), 
((4081, 4143), 'pandas.read_csv', 'pd.read_csv', (['label_filename'], {'sep': '""","""', 'index_col': '(0)', 'header': 'None'}), "(label_filename, sep=',', index_col=0, header=None)\n", (4092, 4143), True, 'import pandas as pd\n'), ((7717, 7737), 'os.remove', 'os.remove', (['modelName'], {}), '(modelName)\n', (7726, 7737), False, 'import os\n'), ((7795, 7870), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'patience', 'mode': '"""min"""', 'verbose': '(1)'}), "(monitor='val_loss', patience=patience, mode='min', verbose=1)\n", (7808, 7870), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback\n'), ((7893, 7987), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['modelName'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'save_best_only': '(True)'}), "(modelName, monitor='val_loss', mode='min', verbose=1,\n save_best_only=True)\n", (7908, 7987), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback\n'), ((10423, 10443), 'os.remove', 'os.remove', (['modelName'], {}), '(modelName)\n', (10432, 10443), False, 'import os\n'), ((10501, 10576), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'patience', 'mode': '"""min"""', 'verbose': '(1)'}), "(monitor='val_loss', patience=patience, mode='min', verbose=1)\n", (10514, 10576), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback\n'), ((10599, 10717), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['modelName'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), "(modelName, monitor='val_loss', mode='min', verbose=1,\n save_best_only=True, save_weights_only=True)\n", (10614, 10717), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback\n'), ((11123, 11144), 'keras.backend.variable', 'K.variable', ([], {'value': '(0.0)'}), '(value=0.0)\n', (11133, 11144), True, 'import keras.backend as K\n'), ((13370, 13390), 'os.remove', 'os.remove', (['modelName'], {}), '(modelName)\n', (13379, 13390), False, 'import os\n'), ((13448, 13523), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': 'patience', 'mode': '"""min"""', 'verbose': '(1)'}), "(monitor='val_loss', patience=patience, mode='min', verbose=1)\n", (13461, 13523), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback\n'), ((13546, 13664), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['modelName'], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'verbose': '(1)', 'save_best_only': '(True)', 'save_weights_only': '(True)'}), "(modelName, monitor='val_loss', mode='min', verbose=1,\n save_best_only=True, save_weights_only=True)\n", (13561, 13664), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, LambdaCallback\n'), ((16971, 17066), 'keras.wrappers.scikit_learn.KerasClassifier', 'KerasClassifier', ([], {'build_fn': 'DNN_models.mlp_model', 'input_dim': 'self.X_train.shape[1]', 'verbose': '(0)'}), '(build_fn=DNN_models.mlp_model, input_dim=self.X_train.shape\n [1], verbose=0)\n', (16986, 17066), False, 'from keras.wrappers.scikit_learn import KerasClassifier\n'), ((20076, 20114), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['loss']"], {}), "(self.history.history['loss'])\n", (20084, 20114), True, 'import matplotlib.pyplot as plt\n'), ((20127, 20169), 
'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_loss']"], {}), "(self.history.history['val_loss'])\n", (20135, 20169), True, 'import matplotlib.pyplot as plt\n'), ((20182, 20226), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['recon_loss']"], {}), "(self.history.history['recon_loss'])\n", (20190, 20226), True, 'import matplotlib.pyplot as plt\n'), ((20239, 20287), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_recon_loss']"], {}), "(self.history.history['val_recon_loss'])\n", (20247, 20287), True, 'import matplotlib.pyplot as plt\n'), ((20300, 20341), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['kl_loss']"], {}), "(self.history.history['kl_loss'])\n", (20308, 20341), True, 'import matplotlib.pyplot as plt\n'), ((20354, 20399), 'matplotlib.pyplot.plot', 'plt.plot', (["self.history.history['val_kl_loss']"], {}), "(self.history.history['val_kl_loss'])\n", (20362, 20399), True, 'import matplotlib.pyplot as plt\n'), ((20412, 20435), 'matplotlib.pyplot.title', 'plt.title', (['"""model loss"""'], {}), "('model loss')\n", (20421, 20435), True, 'import matplotlib.pyplot as plt\n'), ((20448, 20466), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (20458, 20466), True, 'import matplotlib.pyplot as plt\n'), ((20479, 20498), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), "('epoch')\n", (20489, 20498), True, 'import matplotlib.pyplot as plt\n'), ((20511, 20630), 'matplotlib.pyplot.legend', 'plt.legend', (["['train loss', 'val loss', 'recon_loss', 'val recon_loss', 'kl_loss',\n 'val kl_loss']"], {'loc': '"""upper right"""'}), "(['train loss', 'val loss', 'recon_loss', 'val recon_loss',\n 'kl_loss', 'val kl_loss'], loc='upper right')\n", (20521, 20630), True, 'import matplotlib.pyplot as plt\n'), ((20639, 20700), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.data_dir + 'results/' + figureName + '.png')"], {}), "(self.data_dir + 'results/' + figureName + '.png')\n", (20650, 20700), True, 'import matplotlib.pyplot as plt\n'), ((20713, 20724), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (20722, 20724), True, 'import matplotlib.pyplot as plt\n'), ((30731, 30742), 'time.time', 'time.time', ([], {}), '()\n', (30740, 30742), False, 'import time\n'), ((35356, 35411), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'args.kfold', 'random_state': 'seed'}), '(n_splits=args.kfold, random_state=seed)\n', (35371, 35411), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((36015, 36052), 'exception_handle.log_exception', 'exception_handle.log_exception', (['error'], {}), '(error)\n', (36045, 36052), False, 'import exception_handle\n'), ((3473, 3510), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self.X_train.shape[0]'}), '(shape=self.X_train.shape[0])\n', (3481, 3510), True, 'import numpy as np\n'), ((3549, 3591), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1, self.X_train.shape[1])'}), '(shape=(1, self.X_train.shape[1]))\n', (3557, 3591), True, 'import numpy as np\n'), ((3627, 3647), 'numpy.zeros', 'np.zeros', ([], {'shape': '(1,)'}), '(shape=(1,))\n', (3635, 3647), True, 'import numpy as np\n'), ((4229, 4253), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (4243, 4253), False, 'import os\n'), ((4358, 4388), 'os.path.isfile', 'os.path.isfile', (['label_filename'], {}), '(label_filename)\n', (4372, 4388), False, 'import os\n'), ((11018, 11040), 'keras.backend.set_value', 'K.set_value', (['beta', 'val'], {}), 
'(beta, val)\n', (11029, 11040), True, 'import keras.backend as K\n'), ((13713, 13745), 'math.sqrt', 'math.sqrt', (['self.X_train.shape[1]'], {}), '(self.X_train.shape[1])\n', (13722, 13745), False, 'import math\n'), ((13843, 13913), 'numpy.zeros', 'np.zeros', (['(self.X_train.shape[0], enlargedDim - self.X_train.shape[1])'], {}), '((self.X_train.shape[0], enlargedDim - self.X_train.shape[1]))\n', (13851, 13913), True, 'import numpy as np\n'), ((13968, 14036), 'numpy.zeros', 'np.zeros', (['(self.X_test.shape[0], enlargedDim - self.X_test.shape[1])'], {}), '((self.X_test.shape[0], enlargedDim - self.X_test.shape[1]))\n', (13976, 14036), True, 'import numpy as np\n'), ((16409, 16453), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)', 'cache_size': 'cache_size'}), '(probability=True, cache_size=cache_size)\n', (16412, 16453), False, 'from sklearn.svm import SVC\n'), ((16689, 16738), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_jobs': '(-1)', 'random_state': '(0)'}), '(n_jobs=-1, random_state=0)\n', (16711, 16738), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((17667, 17702), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['y_true', 'y_prob[:, 1]'], {}), '(y_true, y_prob[:, 1])\n', (17680, 17702), False, 'from sklearn.metrics import roc_auc_score\n'), ((17733, 17763), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (17747, 17763), False, 'from sklearn.metrics import accuracy_score\n'), ((17794, 17822), 'sklearn.metrics.recall_score', 'recall_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (17806, 17822), False, 'from sklearn.metrics import recall_score\n'), ((17853, 17884), 'sklearn.metrics.precision_score', 'precision_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (17868, 17884), False, 'from sklearn.metrics import precision_score\n'), ((17915, 17939), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (17923, 17939), False, 'from sklearn.metrics import f1_score\n'), ((17996, 18019), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (18017, 18019), False, 'import datetime\n'), ((33428, 33451), 'importlib.reload', 'importlib.reload', (['keras'], {}), '(keras)\n', (33444, 33451), False, 'import importlib\n'), ((35558, 35637), 'multiprocessing.Process', 'multiprocessing.Process', ([], {'target': 'run_fold', 'args': '(train_indices, test_indices, i)'}), '(target=run_fold, args=(train_indices, test_indices, i))\n', (35581, 35637), False, 'import multiprocessing\n'), ((16476, 16509), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['cv'], {'shuffle': '(True)'}), '(cv, shuffle=True)\n', (16491, 16509), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((16761, 16794), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['cv'], {'shuffle': '(True)'}), '(cv, shuffle=True)\n', (16776, 16794), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((17144, 17177), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', (['cv'], {'shuffle': '(True)'}), '(cv, shuffle=True)\n', (17159, 17177), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((18077, 18088), 'time.time', 'time.time', ([], {}), '()\n', (18086, 18088), False, 'import time\n'), ((18172, 18183), 'time.time', 'time.time', ([], {}), '()\n', (18181, 18183), False, 'import time\n'), ((32314, 32373), 'os.path.join', 'os.path.join', (['fold_dir', "(dm.prefix + dm.data + 
f'.train.csv')"], {}), "(fold_dir, dm.prefix + dm.data + f'.train.csv')\n", (32326, 32373), False, 'import os\n'), ((32634, 32692), 'os.path.join', 'os.path.join', (['fold_dir', "(dm.prefix + dm.data + f'.test.csv')"], {}), "(fold_dir, dm.prefix + dm.data + f'.test.csv')\n", (32646, 32692), False, 'import os\n'), ((32211, 32234), 'os.path.isdir', 'os.path.isdir', (['fold_dir'], {}), '(fold_dir)\n', (32224, 32234), False, 'import os\n'), ((32260, 32281), 'os.makedirs', 'os.makedirs', (['fold_dir'], {}), '(fold_dir)\n', (32271, 32281), False, 'import os\n'), ((32394, 32442), 'pandas.DataFrame', 'pd.DataFrame', (['dm.X_train'], {'index': 'dm.train_indices'}), '(dm.X_train, index=dm.train_indices)\n', (32406, 32442), True, 'import pandas as pd\n'), ((32713, 32759), 'pandas.DataFrame', 'pd.DataFrame', (['dm.X_test'], {'index': 'dm.test_indices'}), '(dm.X_test, index=dm.test_indices)\n', (32725, 32759), True, 'import pandas as pd\n')]
|
from utils import parse_args, create_experiment_dirs, calculate_flops
from model import MobileNet
from train import Train
from data_loader import DataLoader
from summarizer import Summarizer
import tensorflow as tf
from crop_face import FaceCropper
import cv2
import numpy as np
def main():
# Parse the JSON arguments
try:
config_args = parse_args()
except:
print("Add a config file using \'--config file_name.json\'")
exit(1)
# Create the experiment directories
_, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir)
# Reset the default Tensorflow graph
tf.reset_default_graph()
# Tensorflow specific configuration
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
# Data loading
data = DataLoader(config_args.batch_size, config_args.shuffle)
print("Loading Data...")
config_args.img_height, config_args.img_width, config_args.num_channels, \
config_args.train_data_size, config_args.test_data_size = data.load_data()
print("Data loaded\n\n")
# Model creation
print("Building the model...")
model = MobileNet(config_args)
print("Model is built successfully\n\n")
# Summarizer creation
summarizer = Summarizer(sess, config_args.summary_dir)
# Train class
trainer = Train(sess, model, data, summarizer)
# if config_args.to_train:
# try:
# print("Training...")
# trainer.train()
# print("Training Finished\n\n")
# except KeyboardInterrupt:
# trainer.save_model()
# if config_args.to_test:
# print("Final test!")
# trainer.test('val')
# print("Testing Finished\n\n")
# trainer.dectect(FaceCropper().generate('fake.png'))
if __name__ == '__main__':
# main()
config_args = parse_args()
config_args.img_height, config_args.img_width, config_args.num_channels = (224, 224, 3)
_, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir)
config = tf.ConfigProto(allow_soft_placement=True)
config.gpu_options.allow_growth = True
faces = FaceCropper().generate('maxresdefault.jpg')
with tf.Session(config=config) as sess:
config_args.batch_size = len(faces)
model = MobileNet(config_args)
sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer()))
saver = tf.train.Saver(max_to_keep=config_args.max_to_keep,
keep_checkpoint_every_n_hours=10,
save_relative_paths=True)
saver.restore(sess, tf.train.latest_checkpoint(config_args.checkpoint_dir))
# show camera
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while True:
_, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.1, 4)
croppedFaces = []
for (x, y, w, h) in faces:
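                # note: take a square crop of side 2r centred on the detected box so the
                # later resize to 224x224 keeps the face's aspect ratio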
r = max(w, h) / 2
centerx = x + w / 2
centery = y + h / 2
nx = int(centerx - r)
ny = int(centery - r)
nr = int(r * 2)
faceimg = img[ny:ny+nr, nx:nx+nr]
lastimg = cv2.resize(faceimg, (224, 224))
croppedFaces.append(lastimg)
normalizedFaces = np.array(croppedFaces).reshape((len(croppedFaces), 224, 224, 3))
results = sess.run(model.y_out_argmax, feed_dict={model.X: normalizedFaces, model.is_training: False})
i = 0
for (x, y, w, h) in faces:
if (results[i] == 0):
cv2.rectangle(img, (x, y), (x+w, y+h), (255, 255, 0), 2)
cv2.putText(img, 'Fake', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 0), 2)
if (results[i] == 1):
cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)
cv2.putText(img, 'Real', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)
i += 1
cv2.imshow('img', img)
k = cv2.waitKey(30) & 0xff
if k==27:
break
cap.release()
|
[
"tensorflow.reset_default_graph",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.train.latest_checkpoint",
"cv2.rectangle",
"cv2.imshow",
"train.Train",
"utils.create_experiment_dirs",
"crop_face.FaceCropper",
"cv2.cvtColor",
"summarizer.Summarizer",
"data_loader.DataLoader",
"cv2.resize",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"cv2.waitKey",
"tensorflow.Session",
"model.MobileNet",
"cv2.putText",
"cv2.VideoCapture",
"numpy.array",
"cv2.CascadeClassifier",
"utils.parse_args"
] |
[((566, 616), 'utils.create_experiment_dirs', 'create_experiment_dirs', (['config_args.experiment_dir'], {}), '(config_args.experiment_dir)\n', (588, 616), False, 'from utils import parse_args, create_experiment_dirs, calculate_flops\n'), ((663, 687), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (685, 687), True, 'import tensorflow as tf\n'), ((742, 783), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (756, 783), True, 'import tensorflow as tf\n'), ((838, 863), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (848, 863), True, 'import tensorflow as tf\n'), ((895, 950), 'data_loader.DataLoader', 'DataLoader', (['config_args.batch_size', 'config_args.shuffle'], {}), '(config_args.batch_size, config_args.shuffle)\n', (905, 950), False, 'from data_loader import DataLoader\n'), ((1236, 1258), 'model.MobileNet', 'MobileNet', (['config_args'], {}), '(config_args)\n', (1245, 1258), False, 'from model import MobileNet\n'), ((1348, 1389), 'summarizer.Summarizer', 'Summarizer', (['sess', 'config_args.summary_dir'], {}), '(sess, config_args.summary_dir)\n', (1358, 1389), False, 'from summarizer import Summarizer\n'), ((1422, 1458), 'train.Train', 'Train', (['sess', 'model', 'data', 'summarizer'], {}), '(sess, model, data, summarizer)\n', (1427, 1458), False, 'from train import Train\n'), ((1936, 1948), 'utils.parse_args', 'parse_args', ([], {}), '()\n', (1946, 1948), False, 'from utils import parse_args, create_experiment_dirs, calculate_flops\n'), ((2102, 2152), 'utils.create_experiment_dirs', 'create_experiment_dirs', (['config_args.experiment_dir'], {}), '(config_args.experiment_dir)\n', (2124, 2152), False, 'from utils import parse_args, create_experiment_dirs, calculate_flops\n'), ((2166, 2207), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': '(True)'}), '(allow_soft_placement=True)\n', (2180, 2207), True, 'import tensorflow as tf\n'), ((354, 366), 'utils.parse_args', 'parse_args', ([], {}), '()\n', (364, 366), False, 'from utils import parse_args, create_experiment_dirs, calculate_flops\n'), ((2316, 2341), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2326, 2341), True, 'import tensorflow as tf\n'), ((2411, 2433), 'model.MobileNet', 'MobileNet', (['config_args'], {}), '(config_args)\n', (2420, 2433), False, 'from model import MobileNet\n'), ((2546, 2661), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'config_args.max_to_keep', 'keep_checkpoint_every_n_hours': '(10)', 'save_relative_paths': '(True)'}), '(max_to_keep=config_args.max_to_keep,\n keep_checkpoint_every_n_hours=10, save_relative_paths=True)\n', (2560, 2661), True, 'import tensorflow as tf\n'), ((2860, 2920), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_frontalface_default.xml"""'], {}), "('haarcascade_frontalface_default.xml')\n", (2881, 2920), False, 'import cv2\n'), ((2935, 2954), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (2951, 2954), False, 'import cv2\n'), ((2263, 2276), 'crop_face.FaceCropper', 'FaceCropper', ([], {}), '()\n', (2274, 2276), False, 'from crop_face import FaceCropper\n'), ((2758, 2812), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['config_args.checkpoint_dir'], {}), '(config_args.checkpoint_dir)\n', (2784, 2812), True, 'import tensorflow as tf\n'), ((3026, 3063), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 
'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (3038, 3063), False, 'import cv2\n'), ((4302, 4324), 'cv2.imshow', 'cv2.imshow', (['"""img"""', 'img'], {}), "('img', img)\n", (4312, 4324), False, 'import cv2\n'), ((2460, 2493), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2491, 2493), True, 'import tensorflow as tf\n'), ((2495, 2527), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2525, 2527), True, 'import tensorflow as tf\n'), ((3488, 3519), 'cv2.resize', 'cv2.resize', (['faceimg', '(224, 224)'], {}), '(faceimg, (224, 224))\n', (3498, 3519), False, 'import cv2\n'), ((4341, 4356), 'cv2.waitKey', 'cv2.waitKey', (['(30)'], {}), '(30)\n', (4352, 4356), False, 'import cv2\n'), ((3604, 3626), 'numpy.array', 'np.array', (['croppedFaces'], {}), '(croppedFaces)\n', (3612, 3626), True, 'import numpy as np\n'), ((3899, 3959), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(255, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (255, 255, 0), 2)\n', (3912, 3959), False, 'import cv2\n'), ((3976, 4055), 'cv2.putText', 'cv2.putText', (['img', '"""Fake"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(255, 255, 0)', '(2)'], {}), "(img, 'Fake', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 0), 2)\n", (3987, 4055), False, 'import cv2\n'), ((4114, 4172), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', '(0, 255, 0)', '(2)'], {}), '(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n', (4127, 4172), False, 'import cv2\n'), ((4189, 4266), 'cv2.putText', 'cv2.putText', (['img', '"""Real"""', '(x, y)', 'cv2.FONT_HERSHEY_SIMPLEX', '(2)', '(0, 255, 0)', '(2)'], {}), "(img, 'Real', (x, y), cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0), 2)\n", (4200, 4266), False, 'import cv2\n')]
|
import numpy as np
import open3d as o3d
import networkx as nx
from scipy.spatial.distance import cdist
def crack2graph(pcd, category):
""" Create connected graph for cracks from point cloud. """
# compute all pairwise distances (upper triangle)
points = np.array(pcd.points)
normals = np.array(pcd.normals)
dist = cdist(points, points)
dist = np.triu(dist, k=0)
dist[dist == 0] = np.inf
# create nodes
G = nx.Graph()
for i, pt in enumerate(points):
G.add_node(i, pos=points[i, ...], normal=normals[i, ...], category=category)
# connect until graph is completely connected
while not nx.is_connected(G):
src, tar = np.unravel_index(dist.argmin(), dist.shape)
dist[src, tar] = np.inf
if not nx.has_path(G, src, tar):
length = np.sqrt(np.sum(np.power(G.nodes[src]["pos"] - G.nodes[tar]["pos"], 2)))
G.add_edge(src, tar, weight=length)
return G
def noncrack2graph(pcd, category):
""" Create graph for noncracks from point cloud. """
# compute all pairwise distances (upper triangle)
points = np.array(pcd.points)
normals = np.array(pcd.normals)
# create nodes
G = nx.Graph()
for i, pt in enumerate(points):
G.add_node(i, pos=points[i, ...], normal=normals[i, ...], category=category)
# create edges (to obtain fully-connected graph)
for src in G.nodes:
        for tar in range(src + 1, len(G.nodes)):  # skip src itself to avoid zero-weight self-loops
length = np.sqrt(np.sum(np.power(G.nodes[src]["pos"] - G.nodes[tar]["pos"], 2)))
G.add_edge(src, tar, weight=length)
# solve traveling salesman
tsp = nx.approximation.traveling_salesman_problem
pts = tsp(G, cycle=False, method=nx.algorithms.approximation.traveling_salesman.greedy_tsp)
    # replace the fully-connected edges with the TSP path edges
G.remove_edges_from(list(G.edges))
for i in range(1, len(pts)):
points = [G.nodes[pts[i - 1]]['pos'], G.nodes[pts[i]]['pos']]
normals = [G.nodes[pts[i - 1]]['normal'], G.nodes[pts[i]]['normal']]
G.add_edge(pts[i - 1], pts[i], points=points, normals=normals)
return G
def simplify_graph(G):
""" Removes intermediate nodes with only two neighbors. """
deg = G.degree(G.nodes)
end_nodes = np.array([elem for elem in deg if elem[1] == 1])
inter_nodes = np.array([elem for elem in deg if elem[1] > 2])
inter_and_end_nodes = np.array([elem for elem in deg if elem[1] != 2])
# cycle case
if len(inter_and_end_nodes) <= 1:
# get path of largest cycle and convert to graph edge
cycles = nx.cycle_basis(G)
path = sorted(cycles, key=lambda elem: G.subgraph(elem).size(weight="weight"), reverse=True)[0]
points = [G.nodes[elem]['pos'] for elem in path]
normals = [G.nodes[elem]['normal'] for elem in path]
GG = nx.Graph(G.subgraph([path[0]]))
GG.add_edge(path[0], path[0], points=points, normals=normals)
return GG
GG = nx.Graph(G.subgraph(inter_and_end_nodes[:, 0]))
# furcations absent
if len(inter_nodes) == 0:
path = nx.shortest_path(G, end_nodes[0, 0], end_nodes[-1, 0])
points = [G.nodes[elem]['pos'] for elem in path]
normals = [G.nodes[elem]['normal'] for elem in path]
GG.add_edge(path[0], path[-1], points=points, normals=normals)
# furcations present: loop over inter nodes
for source in inter_nodes:
source = source[0]
# case: inter node to end node
for target in end_nodes:
target = target[0]
path = nx.shortest_path(G, source, target)
# if only exactly this inter node in path, add path
if np.sum(np.isin(inter_nodes[:, 0], path)) == 1:
points = [G.nodes[elem]['pos'] for elem in path]
normals = [G.nodes[elem]['normal'] for elem in path]
GG.add_edge(path[0], path[-1], points=points, normals=normals)
# case: inter node to inter node
for target in inter_nodes:
target = target[0]
path = nx.shortest_path(G, source, target)
# if only exactly these two inter node in path, add path
if np.sum(np.isin(inter_nodes[:, 0], path)) == 2:
points = [G.nodes[elem]['pos'] for elem in path]
normals = [G.nodes[elem]['normal'] for elem in path]
GG.add_edge(path[0], path[-1], points=points, normals=normals)
return GG
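# Illustrative sketch (not part of the original module): a three-node path has one
# degree-2 node, which simplify_graph folds into a single edge whose 'points'
# attribute keeps all three positions. The helper name below is hypothetical.
def _example_simplify_graph():
    G = nx.Graph()
    for i in range(3):
        G.add_node(i, pos=np.array([float(i), 0.0, 0.0]),
                   normal=np.array([0.0, 0.0, 1.0]), category="crack")
    G.add_edge(0, 1, weight=1.0)
    G.add_edge(1, 2, weight=1.0)
    GG = simplify_graph(G)  # GG keeps only nodes 0 and 2, joined by one edge
    return GG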
def uniquify_graph_nodes(G):
""" Converts node IDs into unique (position-based) IDs. """
name_mapping = dict()
for node in G.nodes:
name_mapping[node] = "_".join(G.nodes[node]["pos"].astype(str))
G = nx.relabel_nodes(G, name_mapping)
return G
def remove_duplicates(pcd):
""" Removes identical points from point cloud. """
# rounding required
pcd.points = o3d.utility.Vector3dVector(np.round(np.array(pcd.points), 4))
uni, idxs = np.unique(np.array(pcd.points), return_index=True, axis=0)
# create reduced point cloud
pcd_red = o3d.geometry.PointCloud()
pcd_red.points = o3d.utility.Vector3dVector(np.array(pcd.points)[idxs, :])
pcd_red.colors = o3d.utility.Vector3dVector(np.array(pcd.colors)[idxs, :])
pcd_red.normals = o3d.utility.Vector3dVector(np.array(pcd.normals)[idxs, :])
return pcd_red
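# Hedged end-to-end sketch (not part of the original module; the helper name and the
# random data are illustrative only, and open3d/networkx must be installed):
def _example_crack_pipeline():
    pts = np.random.rand(20, 3)
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(pts)
    pcd.colors = o3d.utility.Vector3dVector(np.zeros_like(pts))
    pcd.normals = o3d.utility.Vector3dVector(np.tile([0.0, 0.0, 1.0], (pts.shape[0], 1)))
    pcd = remove_duplicates(pcd)            # drop identical (rounded) points
    G = crack2graph(pcd, category="crack")   # spanning connectivity over the points
    G = simplify_graph(G)                    # collapse degree-2 chains into edges
    return uniquify_graph_nodes(G)           # position-based node IDs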
|
[
"scipy.spatial.distance.cdist",
"numpy.isin",
"numpy.triu",
"networkx.is_connected",
"numpy.power",
"open3d.geometry.PointCloud",
"networkx.relabel_nodes",
"networkx.shortest_path",
"networkx.Graph",
"numpy.array",
"networkx.has_path",
"networkx.cycle_basis"
] |
[((268, 288), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (276, 288), True, 'import numpy as np\n'), ((303, 324), 'numpy.array', 'np.array', (['pcd.normals'], {}), '(pcd.normals)\n', (311, 324), True, 'import numpy as np\n'), ((336, 357), 'scipy.spatial.distance.cdist', 'cdist', (['points', 'points'], {}), '(points, points)\n', (341, 357), False, 'from scipy.spatial.distance import cdist\n'), ((369, 387), 'numpy.triu', 'np.triu', (['dist'], {'k': '(0)'}), '(dist, k=0)\n', (376, 387), True, 'import numpy as np\n'), ((445, 455), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (453, 455), True, 'import networkx as nx\n'), ((1116, 1136), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (1124, 1136), True, 'import numpy as np\n'), ((1151, 1172), 'numpy.array', 'np.array', (['pcd.normals'], {}), '(pcd.normals)\n', (1159, 1172), True, 'import numpy as np\n'), ((1201, 1211), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (1209, 1211), True, 'import networkx as nx\n'), ((2260, 2308), 'numpy.array', 'np.array', (['[elem for elem in deg if elem[1] == 1]'], {}), '([elem for elem in deg if elem[1] == 1])\n', (2268, 2308), True, 'import numpy as np\n'), ((2327, 2374), 'numpy.array', 'np.array', (['[elem for elem in deg if elem[1] > 2]'], {}), '([elem for elem in deg if elem[1] > 2])\n', (2335, 2374), True, 'import numpy as np\n'), ((2401, 2449), 'numpy.array', 'np.array', (['[elem for elem in deg if elem[1] != 2]'], {}), '([elem for elem in deg if elem[1] != 2])\n', (2409, 2449), True, 'import numpy as np\n'), ((4687, 4720), 'networkx.relabel_nodes', 'nx.relabel_nodes', (['G', 'name_mapping'], {}), '(G, name_mapping)\n', (4703, 4720), True, 'import networkx as nx\n'), ((5044, 5069), 'open3d.geometry.PointCloud', 'o3d.geometry.PointCloud', ([], {}), '()\n', (5067, 5069), True, 'import open3d as o3d\n'), ((643, 661), 'networkx.is_connected', 'nx.is_connected', (['G'], {}), '(G)\n', (658, 661), True, 'import networkx as nx\n'), ((2585, 2602), 'networkx.cycle_basis', 'nx.cycle_basis', (['G'], {}), '(G)\n', (2599, 2602), True, 'import networkx as nx\n'), ((3087, 3141), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'end_nodes[0, 0]', 'end_nodes[-1, 0]'], {}), '(G, end_nodes[0, 0], end_nodes[-1, 0])\n', (3103, 3141), True, 'import networkx as nx\n'), ((4948, 4968), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (4956, 4968), True, 'import numpy as np\n'), ((774, 798), 'networkx.has_path', 'nx.has_path', (['G', 'src', 'tar'], {}), '(G, src, tar)\n', (785, 798), True, 'import networkx as nx\n'), ((3561, 3596), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'source', 'target'], {}), '(G, source, target)\n', (3577, 3596), True, 'import networkx as nx\n'), ((4064, 4099), 'networkx.shortest_path', 'nx.shortest_path', (['G', 'source', 'target'], {}), '(G, source, target)\n', (4080, 4099), True, 'import networkx as nx\n'), ((4896, 4916), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (4904, 4916), True, 'import numpy as np\n'), ((5118, 5138), 'numpy.array', 'np.array', (['pcd.points'], {}), '(pcd.points)\n', (5126, 5138), True, 'import numpy as np\n'), ((5197, 5217), 'numpy.array', 'np.array', (['pcd.colors'], {}), '(pcd.colors)\n', (5205, 5217), True, 'import numpy as np\n'), ((5277, 5298), 'numpy.array', 'np.array', (['pcd.normals'], {}), '(pcd.normals)\n', (5285, 5298), True, 'import numpy as np\n'), ((836, 890), 'numpy.power', 'np.power', (["(G.nodes[src]['pos'] - G.nodes[tar]['pos'])", '(2)'], {}), "(G.nodes[src]['pos'] 
- G.nodes[tar]['pos'], 2)\n", (844, 890), True, 'import numpy as np\n'), ((1492, 1546), 'numpy.power', 'np.power', (["(G.nodes[src]['pos'] - G.nodes[tar]['pos'])", '(2)'], {}), "(G.nodes[src]['pos'] - G.nodes[tar]['pos'], 2)\n", (1500, 1546), True, 'import numpy as np\n'), ((3684, 3716), 'numpy.isin', 'np.isin', (['inter_nodes[:, 0]', 'path'], {}), '(inter_nodes[:, 0], path)\n', (3691, 3716), True, 'import numpy as np\n'), ((4192, 4224), 'numpy.isin', 'np.isin', (['inter_nodes[:, 0]', 'path'], {}), '(inter_nodes[:, 0], path)\n', (4199, 4224), True, 'import numpy as np\n')]
|
# Taken From http://stackoverflow.com/questions/32551610/overlapping-probability-of-two-normal-distribution-with-scipy
from numpy import roots, log
from scipy.stats import norm
def solve_norm_intersect(m1, m2, std1, std2):
a = 1 / (2 * std1 ** 2) - 1 / (2 * std2 ** 2)
b = m2 / (std2 ** 2) - m1 / (std1 ** 2)
c = m1 ** 2 / (2 * std1 ** 2) - m2 ** 2 / (2 * std2 ** 2) - log(std2 / std1)
return roots([a, b, c])
def get_overlap(m1, m2, std1, std2):
# Get point of intersect
result = solve_norm_intersect(m1, m2, std1, std2)
r = result[0]
# integrate
return norm.cdf(r, m2, std2)
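# Illustrative check (not in the original snippet): with unit variances and means 0
# and 1 the two densities cross at x = 0.5, so get_overlap returns
# norm.cdf(0.5, 1, 1) ~ 0.3085, i.e. the area of the N(m2, std2) curve that lies
# below the first intersection root, not the full two-sided overlap.
if __name__ == "__main__":
    print(solve_norm_intersect(0, 1, 1, 1))  # -> [0.5]
    print(get_overlap(0, 1, 1, 1))           # -> ~0.3085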
|
[
"scipy.stats.norm.cdf",
"numpy.roots",
"numpy.log"
] |
[((412, 428), 'numpy.roots', 'roots', (['[a, b, c]'], {}), '([a, b, c])\n', (417, 428), False, 'from numpy import roots, log\n'), ((598, 619), 'scipy.stats.norm.cdf', 'norm.cdf', (['r', 'm2', 'std2'], {}), '(r, m2, std2)\n', (606, 619), False, 'from scipy.stats import norm\n'), ((384, 400), 'numpy.log', 'log', (['(std2 / std1)'], {}), '(std2 / std1)\n', (387, 400), False, 'from numpy import roots, log\n')]
|
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader, ConcatDataset, Sampler
from deepSM.smutils import SMFile
from deepSM import utils
import deepSM.beat_time_converter as BTC
from deepSM import wavutils
import h5py
from importlib import reload
reload(BTC)
reload(wavutils)
reload(utils)
def get_dataset_from_file(song_names, base_path='.', data_folder='data'):
smds = []
for song_name in song_names:
smds.append(load(song_name, base_path, data_folder))
return ConcatDataset(smds)
def get_dataset_from_raw(song_names, base_path='.'):
smds = []
for song_name in song_names:
smds.append(generate(song_name, base_path))
return ConcatDataset(smds)
def save_generated_datasets(song_names, base_path='.'):
for song_name in song_names:
smd = generate(song_name, base_path)
smd.save()
def generate(song_name, base_path='.', chunk_size=100, context_size=7):
"""
Generate an SMDataset from SM/wav files.
"""
sm = SMFile(song_name, base_path)
# May want to save the time mapping later.
btc = BTC.BeatTimeConverter(sm.offset, sm.bpms, sm.stops)
    # Will want to maintain order.
# List of strings, not ints.
diffs = list(filter(lambda x: x != 'Edit', sm.note_charts.keys()))
notes = {} # Contains only a list of notes for each difficulty.
times = {} # List of times per diff.
frames = {}
# labels = {} # List of note aligned labels for note events. {0, 1} for now.
# Track first and last notes for wav padding.
first_frame = np.inf
last_frame = -np.inf
# Find note times and frames for alignment to features.
for diff in diffs:
times[diff], notes[diff] = \
btc.gen_time_notes(sm.note_charts[diff].notes)
frames[diff] = btc.align_to_frame(times[diff])
if frames[diff][0] < first_frame:
first_frame = frames[diff][0]
if frames[diff][-1] > last_frame:
last_frame = frames[diff][-1]
# Test this!
# Test by writing beeps again.
front_pad_frames, padded_wav = wavutils.pad_wav(first_frame, last_frame, sm.wavdata)
fft_features = wavutils.gen_fft_features(padded_wav)
# N_channels = 3 (1024, 2048, 4096)
# N_frames ~ song length * 44100 / 512
# N_freqs = 80 (Number of mel coefs per frame)
N_channels, N_frames, N_freqs = fft_features.shape
# Number of possible starting frames in the song.
# Need to exclude ending lag and unusable frames at the very ends.
sample_length = N_frames - chunk_size - context_size * 2
labels = np.zeros((len(diffs), N_frames))
for i, diff in enumerate(diffs):
# Adjusting for the new frames added on to the front.
frames[diff] += front_pad_frames
# Generating final frame-aligned labels for note event:
labels[i, frames[diff]] = 1
# Testing alignment of frames.
# wavutils.test_alignment(padded_wav, frames[diff] * 512 / 44100)
return SMDataset(song_name, fft_features, labels, diffs, chunk_size,
context_size)
class SMDataset(Dataset):
"""
Dataset loader for note placement network.
Loads and feature engineers the songs and sm files for training.
Note: Frame context size is currently hard coded!
"""
def __init__(self, song_name, fft_features, labels, diffs, chunk_size,
context_size):
# Dataset properties.
self.song_name = song_name
self.fft_features = fft_features
self.labels = labels
self.diffs = diffs
self.chunk_size = chunk_size
self.context_size = context_size
        # Derived from the dataset properties above.
self.N_frames = fft_features.shape[1]
self.sample_length = self.N_frames - self.chunk_size - self.context_size * 2
def __len__(self):
# Can start at any point in the song, as long as there is enough
# room to unroll to chunk_size.
return len(self.diffs) * self.sample_length
def __getitem__(self, idx):
# Since all difficulties have the same number of frames, divide to get
# which diff, order determined by self.diffs.
# Remainder to find the frame.
# "Concatenated" representation.
diff_idx = idx // self.sample_length
frame_idx = idx % self.sample_length
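        # e.g. with 3 difficulties and sample_length == 1000, idx == 2345 gives
        # diff_idx == 2 and frame_idx == 345 (before the context_size offset below)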
# First self.context_size frames are unusable.
frame_idx += self.context_size
diff = self.diffs[diff_idx]
diff_code = utils.difficulties[diff]
# chuck_slice = slice(frame_idx, frame_idx + self.chunk_size)
chunk_slice = slice(frame_idx - self.context_size, frame_idx + self.context_size + 1)
# Get the slice of the features/labels for the chunk.
fft_features = self.fft_features[:,chunk_slice, :]
# event_labels = self.labels[diff][chunk_slice]
# CNN version. currently scalar.
event_labels = self.labels[diff_idx, frame_idx].reshape((1))
diff_vec = np.zeros(5)
diff_vec[diff_code] = 1
res = {
'fft_features': fft_features.astype(np.float32),
'diff': diff_vec.astype(np.float32),
'labels': event_labels.astype(np.float32)
}
return res
    def save(self, base_path='.', data_folder='data', fname=None):
        # song_name is needed below for the directory checks even when fname is
        # given explicitly, so resolve it outside the branch.
        song_name = self.song_name
        if fname is None:
            fname = '%s/%s/%s/%s.h5' % (base_path, data_folder, song_name, song_name)
        print(fname)
if not os.path.isdir('/'.join([base_path,data_folder])):
os.mkdir('/'.join([base_path,data_folder]))
if not os.path.isdir('/'.join([base_path,data_folder,song_name])):
os.mkdir('/'.join([base_path,data_folder,song_name]))
with h5py.File(fname, 'w') as hf:
hf.attrs['song_name'] = self.song_name
hf.attrs['diffs'] = np.array(self.diffs, dtype='S10')
hf.attrs['chunk_size'] = self.chunk_size
hf.attrs['context_size'] = self.context_size
hf.create_dataset('fft_features', data=self.fft_features)
hf.create_dataset('labels', data=self.labels)
def load(fname, base_path='.', data_folder='data'):
h5name = base_path + f'/{data_folder}/{fname}/{fname}.h5'
with h5py.File(h5name, 'r') as hf:
song_name = hf.attrs['song_name']
diffs = list(map(lambda x: x.decode('ascii'), hf.attrs['diffs']))
chunk_size = hf.attrs['chunk_size']
context_size = hf.attrs['context_size']
fft_features = hf['fft_features'].value
labels = hf['labels'].value
return SMDataset(song_name, fft_features, labels, diffs, chunk_size,
context_size)
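# Hedged usage sketch (illustrative only): 'MySong' and the batch size are
# placeholders, and the .sm/.wav pair is assumed to exist under base_path.
def _example_usage():
    smd = generate('MySong', base_path='.')   # build features/labels from SM + wav
    smd.save()                                # writes ./data/MySong/MySong.h5
    smd = load('MySong')                      # reload the cached dataset
    return DataLoader(smd, batch_size=64, shuffle=True)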
|
[
"h5py.File",
"torch.utils.data.ConcatDataset",
"numpy.zeros",
"deepSM.wavutils.pad_wav",
"deepSM.beat_time_converter.BeatTimeConverter",
"deepSM.smutils.SMFile",
"importlib.reload",
"deepSM.wavutils.gen_fft_features",
"numpy.array"
] |
[((288, 299), 'importlib.reload', 'reload', (['BTC'], {}), '(BTC)\n', (294, 299), False, 'from importlib import reload\n'), ((300, 316), 'importlib.reload', 'reload', (['wavutils'], {}), '(wavutils)\n', (306, 316), False, 'from importlib import reload\n'), ((317, 330), 'importlib.reload', 'reload', (['utils'], {}), '(utils)\n', (323, 330), False, 'from importlib import reload\n'), ((527, 546), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['smds'], {}), '(smds)\n', (540, 546), False, 'from torch.utils.data import Dataset, DataLoader, ConcatDataset, Sampler\n'), ((716, 735), 'torch.utils.data.ConcatDataset', 'ConcatDataset', (['smds'], {}), '(smds)\n', (729, 735), False, 'from torch.utils.data import Dataset, DataLoader, ConcatDataset, Sampler\n'), ((1035, 1063), 'deepSM.smutils.SMFile', 'SMFile', (['song_name', 'base_path'], {}), '(song_name, base_path)\n', (1041, 1063), False, 'from deepSM.smutils import SMFile\n'), ((1122, 1173), 'deepSM.beat_time_converter.BeatTimeConverter', 'BTC.BeatTimeConverter', (['sm.offset', 'sm.bpms', 'sm.stops'], {}), '(sm.offset, sm.bpms, sm.stops)\n', (1143, 1173), True, 'import deepSM.beat_time_converter as BTC\n'), ((2114, 2167), 'deepSM.wavutils.pad_wav', 'wavutils.pad_wav', (['first_frame', 'last_frame', 'sm.wavdata'], {}), '(first_frame, last_frame, sm.wavdata)\n', (2130, 2167), False, 'from deepSM import wavutils\n'), ((2188, 2225), 'deepSM.wavutils.gen_fft_features', 'wavutils.gen_fft_features', (['padded_wav'], {}), '(padded_wav)\n', (2213, 2225), False, 'from deepSM import wavutils\n'), ((5029, 5040), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (5037, 5040), True, 'import numpy as np\n'), ((6364, 6386), 'h5py.File', 'h5py.File', (['h5name', '"""r"""'], {}), "(h5name, 'r')\n", (6373, 6386), False, 'import h5py\n'), ((5853, 5874), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (5862, 5874), False, 'import h5py\n'), ((5966, 5999), 'numpy.array', 'np.array', (['self.diffs'], {'dtype': '"""S10"""'}), "(self.diffs, dtype='S10')\n", (5974, 5999), True, 'import numpy as np\n')]
|
from __future__ import absolute_import
from celeryTasks.celery import app
# The function takes as input:
# 1) src_path: Input image, directory, or npy.
# 2) socketid: The socket id of the connection.
# 3) result_path: The folder path where the result image will be stored.
# It should be full path in case of a single file, else the directory path.
# It should be web accessible.
# NOTE:
# 1) Its job is to classify the images according to the pre-trained model.
# 2) ignore_result=True signifies that celery won't pass any result to the backend.
# 3) It is important to import all the modules only inside the function
# 4) When running with new version of caffe do np.load(MEAN_FILE).mean(1).mean(1)
@app.task(ignore_result=True)
def classifyImages(src_path, socketid, result_path):
# Establishing connection to send results and write messages
import redis
import json
from cloudcv17 import config
rs = redis.StrictRedis(host=config.REDIS_HOST, port=6379)
try:
import caffe
import numpy as np
import os
import glob
import time
import operator
import scipy.io as sio
# Used to assign labels to the results
matWNID = sio.loadmat(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'WNID.mat'))
WNID_cells = matWNID['wordsortWNID']
# Caffe Initialisations
CAFFE_DIR = os.path.normpath(os.path.join(os.path.dirname(caffe.__file__), "..", ".."))
MODEL_FILE = os.path.join(CAFFE_DIR, 'models/bvlc_reference_caffenet/deploy.prototxt')
PRETRAINED = os.path.join(CAFFE_DIR, 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')
MEAN_FILE = os.path.join(CAFFE_DIR, 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
RAW_SCALE = 255.0
IMAGE_DIMS = (256, 256)
CHANNEL_SWAP = (2, 1, 0)
# Set CPU mode
caffe.set_mode_cpu()
# Make classifier.
classifier = caffe.Classifier(MODEL_FILE, PRETRAINED, image_dims=IMAGE_DIMS,
mean=np.load(MEAN_FILE).mean(1).mean(1), raw_scale=RAW_SCALE,
channel_swap=CHANNEL_SWAP)
# Classify and Send Results
if os.path.isdir(src_path):
for input_file in glob.glob(os.path.join(src_path, '*')):
if os.path.isfile(input_file):
# Load file
rs.publish('chat', json.dumps({'message': 'Processing ' +
os.path.basename(input_file), 'socketid': str(socketid)}))
inputs = [caffe.io.load_image(input_file)]
# Classify.
start = time.time()
prediction = classifier.predict(inputs)
timeMsg = "Completed in %.2f s." % (time.time() - start)
rs.publish('chat', json.dumps({'message': timeMsg, 'socketid': str(socketid)}))
dictionary = {}
for i, j in enumerate(prediction[0]):
dictionary[i] = j
predsorted = sorted(dictionary.iteritems(), key=operator.itemgetter(1), reverse=True)
top5 = predsorted[0:5]
topresults = []
for item in top5:
                        topresults.append([str(WNID_cells[item[0], 0][0][0]), str(item[1])])
web_result = {}
web_result[os.path.join(result_path, os.path.basename(input_file))] = topresults
rs.publish('chat', json.dumps({'web_result': json.dumps(web_result), 'socketid': str(socketid)}))
else:
input_file = src_path
# Load file
rs.publish('chat', json.dumps({'message': 'Processing ' +
os.path.basename(input_file), 'socketid': str(socketid)}))
inputs = [caffe.io.load_image(input_file)]
# Classify.
start = time.time()
prediction = classifier.predict(inputs)
timeMsg = "Completed in %.2f s." % (time.time() - start)
rs.publish('chat', json.dumps({'message': timeMsg, 'socketid': str(socketid)}))
dictionary = {}
for i, j in enumerate(prediction[0]):
dictionary[i] = j
predsorted = sorted(dictionary.iteritems(), key=operator.itemgetter(1), reverse=True)
top5 = predsorted[0:5]
topresults = []
for item in top5:
                topresults.append([str(WNID_cells[item[0], 0][0][0]), str(item[1])])
web_result = {}
web_result[result_path] = topresults
rs.publish('chat', json.dumps({'web_result': json.dumps(web_result), 'socketid': str(socketid)}))
rs.publish('chat', json.dumps({'message': 'Thank you for using CloudCV', 'socketid': str(socketid)}))
except:
import traceback
rs.publish('chat', json.dumps({'message': str(traceback.format_exc()), 'socketid': str(socketid)}))
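# Hedged usage sketch: how a caller might queue this task. The helper name, paths
# and socket id below are placeholders, not values from the original project.
def example_dispatch(socketid):
    # .delay() enqueues the task on a celery worker; nothing is returned
    # (ignore_result=True) and results are published to the redis 'chat' channel.
    classifyImages.delay('/path/to/images', socketid, '/path/to/results')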
|
[
"os.path.abspath",
"numpy.load",
"caffe.io.load_image",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"caffe.set_mode_cpu",
"time.time",
"json.dumps",
"os.path.isfile",
"traceback.format_exc",
"redis.StrictRedis",
"celeryTasks.celery.app.task",
"os.path.join",
"operator.itemgetter"
] |
[((709, 737), 'celeryTasks.celery.app.task', 'app.task', ([], {'ignore_result': '(True)'}), '(ignore_result=True)\n', (717, 737), False, 'from celeryTasks.celery import app\n'), ((931, 983), 'redis.StrictRedis', 'redis.StrictRedis', ([], {'host': 'config.REDIS_HOST', 'port': '(6379)'}), '(host=config.REDIS_HOST, port=6379)\n', (948, 983), False, 'import redis\n'), ((1498, 1571), 'os.path.join', 'os.path.join', (['CAFFE_DIR', '"""models/bvlc_reference_caffenet/deploy.prototxt"""'], {}), "(CAFFE_DIR, 'models/bvlc_reference_caffenet/deploy.prototxt')\n", (1510, 1571), False, 'import os\n'), ((1593, 1689), 'os.path.join', 'os.path.join', (['CAFFE_DIR', '"""models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel"""'], {}), "(CAFFE_DIR,\n 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel')\n", (1605, 1689), False, 'import os\n'), ((1706, 1775), 'os.path.join', 'os.path.join', (['CAFFE_DIR', '"""python/caffe/imagenet/ilsvrc_2012_mean.npy"""'], {}), "(CAFFE_DIR, 'python/caffe/imagenet/ilsvrc_2012_mean.npy')\n", (1718, 1775), False, 'import os\n'), ((1899, 1919), 'caffe.set_mode_cpu', 'caffe.set_mode_cpu', ([], {}), '()\n', (1917, 1919), False, 'import caffe\n'), ((2246, 2269), 'os.path.isdir', 'os.path.isdir', (['src_path'], {}), '(src_path)\n', (2259, 2269), False, 'import os\n'), ((4033, 4044), 'time.time', 'time.time', ([], {}), '()\n', (4042, 4044), False, 'import time\n'), ((1431, 1462), 'os.path.dirname', 'os.path.dirname', (['caffe.__file__'], {}), '(caffe.__file__)\n', (1446, 1462), False, 'import os\n'), ((2311, 2338), 'os.path.join', 'os.path.join', (['src_path', '"""*"""'], {}), "(src_path, '*')\n", (2323, 2338), False, 'import os\n'), ((2360, 2386), 'os.path.isfile', 'os.path.isfile', (['input_file'], {}), '(input_file)\n', (2374, 2386), False, 'import os\n'), ((3955, 3986), 'caffe.io.load_image', 'caffe.io.load_image', (['input_file'], {}), '(input_file)\n', (3974, 3986), False, 'import caffe\n'), ((1262, 1287), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (1277, 1287), False, 'import os\n'), ((2732, 2743), 'time.time', 'time.time', ([], {}), '()\n', (2741, 2743), False, 'import time\n'), ((4145, 4156), 'time.time', 'time.time', ([], {}), '()\n', (4154, 4156), False, 'import time\n'), ((4431, 4453), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (4450, 4453), False, 'import operator\n'), ((2638, 2669), 'caffe.io.load_image', 'caffe.io.load_image', (['input_file'], {}), '(input_file)\n', (2657, 2669), False, 'import caffe\n'), ((4779, 4801), 'json.dumps', 'json.dumps', (['web_result'], {}), '(web_result)\n', (4789, 4801), False, 'import json\n'), ((2860, 2871), 'time.time', 'time.time', ([], {}), '()\n', (2869, 2871), False, 'import time\n'), ((3186, 3208), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (3205, 3208), False, 'import operator\n'), ((3525, 3553), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (3541, 3553), False, 'import os\n'), ((3874, 3902), 'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (3890, 3902), False, 'import os\n'), ((5035, 5057), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5055, 5057), False, 'import traceback\n'), ((2076, 2094), 'numpy.load', 'np.load', (['MEAN_FILE'], {}), '(MEAN_FILE)\n', (2083, 2094), True, 'import numpy as np\n'), ((3634, 3656), 'json.dumps', 'json.dumps', (['web_result'], {}), '(web_result)\n', (3644, 3656), False, 'import json\n'), ((2549, 2577), 
'os.path.basename', 'os.path.basename', (['input_file'], {}), '(input_file)\n', (2565, 2577), False, 'import os\n')]
|
import os
import cv2
import pathlib
import numpy as np
class image_warper():
def __init__(self, image_path, save_folder="out", windows_size=(1200, 700)):
self.image_path = image_path
self.save_folder = save_folder
self.windows_size = windows_size
self.setup()
def setup(self):
"""setup for opencv window"""
self.window_name = "warp image"
cv2.namedWindow(self.window_name, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.window_name, self.windows_size[0], self.windows_size[1])
cv2.setMouseCallback(self.window_name, self.draw_circle)
def warp_image(self):
"""warps image with opecv findHomography and warpPerspective functions"""
image = cv2.imread(self.image_path)
points = np.array(self.points)
        # destination corners of the output image: top left, top right, bottom left, bottom right
        image_array = np.array([[0, 0],[image.shape[1], 0],[0, image.shape[0]],[image.shape[1], image.shape[0]]])
        # homography that maps the four selected points onto those corners
        h, status = cv2.findHomography(points, image_array)
new_image = cv2.warpPerspective(image, h, (image.shape[1],image.shape[0]))
return new_image
def refresh_image(self):
self.image = cv2.imread(self.image_path)
self.points = []
def draw_circle(self, event, x, y, flags, param):
if(event == cv2.EVENT_LBUTTONDOWN):
if(len(self.points) < 4):
cv2.circle(self.image,(x,y),3,(255,0,0),-1)
points_temp = []
points_temp.append(x)
points_temp.append(y)
self.points.append(points_temp)
print("points: {0}".format(self.points))
else:
print("can not select more than 4 points")
def create_unique_file_name(self, file_path, before_number="(", after_number=")"):
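        """creates a unique file name by appending a counter, e.g. name(1).ext, until the path is unused"""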
temp_file_path = file_path
file_name_counter = 1
if(os.path.isfile(temp_file_path)):
while(True):
save_path, temp_file_name = os.path.split(temp_file_path)
temp_file_name, temp_file_extension = os.path.splitext(temp_file_name)
temp_file_name = "{0}{1}{2}{3}{4}".format(temp_file_name, before_number, file_name_counter, after_number, temp_file_extension)
temp_file_path = os.path.join(save_path, temp_file_name)
file_name_counter += 1
if(os.path.isfile(temp_file_path)):
temp_file_path = file_path
else:
file_path = temp_file_path
break
return file_path
def start_warper(self):
"""starts opencv window"""
print("please select points with this order.\ntop left, top right, bottom left, bottom right")
self.image = cv2.imread(self.image_path)
self.points = []
while(True):
cv2.imshow(self.window_name, self.image)
key = cv2.waitKey(1)
if(key == ord("s")):
if(len(self.points) == 4):
# warp image
new_image = self.warp_image()
# create new path
pathlib.Path(self.save_folder).mkdir(parents=True, exist_ok=True)
_, image_name = os.path.split(self.image_path)
save_path = os.path.join(self.save_folder, image_name)
save_path = self.create_unique_file_name(save_path)
# save and show new image
cv2.imwrite(save_path, new_image)
cv2.imshow("warped image", new_image)
print("image saved '{0}'".format(save_path))
# refresh image
self.refresh_image()
else:
print(self.points)
print("select more points or\nclear selected points with c")
# options
if(key == ord("c")):
print("image reloaded")
self.refresh_image()
# exit
if(cv2.getWindowProperty(self.window_name, 0) < 0):
break
if(key == 27):
break
cv2.destroyAllWindows()
if __name__ == "__main__":
warper = image_warper("example_images/1.jpeg")
warper.start_warper()
|
[
"cv2.warpPerspective",
"cv2.circle",
"os.path.join",
"cv2.waitKey",
"cv2.imwrite",
"cv2.imshow",
"cv2.imread",
"os.path.isfile",
"cv2.setMouseCallback",
"numpy.array",
"os.path.splitext",
"pathlib.Path",
"cv2.resizeWindow",
"cv2.destroyAllWindows",
"cv2.findHomography",
"cv2.getWindowProperty",
"cv2.namedWindow",
"os.path.split"
] |
[((407, 459), 'cv2.namedWindow', 'cv2.namedWindow', (['self.window_name', 'cv2.WINDOW_NORMAL'], {}), '(self.window_name, cv2.WINDOW_NORMAL)\n', (422, 459), False, 'import cv2\n'), ((468, 546), 'cv2.resizeWindow', 'cv2.resizeWindow', (['self.window_name', 'self.windows_size[0]', 'self.windows_size[1]'], {}), '(self.window_name, self.windows_size[0], self.windows_size[1])\n', (484, 546), False, 'import cv2\n'), ((555, 611), 'cv2.setMouseCallback', 'cv2.setMouseCallback', (['self.window_name', 'self.draw_circle'], {}), '(self.window_name, self.draw_circle)\n', (575, 611), False, 'import cv2\n'), ((738, 765), 'cv2.imread', 'cv2.imread', (['self.image_path'], {}), '(self.image_path)\n', (748, 765), False, 'import cv2\n'), ((783, 804), 'numpy.array', 'np.array', (['self.points'], {}), '(self.points)\n', (791, 804), True, 'import numpy as np\n'), ((832, 930), 'numpy.array', 'np.array', (['[[0, 0], [image.shape[1], 0], [0, image.shape[0]], [image.shape[1], image.\n shape[0]]]'], {}), '([[0, 0], [image.shape[1], 0], [0, image.shape[0]], [image.shape[1],\n image.shape[0]]])\n', (840, 930), True, 'import numpy as np\n'), ((945, 984), 'cv2.findHomography', 'cv2.findHomography', (['points', 'image_array'], {}), '(points, image_array)\n', (963, 984), False, 'import cv2\n'), ((1006, 1069), 'cv2.warpPerspective', 'cv2.warpPerspective', (['image', 'h', '(image.shape[1], image.shape[0])'], {}), '(image, h, (image.shape[1], image.shape[0]))\n', (1025, 1069), False, 'import cv2\n'), ((1146, 1173), 'cv2.imread', 'cv2.imread', (['self.image_path'], {}), '(self.image_path)\n', (1156, 1173), False, 'import cv2\n'), ((1852, 1882), 'os.path.isfile', 'os.path.isfile', (['temp_file_path'], {}), '(temp_file_path)\n', (1866, 1882), False, 'import os\n'), ((2736, 2763), 'cv2.imread', 'cv2.imread', (['self.image_path'], {}), '(self.image_path)\n', (2746, 2763), False, 'import cv2\n'), ((4157, 4180), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4178, 4180), False, 'import cv2\n'), ((2822, 2862), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'self.image'], {}), '(self.window_name, self.image)\n', (2832, 2862), False, 'import cv2\n'), ((2881, 2895), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2892, 2895), False, 'import cv2\n'), ((1352, 1402), 'cv2.circle', 'cv2.circle', (['self.image', '(x, y)', '(3)', '(255, 0, 0)', '(-1)'], {}), '(self.image, (x, y), 3, (255, 0, 0), -1)\n', (1362, 1402), False, 'import cv2\n'), ((1954, 1983), 'os.path.split', 'os.path.split', (['temp_file_path'], {}), '(temp_file_path)\n', (1967, 1983), False, 'import os\n'), ((2038, 2070), 'os.path.splitext', 'os.path.splitext', (['temp_file_name'], {}), '(temp_file_name)\n', (2054, 2070), False, 'import os\n'), ((2247, 2286), 'os.path.join', 'os.path.join', (['save_path', 'temp_file_name'], {}), '(save_path, temp_file_name)\n', (2259, 2286), False, 'import os\n'), ((2345, 2375), 'os.path.isfile', 'os.path.isfile', (['temp_file_path'], {}), '(temp_file_path)\n', (2359, 2375), False, 'import os\n'), ((4027, 4069), 'cv2.getWindowProperty', 'cv2.getWindowProperty', (['self.window_name', '(0)'], {}), '(self.window_name, 0)\n', (4048, 4069), False, 'import cv2\n'), ((3217, 3247), 'os.path.split', 'os.path.split', (['self.image_path'], {}), '(self.image_path)\n', (3230, 3247), False, 'import os\n'), ((3280, 3322), 'os.path.join', 'os.path.join', (['self.save_folder', 'image_name'], {}), '(self.save_folder, image_name)\n', (3292, 3322), False, 'import os\n'), ((3482, 3515), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 
'new_image'], {}), '(save_path, new_image)\n', (3493, 3515), False, 'import cv2\n'), ((3536, 3573), 'cv2.imshow', 'cv2.imshow', (['"""warped image"""', 'new_image'], {}), "('warped image', new_image)\n", (3546, 3573), False, 'import cv2\n'), ((3115, 3145), 'pathlib.Path', 'pathlib.Path', (['self.save_folder'], {}), '(self.save_folder)\n', (3127, 3145), False, 'import pathlib\n')]
|
# -*- coding: utf-8 -*-
import json
import re
import warnings
import numpy as np
from bs4 import BeautifulSoup
from astropy.io import ascii
from astropy.time import Time
from astropy.table import Table, QTable, Column
import astropy.units as u
from astropy.coordinates import EarthLocation, Angle, SkyCoord
from astropy._erfa.core import ErfaWarning
from ..query import BaseQuery
from . import conf
from ..utils import async_to_sync, class_or_instance
from ..exceptions import InvalidQueryError
__all__ = ['MPCClass']
@async_to_sync
class MPCClass(BaseQuery):
MPC_URL = 'https://' + conf.web_service_server + '/web_service'
# The authentication credentials for the MPC web service are publicly
# available and can be openly viewed on the documentation page at
# https://minorplanetcenter.net/web_service/
MPC_USERNAME = 'mpc_ws'
MPC_PASSWORD = '<PASSWORD>'
MPES_URL = 'https://' + conf.mpes_server + '/cgi-bin/mpeph2.cgi'
OBSERVATORY_CODES_URL = ('https://' + conf.web_service_server +
'/iau/lists/ObsCodes.html')
MPCOBS_URL = conf.mpcdb_server
TIMEOUT = conf.timeout
_ephemeris_types = {
'equatorial': 'a',
'heliocentric': 's',
'geocentric': 'G'
}
_default_number_of_steps = {
'd': '21',
'h': '49',
'm': '121',
's': '301'
}
_proper_motions = {
'total': 't',
'coordinate': 'c',
'sky': 's'
}
def __init__(self):
super(MPCClass, self).__init__()
def query_object_async(self, target_type, get_query_payload=False, *args, **kwargs):
"""
Query around a specific object within a given mission catalog. When
searching for a comet, it will return the entry with the latest epoch.
The following are valid query parameters for the MPC API search. The
params list and description are from
https://minorplanetcenter.net/web_service/ and are accurate as of
3/6/2018.
Parameters
----------
target_type : str
Search for either a comet or an asteroid, with the two valid values being,
naturally, "comet" and "asteroid"
updated_at : str
Date-time when the Orbits table was last updated (YYYY-MM-DDThh:mm:ssZ). Note:
the documentation lists this field as "orbit-updated-at", but the service
response contained "updated_at", which appears to correlate and can also be
used as a query parameter.
name : str
The object's name; e.g., Eros. This can be queried as 'Eros' or 'eros'. If
the object has not yet been named, this field will be 'null'.
number : integer
The object's number; e.g., 433. If the object has not yet been numbered,
this field will be 'null'.
designation : str
The object's provisional designation (e.g., 2014 AA) if it has not been
numbered yet. If the object has been numbered, this number is its permanent
designation and is what the 'designation' parameter will return, padded with
leading zeroes for a total of 7 digits; e.g., '0000433'. When querying for
provisional designations, because white spaces aren't allowed in the query,
escape the space with either a '+' or '%20'; e.g., '2014+AA' or '2014%20AA'.
epoch : str
The date/time of reference for the current orbital parameters.
epoch_jd : str
The Julian Date of the epoch.
period (years) : str
Time it takes for the object to complete one orbit around the Sun.
semimajor_axis : str
a, one half of the longest diameter of the orbital ellipse. (AU)
aphelion_distance : str
The distance when the object is furthest from the Sun in its orbit. (AU)
perihelion_distance : str
The distance when the object is nearest to the Sun in its orbit. (AU)
perihelion_date : str
Date when the object is at perihelion, i.e., reaches its closest point to
the Sun.
perihelion_date_jd : str
The Julian Date of perihelion.
argument_of_perihelion (°) : str
ω, defines the orientation of the ellipse in the orbital plane and is the
angle from the object's ascending node to its perihelion, measured in the
direction of motion. Range: 0–360°.
ascending_node (°) : str
Ω, the longitude of the ascending node, it defines the horizontal orientation
of the ellipse with respect to the ecliptic, and is the angle measured
counterclockwise (as seen from North of the ecliptic) from the First Point of
Aries to the ascending node. Range: 0–360°.
inclination (°) : str
i, the angle between the object's orbit and the ecliptic. Range: 0–180°.
eccentricity : str
e, a measure of how far the orbit shape departs from a circle. Range: 0–1,
with e = 0 being a perfect circle, intermediate values being ellipses ever
more elongated as e increases, and e = 1 describing a parabola.
mean_anomaly (°) : str
M, is related to the position of the object along its orbit at the given
epoch. Range: 0–360°.
mean_daily_motion (°/day) : str
n, a measure of the average speed of the object along its orbit.
absolute_magnitude : str
H, apparent magnitude the object would have if it were observed from 1 AU
away at zero phase, while it was 1 AU away from the Sun. Note this is
geometrically impossible and is equivalent to observing the object from the
center of the Sun.
phase_slope : str
G, slope parameter as calculated or assumed by the MPC. The slope parameter
is a measure of how much brighter the object gets as its phase angle
decreases. When not known, a value of G = 0.15 is assumed.
orbit_type : integer
Asteroids are classified from a dynamics perspective by the area of the Solar
System in which they orbit. A number identifies each orbit type.
0: Unclassified (mostly Main Belters)
1: Atiras
2: Atens
3: Apollos
4: Amors
5: Mars Crossers
6: Hungarias
7: Phocaeas
8: Hildas
9: Jupiter Trojans
10: Distant Objects
delta_v (km/sec) : float
Δv, an estimate of the amount of energy necessary to jump from LEO (Low Earth
Orbit) to the object's orbit.
tisserand_jupiter : float
TJ, Tisserand parameter with respect to Jupiter, which is a quasi-invariant
value for each object and is frequently used to distinguish objects
(typically TJ > 3) from Jupiter-family comets (typically 2 < TJ < 3).
neo : bool
value = 1 flags Near Earth Objects (NEOs).
km_neo : bool
value = 1 flags NEOs larger than ~1 km in diameter.
pha : bool
value = 1 flags Potentially Hazardous Asteroids (PHAs).
mercury_moid : float
Minimum Orbit Intersection Distance with respect to Mercury. (AU)
venus_moid : float
Minimum Orbit Intersection Distance with respect to Venus. (AU)
earth_moid : float
Minimum Orbit Intersection Distance with respect to Earth. (AU)
mars_moid : float
Minimum Orbit Intersection Distance with respect to Mars. (AU)
jupiter_moid : float
Minimum Orbit Intersection Distance with respect to Jupiter. (AU)
saturn_moid : float
Minimum Orbit Intersection Distance with respect to Saturn. (AU)
uranus_moid : float
Minimum Orbit Intersection Distance with respect to Uranus. (AU)
neptune_moid : float
Minimum Orbit Intersection Distance with respect to Neptune. (AU)
"""
mpc_endpoint = self.get_mpc_object_endpoint(target_type)
kwargs['limit'] = 1
return self.query_objects_async(target_type, get_query_payload, *args, **kwargs)
def query_objects_async(self, target_type, get_query_payload=False, *args, **kwargs):
"""
Query around a specific object within a given mission catalog
The following are valid query parameters for the MPC API search. The params list and
description are from https://minorplanetcenter.net/web_service/ and are accurate
as of 3/6/2018:
Parameters
----------
target_type : str
Search for either a comet or an asteroid, with the two valid values being,
naturally, "comet" and "asteroid"
updated_at : str
Date-time when the Orbits table was last updated (YYYY-MM-DDThh:mm:ssZ). Note:
the documentation lists this field as "orbit-updated-at", but the service
response contained "updated_at", which appears to correlate and can also be
used as a query parameter.
name : str
The object's name; e.g., Eros. This can be queried as 'Eros' or 'eros'. If
the object has not yet been named, this field will be 'null'.
number : integer
The object's number; e.g., 433. If the object has not yet been numbered,
this field will be 'null'.
designation : str
The object's provisional designation (e.g., 2014 AA) if it has not been
numbered yet. If the object has been numbered, this number is its permanent
designation and is what the 'designation' parameter will return, padded with
leading zeroes for a total of 7 digits; e.g., '0000433'. When querying for
provisional designations, because white spaces aren't allowed in the query,
escape the space with either a '+' or '%20'; e.g., '2014+AA' or '2014%20AA'.
epoch : str
The date/time of reference for the current orbital parameters.
epoch_jd : str
The Julian Date of the epoch.
period (years) : str
Time it takes for the object to complete one orbit around the Sun.
semimajor_axis : str
a, one half of the longest diameter of the orbital ellipse. (AU)
aphelion_distance : str
The distance when the object is furthest from the Sun in its orbit. (AU)
perihelion_distance : str
The distance when the object is nearest to the Sun in its orbit. (AU)
perihelion_date : str
Date when the object is at perihelion, i.e., reaches its closest point to
the Sun.
perihelion_date_jd : str
The Julian Date of perihelion.
argument_of_perihelion (°) : str
            ω defines the orientation of the ellipse in the orbital plane and is the
angle from the object's ascending node to its perihelion, measured in the
direction of motion. Range: 0–360°.
ascending_node (°) : str
            Ω, the longitude of the ascending node, defines the horizontal orientation
of the ellipse with respect to the ecliptic, and is the angle measured
counterclockwise (as seen from North of the ecliptic) from the First Point of
Aries to the ascending node. Range: 0–360°.
inclination (°) : str
i, the angle between the object's orbit and the ecliptic. Range: 0–180°.
eccentricity : str
e, a measure of how far the orbit shape departs from a circle. Range: 0–1,
with e = 0 being a perfect circle, intermediate values being ellipses ever
more elongated as e increases, and e = 1 describing a parabola.
mean_anomaly (°) : str
            M is related to the position of the object along its orbit at the given
epoch. Range: 0–360°.
mean_daily_motion (°/day) : str
n, a measure of the average speed of the object along its orbit.
absolute_magnitude : str
H, apparent magnitude the object would have if it were observed from 1 AU
away at zero phase, while it was 1 AU away from the Sun. Note this is
geometrically impossible and is equivalent to observing the object from the
center of the Sun.
phase_slope : str
G, slope parameter as calculated or assumed by the MPC. The slope parameter
is a measure of how much brighter the object gets as its phase angle
decreases. When not known, a value of G = 0.15 is assumed.
orbit_type : integer
Asteroids are classified from a dynamics perspective by the area of the Solar
System in which they orbit. A number identifies each orbit type.
0: Unclassified (mostly Main Belters)
1: Atiras
2: Atens
3: Apollos
4: Amors
5: Mars Crossers
6: Hungarias
7: Phocaeas
8: Hildas
9: Jupiter Trojans
10: Distant Objects
delta_v (km/sec) : float
Δv, an estimate of the amount of energy necessary to jump from LEO (Low Earth
Orbit) to the object's orbit.
tisserand_jupiter : float
TJ, Tisserand parameter with respect to Jupiter, which is a quasi-invariant
            value for each object and is frequently used to distinguish asteroids
(typically TJ > 3) from Jupiter-family comets (typically 2 < TJ < 3).
neo : bool
value = 1 flags Near Earth Objects (NEOs).
km_neo : bool
value = 1 flags NEOs larger than ~1 km in diameter.
pha : bool
value = 1 flags Potentially Hazardous Asteroids (PHAs).
mercury_moid : float
Minimum Orbit Intersection Distance with respect to Mercury. (AU)
venus_moid : float
Minimum Orbit Intersection Distance with respect to Venus. (AU)
earth_moid : float
Minimum Orbit Intersection Distance with respect to Earth. (AU)
mars_moid : float
Minimum Orbit Intersection Distance with respect to Mars. (AU)
jupiter_moid : float
Minimum Orbit Intersection Distance with respect to Jupiter. (AU)
saturn_moid : float
Minimum Orbit Intersection Distance with respect to Saturn. (AU)
uranus_moid : float
Minimum Orbit Intersection Distance with respect to Uranus. (AU)
neptune_moid : float
Minimum Orbit Intersection Distance with respect to Neptune. (AU)
limit : integer
Limit the number of results to the given value
"""
mpc_endpoint = self.get_mpc_object_endpoint(target_type)
if (target_type == 'comet'):
kwargs['order_by_desc'] = "epoch"
request_args = self._args_to_object_payload(**kwargs)
# Return payload if requested
if get_query_payload:
return request_args
self.query_type = 'object'
auth = (self.MPC_USERNAME, self.MPC_PASSWORD)
return self._request('GET', mpc_endpoint, params=request_args, auth=auth)
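    # --- Illustrative usage (not part of the original module) ---
    # A minimal sketch, assuming the synchronous wrappers that astroquery
    # normally generates from the *_async methods (MPC.query_object,
    # MPC.query_objects) are available on the MPC instance defined at the
    # bottom of this module:
    #
    #   >>> from astroquery.mpc import MPC
    #   >>> eros = MPC.query_object('asteroid', name='eros')      # doctest: +SKIP
    #   >>> neos = MPC.query_objects('asteroid', neo=1, limit=5)  # doctest: +SKIP
    #
    # Each call returns the decoded JSON payload, as produced by
    # _parse_result for 'object' queries.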
def get_mpc_object_endpoint(self, target_type):
mpc_endpoint = self.MPC_URL
if target_type == 'asteroid':
mpc_endpoint = mpc_endpoint + '/search_orbits'
elif target_type == 'comet':
mpc_endpoint = mpc_endpoint + '/search_comet_orbits'
return mpc_endpoint
@class_or_instance
def get_ephemeris_async(self, target, location='500', start=None, step='1d',
number=None, ut_offset=0, eph_type='equatorial',
ra_format=None, dec_format=None,
proper_motion='total', proper_motion_unit='arcsec/h',
suppress_daytime=False, suppress_set=False,
perturbed=True, unc_links=False,
get_query_payload=False,
get_raw_response=False, cache=False):
r"""
Object ephemerides from the Minor Planet Ephemeris Service.
Parameters
----------
target : str
Designation of the object of interest. See Notes for
acceptable formats.
location : str, array-like, or `~astropy.coordinates.EarthLocation`, optional
Observer's location as an IAU observatory code, a
3-element array of Earth longitude, latitude, altitude, or
a `~astropy.coordinates.EarthLocation`. Longitude and
latitude should be anything that initializes an
`~astropy.coordinates.Angle` object, and altitude should
initialize an `~astropy.units.Quantity` object (with units
of length). If ``None``, then the geocenter (code 500) is
used.
start : str or `~astropy.time.Time`, optional
First epoch of the ephemeris as a string (UT), or astropy
`~astropy.time.Time`. Strings are parsed by
`~astropy.time.Time`. If ``None``, then today is used.
Valid dates span the time period 1900 Jan 1 - 2099 Dec 31
[MPES]_.
step : str or `~astropy.units.Quantity`, optional
The ephemeris step size or interval in units of days,
hours, minutes, or seconds. Strings are parsed by
`~astropy.units.Quantity`. All inputs are rounded to the
nearest integer. Default is 1 day.
number : int, optional
The number of ephemeris dates to compute. Must be ≤1441.
If ``None``, the value depends on the units of ``step``: 21
for days, 49 for hours, 121 for minutes, or 301 for
seconds.
ut_offset : int, optional
Number of hours to offset from 0 UT for daily ephemerides.
eph_type : str, optional
Specify the type of ephemeris::
equatorial: RA and Dec (default)
heliocentric: heliocentric position and velocity vectors
geocentric: geocentric position vector
ra_format : dict, optional
Format the RA column with
`~astropy.coordinates.Angle.to_string` using these keyword
arguments, e.g.,
``{'sep': ':', 'unit': 'hourangle', 'precision': 1}``.
dec_format : dict, optional
Format the Dec column with
`~astropy.coordinates.Angle.to_string` using these keyword
arguments, e.g., ``{'sep': ':', 'precision': 0}``.
proper_motion : str, optional
total: total motion and direction (default)
coordinate: separate RA and Dec coordinate motion
sky: separate RA and Dec sky motion (i.e., includes a
cos(Dec) term).
proper_motion_unit : string or Unit, optional
Convert proper motion to this unit. Must be an angular
rate. Default is 'arcsec/h'.
suppress_daytime : bool, optional
Suppress output when the Sun is above the local
horizon. (default ``False``)
suppress_set : bool, optional
Suppress output when the object is below the local
horizon. (default ``False``)
perturbed : bool, optional
Generate perturbed ephemerides for unperturbed orbits
(default ``True``).
unc_links : bool, optional
Return columns with uncertainty map and offset links, if
available.
get_query_payload : bool, optional
Return the HTTP request parameters as a dictionary
(default: ``False``).
get_raw_response : bool, optional
Return raw data without parsing into a table (default:
``False``).
cache : bool, optional
Cache results or use cached results (default: ``False``).
Returns
-------
response : `requests.Response`
The HTTP response returned from the service.
Notes
-----
See the MPES user's guide [MPES]_ for details on options and
implementation.
MPES allows azimuths to be measured eastwards from the north
meridian, or westwards from the south meridian. However, the
`~astropy.coordinates.AltAz` coordinate frame assumes
eastwards of north. To remain consistent with Astropy,
eastwards of north is used.
Acceptable target names [MPES]_ are listed in the tables below.
.. attention:: Asteroid designations in the text version of the
documentation may be prefixed with a backslash, which
should be ignored. This is to force correct rendering of
the designation in the rendered versions of the
documentation (e.g., HTML).
+------------+-----------------------------------+
| Target | Description |
+============+===================================+
| \(3202) | Numbered minor planet (3202) |
+------------+-----------------------------------+
| 14829 | Numbered minor planet (14829) |
+------------+-----------------------------------+
| 1997 XF11 | Unnumbered minor planet 1997 XF11 |
+------------+-----------------------------------+
| 1P | Comet 1P/Halley |
+------------+-----------------------------------+
| C/2003 A2 | Comet C/2003 A2 (Gleason) |
+------------+-----------------------------------+
| P/2003 CP7 | Comet P/2003 CP7 (LINEAR-NEAT) |
+------------+-----------------------------------+
        For comets, P/ and C/ are interchangeable. The designation
may also be in a packed format:
+------------+-----------------------------------+
| Target | Description |
+============+===================================+
| 00233 | Numbered minor planet (233) |
+------------+-----------------------------------+
| K03A07A | Unnumbered minor planet 2003 AA7 |
+------------+-----------------------------------+
| PK03C07P | Comet P/2003 CP7 (LINEAR-NEAT) |
+------------+-----------------------------------+
| 0039P | Comet 39P/Oterma |
+------------+-----------------------------------+
You may also search by name:
+------------+-----------------------------------+
| Target | Description |
+============+===================================+
| Encke | \(9134) Encke |
+------------+-----------------------------------+
| Africa | \(1193) Africa |
+------------+-----------------------------------+
| Africano | \(6391) Africano |
+------------+-----------------------------------+
| P/Encke | 2P/Encke |
+------------+-----------------------------------+
| C/Encke | 2P/Encke |
+------------+-----------------------------------+
| C/Gleason | C/2003 A2 (Gleason) |
+------------+-----------------------------------+
If a comet name is not unique, the first match will be
returned.
References
----------
.. [MPES] <NAME>. The Minor Planet Ephemeris Service.
https://minorplanetcenter.net/iau/info/MPES.pdf (retrieved
2018 June 19).
.. IAU Minor Planet Center. List of Observatory codes.
https://minorplanetcenter.net/iau/lists/ObsCodesF.html
(retrieved 2018 June 19).
Examples
--------
>>> from astroquery.mpc import MPC
        >>> tab = MPC.get_ephemeris('(24)', location=568,
... start='2003-02-26', step='100d', number=3) # doctest: +SKIP
>>> print(tab) # doctest: +SKIP
"""
# parameter checks
if type(location) not in (str, int, EarthLocation):
if hasattr(location, '__iter__'):
if len(location) != 3:
raise ValueError(
"location arrays require three values:"
" longitude, latitude, and altitude")
else:
raise TypeError(
"location must be a string, integer, array-like,"
" or astropy EarthLocation")
if start is not None:
_start = Time(start)
else:
_start = None
# step must be one of these units, and must be an integer (we
# will convert to an integer later). MPES fails for large
# integers, so we cannot just convert everything to seconds.
_step = u.Quantity(step)
if _step.unit not in [u.d, u.h, u.min, u.s]:
raise ValueError(
'step must have units of days, hours, minutes, or seconds.')
if number is not None:
if number > 1441:
raise ValueError('number must be <=1441')
if eph_type not in self._ephemeris_types.keys():
raise ValueError("eph_type must be one of {}".format(
self._ephemeris_types.keys()))
if proper_motion not in self._proper_motions.keys():
raise ValueError("proper_motion must be one of {}".format(
self._proper_motions.keys()))
if not u.Unit(proper_motion_unit).is_equivalent('rad/s'):
raise ValueError("proper_motion_unit must be an angular rate.")
# setup payload
request_args = self._args_to_ephemeris_payload(
target=target, ut_offset=ut_offset, suppress_daytime=suppress_daytime,
suppress_set=suppress_set, perturbed=perturbed, location=location,
start=_start, step=_step, number=number, eph_type=eph_type,
proper_motion=proper_motion)
# store for retrieval in _parse_result
self._ra_format = ra_format
self._dec_format = dec_format
self._proper_motion_unit = u.Unit(proper_motion_unit)
self._unc_links = unc_links
if get_query_payload:
return request_args
self.query_type = 'ephemeris'
response = self._request('POST', self.MPES_URL, data=request_args)
return response
@class_or_instance
def get_observatory_codes_async(self, get_raw_response=False, cache=True):
"""
Table of observatory codes from the IAU Minor Planet Center.
Parameters
----------
get_raw_response : bool, optional
Return raw data without parsing into a table (default:
`False`).
cache : bool, optional
Cache results or use cached results (default: `True`).
Returns
-------
response : `requests.Response`
The HTTP response returned from the service.
References
----------
.. IAU Minor Planet Center. List of Observatory codes.
https://minorplanetcenter.net/iau/lists/ObsCodesF.html
(retrieved 2018 June 19).
Examples
--------
>>> from astroquery.mpc import MPC
>>> obs = MPC.get_observatory_codes() # doctest: +SKIP
>>> print(obs[295]) # doctest: +SKIP
Code Longitude cos sin Name
---- --------- -------- --------- -------------
309 289.59569 0.909943 -0.414336 Cerro Paranal
"""
self.query_type = 'observatory_code'
response = self._request('GET', self.OBSERVATORY_CODES_URL,
timeout=self.TIMEOUT, cache=cache)
return response
@class_or_instance
def get_observatory_location(self, code, cache=True):
"""
IAU observatory location.
Parameters
----------
code : string
Three-character IAU observatory code.
cache : bool, optional
Cache observatory table or use cached results (default:
`True`).
Returns
-------
longitude : Angle
Observatory longitude (east of Greenwich).
cos : float
Parallax constant ``rho * cos(phi)`` where ``rho`` is the
geocentric distance in earth radii, and ``phi`` is the
geocentric latitude.
sin : float
Parallax constant ``rho * sin(phi)``.
name : string
The name of the observatory.
Raises
------
LookupError
If `code` is not found in the MPC table.
Examples
--------
>>> from astroquery.mpc import MPC
>>> obs = MPC.get_observatory_location('000')
>>> print(obs) # doctest: +SKIP
(<Angle 0. deg>, 0.62411, 0.77873, 'Greenwich')
"""
if not isinstance(code, str):
raise TypeError('code must be a string')
if len(code) != 3:
            raise ValueError('code must be three characters long')
tab = self.get_observatory_codes(cache=cache)
for row in tab:
if row[0] == code:
return Angle(row[1], 'deg'), row[2], row[3], row[4]
raise LookupError('{} not found'.format(code))
def _args_to_object_payload(self, **kwargs):
request_args = kwargs
kwargs['json'] = 1
return_fields = kwargs.pop('return_fields', None)
if return_fields:
kwargs['return'] = return_fields
return request_args
def _args_to_ephemeris_payload(self, **kwargs):
request_args = {
'ty': 'e',
'TextArea': str(kwargs['target']),
'uto': str(kwargs['ut_offset']),
'igd': 'y' if kwargs['suppress_daytime'] else 'n',
'ibh': 'y' if kwargs['suppress_set'] else 'n',
'fp': 'y' if kwargs['perturbed'] else 'n',
'adir': 'N', # always measure azimuth eastward from north
'tit': '', # dummy page title
'bu': '' # dummy base URL
}
location = kwargs['location']
if isinstance(location, str):
request_args['c'] = location
elif isinstance(location, int):
request_args['c'] = '{:03d}'.format(location)
elif isinstance(location, EarthLocation):
loc = location.geodetic
request_args['long'] = loc[0].deg
request_args['lat'] = loc[1].deg
request_args['alt'] = loc[2].to(u.m).value
elif hasattr(location, '__iter__'):
request_args['long'] = Angle(location[0]).deg
request_args['lat'] = Angle(location[1]).deg
request_args['alt'] = u.Quantity(location[2]).to('m').value
if kwargs['start'] is None:
_start = Time.now()
_start.precision = 0 # integer seconds
request_args['d'] = _start.iso.replace(':', '')
else:
_start = Time(kwargs['start'], precision=0,
scale='utc') # integer seconds
request_args['d'] = _start.iso.replace(':', '')
request_args['i'] = str(int(round(kwargs['step'].value)))
request_args['u'] = str(kwargs['step'].unit)[:1]
if kwargs['number'] is None:
request_args['l'] = self._default_number_of_steps[
request_args['u']]
else:
request_args['l'] = kwargs['number']
request_args['raty'] = self._ephemeris_types[kwargs['eph_type']]
request_args['s'] = self._proper_motions[kwargs['proper_motion']]
request_args['m'] = 'h' # always return proper_motion as arcsec/hr
return request_args
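    # --- Illustrative payload (not part of the original module) ---
    # A hypothetical example of the dictionary assembled above for an
    # equatorial, geocentric (code 500), daily ephemeris of target '(24)'
    # with total proper motion; the 'd' value depends on the requested
    # start time and the exact 'l' value on self._default_number_of_steps:
    #
    #   {'ty': 'e', 'TextArea': '(24)', 'uto': '0', 'igd': 'n', 'ibh': 'n',
    #    'fp': 'y', 'adir': 'N', 'tit': '', 'bu': '', 'c': '500',
    #    'd': '2018-06-19 000000', 'i': '1', 'u': 'd', 'l': 21,
    #    'raty': 'a', 's': 't', 'm': 'h'}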
@class_or_instance
def get_observations_async(self, targetid,
id_type=None,
comettype=None,
get_mpcformat=False,
get_raw_response=False,
get_query_payload=False,
cache=True):
"""
Obtain all reported observations for an asteroid or a comet
from the `Minor Planet Center observations database
<https://minorplanetcenter.net/db_search>`_.
Parameters
----------
targetid : int or str
Official target number or
designation. If a number is provided (either as int or
str), the input is interpreted as an asteroid number;
asteroid designations are interpreted as such (note that a
whitespace between the year and the remainder of the
designation is required and no packed designations are
allowed). To query a periodic comet number, you have to
append ``'P'``, e.g., ``'234P'``. To query any comet
designation, the designation has to start with a letter
describing the comet type and a slash, e.g., ``'C/2018 E1'``.
Comet or asteroid names, Palomar-Leiden Survey
designations, and individual comet fragments cannot be
queried.
id_type : str, optional
Manual override for identifier type. If ``None``, the
identifier type is derived by parsing ``targetid``; if this
automated classification fails, it can be set manually using
this parameter. Possible values are ``'asteroid number'``,
``'asteroid designation'``, ``'comet number'``, and
``'comet designation'``. Default: ``None``
get_mpcformat : bool, optional
If ``True``, this method will return an `~astropy.table.QTable`
with only a single column holding the original MPC 80-column
observation format. Default: ``False``
get_raw_response : bool, optional
If ``True``, this method will return the raw output from the
MPC servers (json). Default: ``False``
get_query_payload : bool, optional
Return the HTTP request parameters as a dictionary
(default: ``False``).
cache : bool, optional
If ``True``, queries will be cached. Default: ``True``
Raises
------
RuntimeError
If query did not return any data.
ValueError
If target name could not be parsed properly and target type
could not be identified.
Notes
-----
The following quantities are included in the output table
+-------------------+--------------------------------------------+
| Column Name | Definition |
+===================+============================================+
| ``number`` | official IAU target number (int) |
+-------------------+--------------------------------------------+
| ``desig`` | provisional target designation (str) |
+-------------------+--------------------------------------------+
| ``discovery`` (*) | target discovery flag (str) |
+-------------------+--------------------------------------------+
| ``comettype`` (*) | orbital type of comet (str) |
+-------------------+--------------------------------------------+
| ``note1`` (#) | Note1 (str) |
+-------------------+--------------------------------------------+
| ``note2`` (#) | Note2 (str) |
+-------------------+--------------------------------------------+
| ``epoch`` | epoch of observation (Julian Date, float) |
+-------------------+--------------------------------------------+
| ``RA`` | RA reported (J2000, deg, float) |
+-------------------+--------------------------------------------+
| ``DEC`` | declination reported (J2000, deg, float) |
+-------------------+--------------------------------------------+
| ``mag`` | reported magnitude (mag, float) |
+-------------------+--------------------------------------------+
| ``band`` (*) | photometric band for ``mag`` (str) |
+-------------------+--------------------------------------------+
| ``phottype`` (*) | comet photometry type (nuclear/total, str) |
+-------------------+--------------------------------------------+
| ``observatory`` | IAU observatory code (str) |
+-------------------+--------------------------------------------+
(*): Column names are optional and
depend on whether an asteroid or a comet has been queried.
(#): Parameters ``Note1`` and ``Note2`` are defined `here
<https://minorplanetcenter.net/iau/info/OpticalObs.html>`_.
Examples
--------
>>> from astroquery.mpc import MPC
>>> MPC.get_observations(12893) # doctest: +SKIP
<QTable masked=True length=1401>
number desig discovery note1 ... mag band observatory
... mag
int64 str9 str1 str1 ... float64 str1 str3
------ --------- --------- ----- ... ------- ---- -----------
12893 1998 QS55 -- -- ... 0.0 -- 413
12893 1998 QS55 -- -- ... 0.0 -- 413
12893 1998 QS55 * 4 ... 0.0 -- 809
12893 1998 QS55 -- 4 ... 0.0 -- 809
12893 1998 QS55 -- 4 ... 0.0 -- 809
12893 1998 QS55 -- 4 ... 18.4 -- 809
... ... ... ... ... ... ... ...
12893 1998 QS55 -- -- ... 18.63 c T05
12893 1998 QS55 -- -- ... 18.55 c T05
12893 1998 QS55 -- -- ... 18.3 r I41
12893 1998 QS55 -- -- ... 18.3 r I41
12893 1998 QS55 -- -- ... 18.2 r I41
12893 1998 QS55 -- -- ... 18.3 r I41
"""
request_payload = {'table': 'observations'}
if id_type is None:
pat = ('(^[0-9]*$)|' # [0] asteroid number
'(^[0-9]{1,3}[PIA]$)' # [1] periodic comet number
'(-[1-9A-Z]{0,2})?$|' # [2] fragment
'(^[PDCXAI]/[- 0-9A-Za-z]*)'
# [3] comet designation
'(-[1-9A-Z]{0,2})?$|' # [4] fragment
'(^([1A][8-9][0-9]{2}[ _][A-Z]{2}[0-9]{0,3}$|'
'^20[0-9]{2}[ _][A-Z]{2}[0-9]{0,3}$)|'
'(^[1-9][0-9]{3}[ _](P-L|T-[1-3]))$)'
# asteroid designation [5] (old/new/Palomar-Leiden style)
)
# comet fragments are extracted here, but the MPC server does
# not allow for fragment-based queries
m = re.findall(pat, str(targetid))
if len(m) == 0:
raise ValueError(('Cannot interpret target '
'identifier "{}".').format(targetid))
else:
m = m[0]
request_payload['object_type'] = 'M'
if m[1] != '':
request_payload['object_type'] = 'P'
if m[3] != '':
request_payload['object_type'] = m[3][0]
if m[0] != '':
request_payload['number'] = m[0] # asteroid number
elif m[1] != '':
request_payload['number'] = m[1][:-1] # per. comet number
elif m[3] != '':
request_payload['designation'] = m[3] # comet designation
elif m[5] != '':
request_payload['designation'] = m[5] # ast. designation
else:
if 'asteroid' in id_type:
request_payload['object_type'] = 'M'
if 'number' in id_type:
request_payload['number'] = str(targetid)
elif 'designation' in id_type:
request_payload['designation'] = targetid
if 'comet' in id_type:
pat = ('(^[0-9]{1,3}[PIA])|' # [0] number
'(^[PDCXAI]/[- 0-9A-Za-z]*)' # [1] designation
)
m = re.findall(pat, str(targetid))
if len(m) == 0:
raise ValueError(('Cannot parse comet type '
'from "{}".').format(targetid))
else:
m = m[0]
if m[0] != '':
request_payload['object_type'] = m[0][-1]
elif m[1] != '':
request_payload['object_type'] = m[1][0]
if 'number' in id_type:
request_payload['number'] = targetid[:-1]
elif 'designation' in id_type:
request_payload['designation'] = targetid
self.query_type = 'observations'
if get_query_payload:
return request_payload
response = self._request('GET', url=self.MPCOBS_URL,
params=request_payload,
auth=(self.MPC_USERNAME,
self.MPC_PASSWORD),
timeout=self.TIMEOUT, cache=cache)
if get_mpcformat:
self.obsformat = 'mpc'
else:
self.obsformat = 'table'
if get_raw_response:
self.get_raw_response = True
else:
self.get_raw_response = False
return response
def _parse_result(self, result, **kwargs):
if self.query_type == 'object':
try:
data = result.json()
except ValueError:
raise InvalidQueryError(result.text)
return data
elif self.query_type == 'observatory_code':
root = BeautifulSoup(result.content, 'html.parser')
text_table = root.find('pre').text
start = text_table.index('000')
text_table = text_table[start:]
# parse table ourselves to make sure the code column is a
# string and that blank cells are masked
rows = []
for line in text_table.splitlines():
lon = line[4:13]
if len(lon.strip()) == 0:
lon = np.nan
else:
lon = float(lon)
c = line[13:21]
if len(c.strip()) == 0:
c = np.nan
else:
c = float(c)
s = line[21:30]
if len(s.strip()) == 0:
s = np.nan
else:
s = float(s)
rows.append((line[:3], lon, c, s, line[30:]))
tab = Table(rows=rows,
names=('Code', 'Longitude', 'cos', 'sin', 'Name'),
dtype=(str, float, float, float, str),
masked=True)
tab['Longitude'].mask = ~np.isfinite(tab['Longitude'])
tab['cos'].mask = ~np.isfinite(tab['cos'])
tab['sin'].mask = ~np.isfinite(tab['sin'])
return tab
elif self.query_type == 'ephemeris':
content = result.content.decode()
table_start = content.find('<pre>')
if table_start == -1:
raise InvalidQueryError(content)
table_end = content.find('</pre>')
text_table = content[table_start + 5:table_end]
SKY = 'raty=a' in result.request.body
HELIOCENTRIC = 'raty=s' in result.request.body
GEOCENTRIC = 'raty=G' in result.request.body
# columns = '\n'.join(text_table.splitlines()[:2])
# find column headings
if SKY:
# slurp to newline after "h m s"
i = text_table.index('\n', text_table.index('h m s')) + 1
columns = text_table[:i]
data_start = columns.count('\n') - 1
else:
# slurp to newline after "JD_TT"
i = text_table.index('\n', text_table.index('JD_TT')) + 1
columns = text_table[:i]
data_start = columns.count('\n') - 1
first_row = text_table.splitlines()[data_start + 1]
if SKY:
names = ('Date', 'RA', 'Dec', 'Delta',
'r', 'Elongation', 'Phase', 'V')
col_starts = (0, 18, 29, 39, 47, 56, 62, 69)
col_ends = (17, 28, 38, 46, 55, 61, 68, 72)
units = (None, None, None, 'au', 'au', 'deg', 'deg', 'mag')
if 's=t' in result.request.body: # total motion
names += ('Proper motion', 'Direction')
units += ('arcsec/h', 'deg')
elif 's=c' in result.request.body: # coord Motion
names += ('dRA', 'dDec')
units += ('arcsec/h', 'arcsec/h')
elif 's=s' in result.request.body: # sky Motion
names += ('dRA cos(Dec)', 'dDec')
units += ('arcsec/h', 'arcsec/h')
col_starts += (73, 81)
col_ends += (80, 89)
if 'Moon' in columns:
# table includes Alt, Az, Sun and Moon geometry
names += ('Azimuth', 'Altitude', 'Sun altitude', 'Moon phase',
'Moon distance', 'Moon altitude')
col_starts += tuple((col_ends[-1] + offset for offset in
(2, 9, 14, 20, 27, 33)))
col_ends += tuple((col_ends[-1] + offset for offset in
(8, 13, 19, 26, 32, 37)))
units += ('deg', 'deg', 'deg', None, 'deg', 'deg')
if 'Uncertainty' in columns:
names += ('Uncertainty 3sig', 'Unc. P.A.')
col_starts += tuple((col_ends[-1] + offset for offset in
(2, 11)))
col_ends += tuple((col_ends[-1] + offset for offset in
(10, 16)))
units += ('arcsec', 'deg')
if ">Map</a>" in first_row and self._unc_links:
names += ('Unc. map', 'Unc. offsets')
col_starts += (first_row.index(' / <a') + 3, )
col_starts += (
first_row.index(' / <a', col_starts[-1]) + 3, )
# Unc. offsets is always last
col_ends += (col_starts[-1] - 3,
first_row.rindex('</a>') + 4)
units += (None, None)
elif HELIOCENTRIC:
names = ('Object', 'JD', 'X', 'Y', 'Z', "X'", "Y'", "Z'")
col_starts = (0, 12, 28, 45, 61, 77, 92, 108)
col_ends = None
units = (None, None, 'au', 'au', 'au', 'au/d', 'au/d', 'au/d')
elif GEOCENTRIC:
names = ('Object', 'JD', 'X', 'Y', 'Z')
col_starts = (0, 12, 28, 45, 61)
col_ends = None
units = (None, None, 'au', 'au', 'au')
tab = ascii.read(text_table, format='fixed_width_no_header',
names=names, col_starts=col_starts,
col_ends=col_ends, data_start=data_start,
fill_values=(('N/A', np.nan),))
for col, unit in zip(names, units):
tab[col].unit = unit
# Time for dates, Angle for RA and Dec; convert columns at user's request
if SKY:
# convert from MPES string to Time, MPES uses UT timescale
tab['Date'] = Time(['{}-{}-{} {}:{}:{}'.format(
d[:4], d[5:7], d[8:10], d[11:13], d[13:15], d[15:17])
for d in tab['Date']], scale='utc')
# convert from MPES string:
ra = Angle(tab['RA'], unit='hourangle').to('deg')
dec = Angle(tab['Dec'], unit='deg')
# optionally convert back to a string
if self._ra_format is not None:
ra_unit = self._ra_format.get('unit', ra.unit)
ra = ra.to_string(**self._ra_format)
else:
ra_unit = ra.unit
if self._dec_format is not None:
dec_unit = self._dec_format.get('unit', dec.unit)
dec = dec.to_string(**self._dec_format)
else:
dec_unit = dec.unit
# replace columns
tab.remove_columns(('RA', 'Dec'))
tab.add_column(Column(ra, name='RA', unit=ra_unit), index=1)
tab.add_column(Column(dec, name='Dec', unit=dec_unit), index=2)
# convert proper motion columns
for col in ('Proper motion', 'dRA', 'dRA cos(Dec)', 'dDec'):
if col in tab.colnames:
tab[col].convert_unit_to(self._proper_motion_unit)
else:
# convert from MPES string to Time
tab['JD'] = Time(tab['JD'], format='jd', scale='tt')
return tab
elif self.query_type == 'observations':
warnings.simplefilter("ignore", ErfaWarning)
try:
src = json.loads(result.text)
except (ValueError, json.decoder.JSONDecodeError):
raise RuntimeError(
'Server response not readable: "{}"'.format(
result.text))
if len(src) == 0:
raise RuntimeError(('No data queried. Are the target '
'identifiers correct?'))
# return raw response if requested
if self.get_raw_response:
return src
# return raw 80-column observation format if requested
if self.obsformat == 'mpc':
tab = Table([[o['original_record'] for o in src]])
tab.rename_column('col0', 'obs')
return tab
if all([o['object_type'] == 'M' for o in src]):
# minor planets (asteroids)
data = ascii.read("\n".join([o['original_record']
for o in src]),
format='fixed_width_no_header',
names=('number', 'pdesig', 'discovery',
'note1', 'note2', 'epoch',
'RA', 'DEC', 'mag', 'band',
'observatory'),
col_starts=(0, 5, 12, 13, 14, 15,
32, 44, 65, 70, 77),
col_ends=(4, 11, 12, 13, 14, 31,
43, 55, 69, 70, 79))
# convert asteroid designations
# old designation style, e.g.: 1989AB
ident = data['pdesig'][0]
if isinstance(ident, np.ma.masked_array) and ident.mask:
ident = ''
elif (len(ident) < 7 and ident[:4].isdigit() and
ident[4:6].isalpha()):
ident = ident[:4]+' '+ident[4:6]
# Palomar Survey
elif 'PLS' in ident:
ident = ident[3:] + " P-L"
# Trojan Surveys
elif 'T1S' in ident:
ident = ident[3:] + " T-1"
elif 'T2S' in ident:
ident = ident[3:] + " T-2"
elif 'T3S' in ident:
ident = ident[3:] + " T-3"
# standard MPC packed 7-digit designation
elif (ident[0].isalpha() and ident[1:3].isdigit() and
ident[-1].isalpha() and ident[-2].isdigit()):
yr = str(conf.pkd.find(ident[0]))+ident[1:3]
let = ident[3]+ident[-1]
num = str(conf.pkd.find(ident[4]))+ident[5]
num = num.lstrip("0")
ident = yr+' '+let+num
data.add_column(Column([ident]*len(data), name='desig'),
index=1)
data.remove_column('pdesig')
elif all([o['object_type'] != 'M' for o in src]):
# comets
data = ascii.read("\n".join([o['original_record']
for o in src]),
format='fixed_width_no_header',
names=('number', 'comettype', 'desig',
'note1', 'note2', 'epoch',
'RA', 'DEC', 'mag', 'phottype',
'observatory'),
col_starts=(0, 4, 5, 13, 14, 15,
32, 44, 65, 70, 77),
col_ends=(3, 4, 12, 13, 14, 31,
43, 55, 69, 70, 79))
# convert comet designations
ident = data['desig'][0]
if (not isinstance(ident, (np.ma.masked_array,
np.ma.core.MaskedConstant))
or not ident.mask):
yr = str(conf.pkd.find(ident[0]))+ident[1:3]
let = ident[3]
# patch to parse asteroid designations
if len(ident) == 7 and str.isalpha(ident[6]):
let += ident[6]
ident = ident[:6] + ident[7:]
num = str(conf.pkd.find(ident[4]))+ident[5]
num = num.lstrip("0")
if len(ident) >= 7:
frag = ident[6] if ident[6] != '0' else ''
else:
frag = ''
ident = yr+' '+let+num+frag
# remove and add desig column to overcome length limit
data.remove_column('desig')
data.add_column(Column([ident]*len(data),
name='desig'), index=3)
else:
raise ValueError(('Object type is ambiguous. "{}" '
'are present.').format(
set([o['object_type'] for o in src])))
# convert dates to Julian Dates
dates = [d[:10].replace(' ', '-') for d in data['epoch']]
times = np.array([float(d[10:]) for d in data['epoch']])
jds = Time(dates, format='iso').jd+times
data['epoch'] = jds
# convert ra and dec to degrees
coo = SkyCoord(ra=data['RA'], dec=data['DEC'],
unit=(u.hourangle, u.deg),
frame='icrs')
data['RA'] = coo.ra.deg
data['DEC'] = coo.dec.deg
# convert Table to QTable
data = QTable(data)
data['epoch'].unit = u.d
data['RA'].unit = u.deg
data['DEC'].unit = u.deg
data['mag'].unit = u.mag
return data
MPC = MPCClass()
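# --- Illustrative usage (not part of the original module) ---
# A minimal, hedged sketch of how the instance above might be exercised,
# mirroring the docstring examples. It assumes the synchronous wrappers
# (e.g. MPC.get_ephemeris) that astroquery generates from the *_async
# methods are present, and it performs live network queries, so it only
# runs when this module is executed directly.
if __name__ == "__main__":
    # Longitude, parallax constants and name for observatory code 000 (Greenwich).
    print(MPC.get_observatory_location('000'))
    # Three ephemeris rows, 100 days apart, for asteroid (24) as seen from
    # observatory code 568.
    print(MPC.get_ephemeris('(24)', location=568, start='2003-02-26',
                            step='100d', number=3))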
|
[
"astropy.units.Quantity",
"astropy.table.Table",
"astropy.io.ascii.read",
"warnings.simplefilter",
"json.loads",
"astropy.time.Time",
"astropy.table.QTable",
"astropy.time.Time.now",
"numpy.isfinite",
"astropy.table.Column",
"bs4.BeautifulSoup",
"astropy.coordinates.Angle",
"astropy.coordinates.SkyCoord",
"astropy.units.Unit"
] |
[((25249, 25265), 'astropy.units.Quantity', 'u.Quantity', (['step'], {}), '(step)\n', (25259, 25265), True, 'import astropy.units as u\n'), ((26552, 26578), 'astropy.units.Unit', 'u.Unit', (['proper_motion_unit'], {}), '(proper_motion_unit)\n', (26558, 26578), True, 'import astropy.units as u\n'), ((56084, 56096), 'astropy.table.QTable', 'QTable', (['data'], {}), '(data)\n', (56090, 56096), False, 'from astropy.table import Table, QTable, Column\n'), ((24974, 24985), 'astropy.time.Time', 'Time', (['start'], {}), '(start)\n', (24978, 24985), False, 'from astropy.time import Time\n'), ((31282, 31292), 'astropy.time.Time.now', 'Time.now', ([], {}), '()\n', (31290, 31292), False, 'from astropy.time import Time\n'), ((31440, 31487), 'astropy.time.Time', 'Time', (["kwargs['start']"], {'precision': '(0)', 'scale': '"""utc"""'}), "(kwargs['start'], precision=0, scale='utc')\n", (31444, 31487), False, 'from astropy.time import Time\n'), ((42649, 42693), 'bs4.BeautifulSoup', 'BeautifulSoup', (['result.content', '"""html.parser"""'], {}), "(result.content, 'html.parser')\n", (42662, 42693), False, 'from bs4 import BeautifulSoup\n'), ((43591, 43715), 'astropy.table.Table', 'Table', ([], {'rows': 'rows', 'names': "('Code', 'Longitude', 'cos', 'sin', 'Name')", 'dtype': '(str, float, float, float, str)', 'masked': '(True)'}), "(rows=rows, names=('Code', 'Longitude', 'cos', 'sin', 'Name'), dtype=(\n str, float, float, float, str), masked=True)\n", (43596, 43715), False, 'from astropy.table import Table, QTable, Column\n'), ((25912, 25938), 'astropy.units.Unit', 'u.Unit', (['proper_motion_unit'], {}), '(proper_motion_unit)\n', (25918, 25938), True, 'import astropy.units as u\n'), ((29647, 29667), 'astropy.coordinates.Angle', 'Angle', (['row[1]', '"""deg"""'], {}), "(row[1], 'deg')\n", (29652, 29667), False, 'from astropy.coordinates import EarthLocation, Angle, SkyCoord\n'), ((43820, 43849), 'numpy.isfinite', 'np.isfinite', (["tab['Longitude']"], {}), "(tab['Longitude'])\n", (43831, 43849), True, 'import numpy as np\n'), ((43881, 43904), 'numpy.isfinite', 'np.isfinite', (["tab['cos']"], {}), "(tab['cos'])\n", (43892, 43904), True, 'import numpy as np\n'), ((43936, 43959), 'numpy.isfinite', 'np.isfinite', (["tab['sin']"], {}), "(tab['sin'])\n", (43947, 43959), True, 'import numpy as np\n'), ((48078, 48250), 'astropy.io.ascii.read', 'ascii.read', (['text_table'], {'format': '"""fixed_width_no_header"""', 'names': 'names', 'col_starts': 'col_starts', 'col_ends': 'col_ends', 'data_start': 'data_start', 'fill_values': "(('N/A', np.nan),)"}), "(text_table, format='fixed_width_no_header', names=names,\n col_starts=col_starts, col_ends=col_ends, data_start=data_start,\n fill_values=(('N/A', np.nan),))\n", (48088, 48250), False, 'from astropy.io import ascii\n'), ((48925, 48954), 'astropy.coordinates.Angle', 'Angle', (["tab['Dec']"], {'unit': '"""deg"""'}), "(tab['Dec'], unit='deg')\n", (48930, 48954), False, 'from astropy.coordinates import EarthLocation, Angle, SkyCoord\n'), ((50068, 50108), 'astropy.time.Time', 'Time', (["tab['JD']"], {'format': '"""jd"""', 'scale': '"""tt"""'}), "(tab['JD'], format='jd', scale='tt')\n", (50072, 50108), False, 'from astropy.time import Time\n'), ((50195, 50239), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'ErfaWarning'], {}), "('ignore', ErfaWarning)\n", (50216, 50239), False, 'import warnings\n'), ((55824, 55910), 'astropy.coordinates.SkyCoord', 'SkyCoord', ([], {'ra': "data['RA']", 'dec': "data['DEC']", 'unit': '(u.hourangle, u.deg)', 'frame': 
'"""icrs"""'}), "(ra=data['RA'], dec=data['DEC'], unit=(u.hourangle, u.deg), frame=\n 'icrs')\n", (55832, 55910), False, 'from astropy.coordinates import EarthLocation, Angle, SkyCoord\n'), ((31072, 31090), 'astropy.coordinates.Angle', 'Angle', (['location[0]'], {}), '(location[0])\n', (31077, 31090), False, 'from astropy.coordinates import EarthLocation, Angle, SkyCoord\n'), ((31129, 31147), 'astropy.coordinates.Angle', 'Angle', (['location[1]'], {}), '(location[1])\n', (31134, 31147), False, 'from astropy.coordinates import EarthLocation, Angle, SkyCoord\n'), ((49600, 49635), 'astropy.table.Column', 'Column', (['ra'], {'name': '"""RA"""', 'unit': 'ra_unit'}), "(ra, name='RA', unit=ra_unit)\n", (49606, 49635), False, 'from astropy.table import Table, QTable, Column\n'), ((49677, 49715), 'astropy.table.Column', 'Column', (['dec'], {'name': '"""Dec"""', 'unit': 'dec_unit'}), "(dec, name='Dec', unit=dec_unit)\n", (49683, 49715), False, 'from astropy.table import Table, QTable, Column\n'), ((50280, 50303), 'json.loads', 'json.loads', (['result.text'], {}), '(result.text)\n', (50290, 50303), False, 'import json\n'), ((50912, 50956), 'astropy.table.Table', 'Table', (["[[o['original_record'] for o in src]]"], {}), "([[o['original_record'] for o in src]])\n", (50917, 50956), False, 'from astropy.table import Table, QTable, Column\n'), ((48858, 48892), 'astropy.coordinates.Angle', 'Angle', (["tab['RA']"], {'unit': '"""hourangle"""'}), "(tab['RA'], unit='hourangle')\n", (48863, 48892), False, 'from astropy.coordinates import EarthLocation, Angle, SkyCoord\n'), ((55694, 55719), 'astropy.time.Time', 'Time', (['dates'], {'format': '"""iso"""'}), "(dates, format='iso')\n", (55698, 55719), False, 'from astropy.time import Time\n'), ((31186, 31209), 'astropy.units.Quantity', 'u.Quantity', (['location[2]'], {}), '(location[2])\n', (31196, 31209), True, 'import astropy.units as u\n')]
|
"""
Strategies that try to maximize the posterior mean function.
"""
from argparse import Namespace
import numpy as np
from OCBO.cstrats.cts_opt import ContinuousOpt
from dragonfly.utils.option_handler import get_option_specs
from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient
pm_args = [\
    get_option_specs('judge_act_size', False, 50,
        'Number of action points to use to judge candidates.'),
    get_option_specs('judge_ctx_size', False, 50,
        'Number of context points to use to judge candidates.'),
    get_option_specs('judge_ctx_thresh', False, None,
        'Threshold for how related the judgement set should be in context space.'),
    get_option_specs('judge_act_thresh', False, None,
        'Threshold for how related the judgement set should be in action space.'),
]
class PosteriorMaximization(ContinuousOpt):
def _child_set_up(self, function, domain, ctx_dim, options):
self.judge_act_size = options.judge_act_size
self.judge_ctx_size = options.judge_ctx_size
self.judge_ctx_thresh = options.judge_ctx_thresh
if self.judge_ctx_thresh is not None:
self.judge_ctx_thresh = float(self.judge_ctx_thresh)
self.judge_act_thresh = options.judge_act_thresh
if self.judge_act_thresh is not None:
self.judge_act_thresh = float(self.judge_act_thresh)
def _make_judgement_set(self, cand_pt):
"""Make judgement set. Restruct to have some delta threshold in the
context and action space. Distance between points uses l-infty norm.
Args:
cand_pt: ndarray of specific candidate point to make the set for.
Returns: ndarray of points.
"""
# Find context threshold.
if self.judge_ctx_thresh is None:
c_thresh = self.ctx_domain
else:
c_thresh = self._find_ctx_thresh(cand_pt)
# Draw contexts based on context threshold constraint.
ctxs = uniform_draw(c_thresh, self.judge_ctx_size)
# Find Action threshold.
if self.judge_act_thresh is None:
a_thresh = self.act_domain
else:
a_thresh = self._find_act_thresh(cand_pt)
ctxs = [list(cx) for cx in ctxs]
return sample_grid(ctxs, a_thresh, self.judge_act_size)
def _find_ctx_thresh(self, cand_pt):
# Draw points at random with same action.
cand_ctx, cand_act = np.split(cand_pt, [self.ctx_dim])
check_ctxs = uniform_draw(self.ctx_domain, self.judge_ctx_size)
dist_pairs = []
for cc in check_ctxs:
linf = np.max(np.abs(cc - cand_ctx))
dist_pairs.append((linf, cc))
dist_pairs.sort()
dist_pairs = dist_pairs[::-1]
check_ctxs = np.asarray([dp[1] for dp in dist_pairs])
check_ctxs = check_ctxs.reshape(self.judge_ctx_size, self.ctx_dim)
repeated_act = np.tile(cand_act, self.judge_ctx_size)
repeated_act = repeated_act.reshape(self.judge_ctx_size,
self.act_dim)
check_pts = np.hstack([check_ctxs, repeated_act])
check_pts = check_pts.reshape(self.judge_ctx_size, self.dim)
# Find posterior covariances between cand_pt and drawn.
all_pts = np.vstack([cand_pt, check_pts])
_, covmat = self.gp.eval(all_pts, include_covar=True)
covs = covmat[0]
# Find furthest point that satisfies threshold.
dist_idx = 1
while dist_idx < self.judge_ctx_size \
and covs[dist_idx] > self.judge_ctx_thresh:
dist_idx += 1
max_ldist = dist_pairs[dist_idx - 1][0]
new_domain = []
for dim, dim_domain in enumerate(self.ctx_domain):
lower = np.max([cand_ctx[dim] - max_ldist, dim_domain[0]])
upper = np.min([cand_ctx[dim] + max_ldist, dim_domain[1]])
new_domain.append([lower, upper])
return new_domain
def _find_act_thresh(self, cand_pt):
# Draw points at random with same action.
cand_ctx, cand_act = np.split(cand_pt, [self.ctx_dim])
check_acts = uniform_draw(self.act_domain, self.judge_act_size)
dist_pairs = []
for ca in check_acts:
            linf = np.max(np.abs(ca - cand_act))
dist_pairs.append((linf, ca))
dist_pairs.sort()
dist_pairs = dist_pairs[::-1]
check_acts = np.asarray([dp[1] for dp in dist_pairs])
check_acts = check_acts.reshape(self.judge_act_size, self.act_dim)
repeated_ctx = np.tile(cand_ctx, self.judge_act_size)
repeated_ctx = repeated_ctx.reshape(self.judge_act_size,
self.ctx_dim)
check_pts = np.hstack([repeated_ctx, check_acts])
check_pts = check_pts.reshape(self.judge_act_size, self.dim)
# Find posterior covariances between cand_pt and drawn.
all_pts = np.vstack([cand_pt, check_pts])
_, covmat = self.gp.eval(all_pts, uncert_form='covar')
covs = covmat[0]
# Find furthest point that satisfies threshold.
dist_idx = 1
        while dist_idx < self.judge_act_size \
                and covs[dist_idx] > self.judge_act_thresh:
dist_idx += 1
max_ldist = dist_pairs[dist_idx - 1][0]
new_domain = []
for dim, dim_domain in enumerate(self.act_domain):
lower = np.max([cand_act[dim] - max_ldist, dim_domain[0]])
upper = np.min([cand_act[dim] + max_ldist, dim_domain[1]])
new_domain.append([lower, upper])
return new_domain
class REVI(PosteriorMaximization):
@staticmethod
def get_strat_name():
"""Get the name of the strategies."""
return 'revi'
def _determine_next_query(self):
"""Pick the next query uniformly at random."""
if self.judge_act_thresh is None and self.judge_ctx_thresh is None:
return self._set_eval_next_query()
return self._diff_eval_next_query()
def _set_eval_next_query(self):
# Form candidate and judgement sets.
cands = self._make_candidate_set()
noise = self.gp.get_estimated_noise()
judge = self._make_judgement_set(cands[0])
conjoined = np.vstack([cands, judge])
means, _ = self.gp.eval(conjoined, include_covar=False)
judge_means = means[self.cand_size:].reshape(self.judge_ctx_size,
self.judge_act_size)
interactions = self.gp.get_pt_relations(cands, judge)
cand_vars = self.gp.eval(cands, include_covar=True)[1].diagonal()
best_pt, best_val = None, float('-inf')
for c_idx in range(self.cand_size):
interaction = interactions[c_idx].reshape(self.judge_ctx_size,
self.judge_act_size)
var = cand_vars[c_idx]
improvement = 0
# Judge the effect of the candidate point.
for ctx_idx in range(self.judge_ctx_size):
means = judge_means[ctx_idx]
sigmas = interaction[ctx_idx] / np.sqrt(noise + var)
improvement += knowledge_gradient(means, sigmas)
if improvement > best_val:
best_pt, best_val = cands[c_idx], improvement
# Return the best candidate point.
return best_pt
def _diff_eval_next_query(self):
# Form candidate and judgement sets.
cands = self._make_candidate_set()
noise = self.gp.get_estimated_noise()
best_pt, best_val = None, float('-inf')
for c_idx in range(self.cand_size):
cand_pt = cands[c_idx]
judge = self._make_judgement_set(cands[c_idx])
# Find mean and covar mat for combined cand-judgement set.
conjoined = np.vstack([cand_pt, judge])
means, covar = self.gp.eval(conjoined, include_covar=False)
judge_means = means[1:].reshape(self.judge_ctx_size,
self.judge_act_size)
interaction = self.gp.get_pt_relations([cand_pt], judge)
interaction = interaction.reshape(self.judge_ctx_size,
self.judge_act_size)
var = float(self.gp.eval([cand_pt], include_covar=True)[1])
improvement = 0
# Judge the effect of the candidate point.
for ctx_idx in range(self.judge_ctx_size):
means = judge_means[ctx_idx]
sigmas = interaction[ctx_idx] / np.sqrt(noise + var)
improvement += knowledge_gradient(means, sigmas)
if improvement > best_val:
best_pt, best_val = cand_pt, improvement
# Return the best candidate point.
return best_pt
pm_strats = [Namespace(impl=REVI, name=REVI.get_strat_name())]
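# --- Illustrative sketch (not part of the original module) ---
# The strategies above score candidates with knowledge_gradient(means, sigmas)
# from OCBO.util.misc_util, whose implementation is not shown here. The helper
# below is a hypothetical Monte-Carlo stand-in for that quantity: `means` are
# the posterior means over one context's judgement actions and `sigmas` are the
# candidate/judgement covariances scaled by 1/sqrt(noise + var), so the value
# estimated is E[max_i(mu_i + sigma_i * Z)] - max_i(mu_i) with Z ~ N(0, 1).
def _mc_knowledge_gradient(means, sigmas, n_samples=10000, seed=0):
    """Monte-Carlo estimate of the knowledge gradient for a single context.

    means, sigmas: 1-D array-likes of equal length (one entry per judgement
    action). Returns a scalar estimate of the expected improvement.
    """
    means = np.asarray(means, dtype=float)
    sigmas = np.asarray(sigmas, dtype=float)
    rng = np.random.RandomState(seed)
    z = rng.randn(n_samples, 1)                      # shared standard-normal draws
    updated = means[None, :] + z * sigmas[None, :]   # fantasized posterior means
    return float(np.mean(np.max(updated, axis=1)) - np.max(means))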
|
[
"numpy.abs",
"OCBO.util.misc_util.sample_grid",
"numpy.asarray",
"OCBO.util.misc_util.knowledge_gradient",
"numpy.split",
"numpy.hstack",
"numpy.max",
"numpy.min",
"numpy.tile",
"OCBO.util.misc_util.uniform_draw",
"numpy.sqrt",
"numpy.vstack",
"dragonfly.utils.option_handler.get_option_specs"
] |
[((328, 425), 'dragonfly.utils.option_handler.get_option_specs', 'get_option_specs', (['"""judge_act_size"""', '(False)', '(50)', '"""Number of points to use to judge candidates."""'], {}), "('judge_act_size', False, 50,\n 'Number of points to use to judge candidates.')\n", (344, 425), False, 'from dragonfly.utils.option_handler import get_option_specs\n'), ((443, 540), 'dragonfly.utils.option_handler.get_option_specs', 'get_option_specs', (['"""judge_ctx_size"""', '(False)', '(50)', '"""Number of points to use to judge candidates."""'], {}), "('judge_ctx_size', False, 50,\n 'Number of points to use to judge candidates.')\n", (459, 540), False, 'from dragonfly.utils.option_handler import get_option_specs\n'), ((558, 676), 'dragonfly.utils.option_handler.get_option_specs', 'get_option_specs', (['"""judge_ctx_thresh"""', '(False)', 'None', '"""Threshold for how related judgement set should be in context."""'], {}), "('judge_ctx_thresh', False, None,\n 'Threshold for how related judgement set should be in context.')\n", (574, 676), False, 'from dragonfly.utils.option_handler import get_option_specs\n'), ((694, 812), 'dragonfly.utils.option_handler.get_option_specs', 'get_option_specs', (['"""judge_act_thresh"""', '(False)', 'None', '"""Threshold for how related judgement set should be in context."""'], {}), "('judge_act_thresh', False, None,\n 'Threshold for how related judgement set should be in context.')\n", (710, 812), False, 'from dragonfly.utils.option_handler import get_option_specs\n'), ((1976, 2019), 'OCBO.util.misc_util.uniform_draw', 'uniform_draw', (['c_thresh', 'self.judge_ctx_size'], {}), '(c_thresh, self.judge_ctx_size)\n', (1988, 2019), False, 'from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient\n'), ((2258, 2306), 'OCBO.util.misc_util.sample_grid', 'sample_grid', (['ctxs', 'a_thresh', 'self.judge_act_size'], {}), '(ctxs, a_thresh, self.judge_act_size)\n', (2269, 2306), False, 'from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient\n'), ((2428, 2461), 'numpy.split', 'np.split', (['cand_pt', '[self.ctx_dim]'], {}), '(cand_pt, [self.ctx_dim])\n', (2436, 2461), True, 'import numpy as np\n'), ((2483, 2533), 'OCBO.util.misc_util.uniform_draw', 'uniform_draw', (['self.ctx_domain', 'self.judge_ctx_size'], {}), '(self.ctx_domain, self.judge_ctx_size)\n', (2495, 2533), False, 'from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient\n'), ((2764, 2804), 'numpy.asarray', 'np.asarray', (['[dp[1] for dp in dist_pairs]'], {}), '([dp[1] for dp in dist_pairs])\n', (2774, 2804), True, 'import numpy as np\n'), ((2903, 2941), 'numpy.tile', 'np.tile', (['cand_act', 'self.judge_ctx_size'], {}), '(cand_act, self.judge_ctx_size)\n', (2910, 2941), True, 'import numpy as np\n'), ((3085, 3122), 'numpy.hstack', 'np.hstack', (['[check_ctxs, repeated_act]'], {}), '([check_ctxs, repeated_act])\n', (3094, 3122), True, 'import numpy as np\n'), ((3274, 3305), 'numpy.vstack', 'np.vstack', (['[cand_pt, check_pts]'], {}), '([cand_pt, check_pts])\n', (3283, 3305), True, 'import numpy as np\n'), ((4069, 4102), 'numpy.split', 'np.split', (['cand_pt', '[self.ctx_dim]'], {}), '(cand_pt, [self.ctx_dim])\n', (4077, 4102), True, 'import numpy as np\n'), ((4124, 4174), 'OCBO.util.misc_util.uniform_draw', 'uniform_draw', (['self.act_domain', 'self.judge_act_size'], {}), '(self.act_domain, self.judge_act_size)\n', (4136, 4174), False, 'from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient\n'), ((4405, 4445), 'numpy.asarray', 
'np.asarray', (['[dp[1] for dp in dist_pairs]'], {}), '([dp[1] for dp in dist_pairs])\n', (4415, 4445), True, 'import numpy as np\n'), ((4544, 4582), 'numpy.tile', 'np.tile', (['cand_ctx', 'self.judge_act_size'], {}), '(cand_ctx, self.judge_act_size)\n', (4551, 4582), True, 'import numpy as np\n'), ((4726, 4763), 'numpy.hstack', 'np.hstack', (['[repeated_ctx, check_acts]'], {}), '([repeated_ctx, check_acts])\n', (4735, 4763), True, 'import numpy as np\n'), ((4915, 4946), 'numpy.vstack', 'np.vstack', (['[cand_pt, check_pts]'], {}), '([cand_pt, check_pts])\n', (4924, 4946), True, 'import numpy as np\n'), ((6241, 6266), 'numpy.vstack', 'np.vstack', (['[cands, judge]'], {}), '([cands, judge])\n', (6250, 6266), True, 'import numpy as np\n'), ((3754, 3804), 'numpy.max', 'np.max', (['[cand_ctx[dim] - max_ldist, dim_domain[0]]'], {}), '([cand_ctx[dim] - max_ldist, dim_domain[0]])\n', (3760, 3804), True, 'import numpy as np\n'), ((3825, 3875), 'numpy.min', 'np.min', (['[cand_ctx[dim] + max_ldist, dim_domain[1]]'], {}), '([cand_ctx[dim] + max_ldist, dim_domain[1]])\n', (3831, 3875), True, 'import numpy as np\n'), ((5396, 5446), 'numpy.max', 'np.max', (['[cand_act[dim] - max_ldist, dim_domain[0]]'], {}), '([cand_act[dim] - max_ldist, dim_domain[0]])\n', (5402, 5446), True, 'import numpy as np\n'), ((5467, 5517), 'numpy.min', 'np.min', (['[cand_act[dim] + max_ldist, dim_domain[1]]'], {}), '([cand_act[dim] + max_ldist, dim_domain[1]])\n', (5473, 5517), True, 'import numpy as np\n'), ((7829, 7856), 'numpy.vstack', 'np.vstack', (['[cand_pt, judge]'], {}), '([cand_pt, judge])\n', (7838, 7856), True, 'import numpy as np\n'), ((2614, 2635), 'numpy.abs', 'np.abs', (['(cc - cand_ctx)'], {}), '(cc - cand_ctx)\n', (2620, 2635), True, 'import numpy as np\n'), ((4255, 4276), 'numpy.abs', 'np.abs', (['(ca - cand_ctx)'], {}), '(ca - cand_ctx)\n', (4261, 4276), True, 'import numpy as np\n'), ((7175, 7208), 'OCBO.util.misc_util.knowledge_gradient', 'knowledge_gradient', (['means', 'sigmas'], {}), '(means, sigmas)\n', (7193, 7208), False, 'from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient\n'), ((8617, 8650), 'OCBO.util.misc_util.knowledge_gradient', 'knowledge_gradient', (['means', 'sigmas'], {}), '(means, sigmas)\n', (8635, 8650), False, 'from OCBO.util.misc_util import sample_grid, uniform_draw, knowledge_gradient\n'), ((7123, 7143), 'numpy.sqrt', 'np.sqrt', (['(noise + var)'], {}), '(noise + var)\n', (7130, 7143), True, 'import numpy as np\n'), ((8565, 8585), 'numpy.sqrt', 'np.sqrt', (['(noise + var)'], {}), '(noise + var)\n', (8572, 8585), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import xarray as xr
import Grid
import pf_dynamic_sph
import os
import sys
from timeit import default_timer as timer
from copy import copy
if __name__ == "__main__":
start = timer()
# ---- INITIALIZE GRIDS ----
higherCutoff = True; cutoffRat = 1.5
betterResolution = False; resRat = 0.5
(Lx, Ly, Lz) = (60, 60, 60)
(dx, dy, dz) = (0.25, 0.25, 0.25)
# (Lx, Ly, Lz) = (40, 40, 40)
# (dx, dy, dz) = (0.25, 0.25, 0.25)
# (Lx, Ly, Lz) = (21, 21, 21)
# (dx, dy, dz) = (0.375, 0.375, 0.375)
xgrid = Grid.Grid('CARTESIAN_3D')
xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz)
NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz)
NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz)
Ntheta = 50
    Nk = int(np.ceil(NGridPoints_desired / Ntheta))
theta_max = np.pi
thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True)
# k_max = np.sqrt((np.pi / dx)**2 + (np.pi / dy)**2 + (np.pi / dz)**2)
k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3)
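    # The spherical cutoff above appears to be chosen so that the k-space sphere
    # has the same volume as the cubic Brillouin zone of the Cartesian grid:
    # (4*pi/3) * k_max**3 = (2*pi/dx)**3, i.e. k_max = ((2*pi/dx)**3 / (4*pi/3))**(1/3),
    # instead of the corner radius used in the commented-out line above.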
k_min = 1e-5
kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True)
if dk < k_min:
print('k ARRAY GENERATION ERROR')
kgrid = Grid.Grid("SPHERICAL_2D")
if higherCutoff is True and betterResolution is False:
kgrid.initArray('k', k_min, cutoffRat * k_max, dk)
k_max = kgrid.getArray('k')[-1]
elif higherCutoff is False and betterResolution is True:
kgrid.initArray('k', k_min, k_max, resRat * dk)
dk = kgrid.getArray('k')[1] - kgrid.getArray('k')[0]
else:
kgrid.initArray_premade('k', kArray)
kgrid.initArray_premade('th', thetaArray)
# for realdyn evolution
tMax = 100
dt = 0.2
CoarseGrainRate = 50
tgrid = np.arange(0, tMax + dt, dt)
gParams = [xgrid, kgrid, tgrid]
NGridPoints = kgrid.size()
print('Total time steps: {0}'.format(tgrid.size))
print('UV cutoff: {0}'.format(k_max))
print('dk: {0}'.format(dk))
print('NGridPoints: {0}'.format(NGridPoints))
print(NGridPoints_cart, NGridPoints)
# Toggle parameters
toggleDict = {'Location': 'cluster', 'Dynamics': 'real', 'Coupling': 'twophonon', 'Grid': 'spherical', 'Longtime': 'false', 'CoarseGrainRate': CoarseGrainRate}
# ---- SET PARAMS ----
mB = 1
n0 = 1
gBB = (4 * np.pi / mB) * 0.05 # Dresher uses aBB ~ 0.2 instead of 0.5 here
# gBB = (4 * np.pi / mB) * 0.02 # Dresher uses aBB ~ 0.2 instead of 0.5 here
nu = np.sqrt(n0 * gBB / mB)
aBB = (mB / (4 * np.pi)) * gBB
xi = (8 * np.pi * n0 * aBB)**(-1 / 2)
print(k_max * xi)
print(5 * mB * xi**2)
print(-3.0 / xi)
print((n0 * aBB * 3)**(-1 / 2) * mB * xi**2)
Params_List = []
mI_Vals = np.array([0.5, 1.0, 2, 5.0])
aIBi_Vals = np.array([-10.0, -5.0, -2.0])
if higherCutoff is True or betterResolution is True:
mI_Vals = np.array([1.0, 5.0])
aIBi_Vals = np.array([-2.0, -1.0])
P_Vals_norm = np.concatenate((np.linspace(0.1, 0.8, 5, endpoint=False), np.linspace(0.8, 1.2, 10, endpoint=False), np.linspace(1.2, 3.0, 12, endpoint=False), np.linspace(3.0, 5.0, 3)))
for mI in mI_Vals:
P_Vals = mI * nu * P_Vals_norm
for aIBi in aIBi_Vals:
for P in P_Vals:
sParams = [mI, mB, n0, gBB]
cParams = [P, aIBi]
if toggleDict['Location'] == 'home':
datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
elif toggleDict['Location'] == 'work':
datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
elif toggleDict['Location'] == 'cluster':
datapath = '/n/scratchlfs/demler_lab/kis/genPol_data/NGridPoints_{:.2E}'.format(NGridPoints_cart)
if higherCutoff is True:
datapath = datapath + '_cutoffRat_{:.2f}'.format(cutoffRat)
if betterResolution is True:
datapath = datapath + '_resRat_{:.2f}'.format(resRat)
gridpath = copy(datapath)
datapath = datapath + '/massRatio={:.1f}'.format(mI / mB)
if toggleDict['Dynamics'] == 'real':
innerdatapath = datapath + '/redyn'
elif toggleDict['Dynamics'] == 'imaginary':
innerdatapath = datapath + '/imdyn'
if toggleDict['Grid'] == 'cartesian':
innerdatapath = innerdatapath + '_cart'
elif toggleDict['Grid'] == 'spherical':
innerdatapath = innerdatapath + '_spherical'
if toggleDict['Coupling'] == 'frohlich':
innerdatapath = innerdatapath + '_froh'
elif toggleDict['Coupling'] == 'twophonon':
innerdatapath = innerdatapath
Params_List.append([sParams, cParams, innerdatapath])
# if os.path.isdir(gridpath) is False:
# os.mkdir(gridpath)
# if os.path.isdir(datapath) is False:
# os.mkdir(datapath)
# if os.path.isdir(innerdatapath) is False:
# os.mkdir(innerdatapath)
print(len(Params_List))
# # ---- COMPUTE DATA ON COMPUTER ----
# runstart = timer()
# for ind, Params in enumerate(Params_List):
# loopstart = timer()
# [sParams, cParams, innerdatapath] = Params_List[ind]
# [mI, mB, n0, gBB] = sParams
# [P, aIBi] = cParams
# dyncart_ds = pf_dynamic_cart.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
# dyncart_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
# loopend = timer()
# print('Index: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(ind, P, aIBi, loopend - loopstart))
# end = timer()
# print('Total Time: {:.2f}'.format(end - runstart))
# ---- COMPUTE DATA ON CLUSTER ----
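    # Each SLURM array task computes one (mI, aIBi, P) parameter combination,
    # selected from Params_List by its SLURM_ARRAY_TASK_ID.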
runstart = timer()
taskCount = int(os.getenv('SLURM_ARRAY_TASK_COUNT'))
taskID = int(os.getenv('SLURM_ARRAY_TASK_ID'))
# taskCount = len(Params_List)
# taskID = 72
if(taskCount > len(Params_List)):
print('ERROR: TASK COUNT MISMATCH')
P = float('nan')
aIBi = float('nan')
sys.exit()
else:
[sParams, cParams, innerdatapath] = Params_List[taskID]
[mI, mB, n0, gBB] = sParams
[P, aIBi] = cParams
dynsph_ds = pf_dynamic_sph.quenchDynamics_DataGeneration(cParams, gParams, sParams, toggleDict)
dynsph_ds.to_netcdf(innerdatapath + '/P_{:.3f}_aIBi_{:.2f}.nc'.format(P, aIBi))
end = timer()
print('Task ID: {:d}, P: {:.2f}, aIBi: {:.2f} Time: {:.2f}'.format(taskID, P, aIBi, end - runstart))
|
[
"pf_dynamic_sph.quenchDynamics_DataGeneration",
"numpy.ceil",
"timeit.default_timer",
"copy.copy",
"numpy.arange",
"numpy.array",
"numpy.linspace",
"Grid.Grid",
"os.getenv",
"sys.exit",
"numpy.sqrt"
] |
[((220, 227), 'timeit.default_timer', 'timer', ([], {}), '()\n', (225, 227), True, 'from timeit import default_timer as timer\n'), ((584, 609), 'Grid.Grid', 'Grid.Grid', (['"""CARTESIAN_3D"""'], {}), "('CARTESIAN_3D')\n", (593, 609), False, 'import Grid\n'), ((889, 926), 'numpy.ceil', 'np.ceil', (['(NGridPoints_desired / Ntheta)'], {}), '(NGridPoints_desired / Ntheta)\n', (896, 926), True, 'import numpy as np\n'), ((975, 1022), 'numpy.linspace', 'np.linspace', (['(0)', 'theta_max', 'Ntheta'], {'retstep': '(True)'}), '(0, theta_max, Ntheta, retstep=True)\n', (986, 1022), True, 'import numpy as np\n'), ((1195, 1238), 'numpy.linspace', 'np.linspace', (['k_min', 'k_max', 'Nk'], {'retstep': '(True)'}), '(k_min, k_max, Nk, retstep=True)\n', (1206, 1238), True, 'import numpy as np\n'), ((1313, 1338), 'Grid.Grid', 'Grid.Grid', (['"""SPHERICAL_2D"""'], {}), "('SPHERICAL_2D')\n", (1322, 1338), False, 'import Grid\n'), ((1872, 1899), 'numpy.arange', 'np.arange', (['(0)', '(tMax + dt)', 'dt'], {}), '(0, tMax + dt, dt)\n', (1881, 1899), True, 'import numpy as np\n'), ((2600, 2622), 'numpy.sqrt', 'np.sqrt', (['(n0 * gBB / mB)'], {}), '(n0 * gBB / mB)\n', (2607, 2622), True, 'import numpy as np\n'), ((2855, 2883), 'numpy.array', 'np.array', (['[0.5, 1.0, 2, 5.0]'], {}), '([0.5, 1.0, 2, 5.0])\n', (2863, 2883), True, 'import numpy as np\n'), ((2900, 2929), 'numpy.array', 'np.array', (['[-10.0, -5.0, -2.0]'], {}), '([-10.0, -5.0, -2.0])\n', (2908, 2929), True, 'import numpy as np\n'), ((6234, 6241), 'timeit.default_timer', 'timer', ([], {}), '()\n', (6239, 6241), True, 'from timeit import default_timer as timer\n'), ((6715, 6802), 'pf_dynamic_sph.quenchDynamics_DataGeneration', 'pf_dynamic_sph.quenchDynamics_DataGeneration', (['cParams', 'gParams', 'sParams', 'toggleDict'], {}), '(cParams, gParams, sParams,\n toggleDict)\n', (6759, 6802), False, 'import pf_dynamic_sph\n'), ((6894, 6901), 'timeit.default_timer', 'timer', ([], {}), '()\n', (6899, 6901), True, 'from timeit import default_timer as timer\n'), ((3005, 3025), 'numpy.array', 'np.array', (['[1.0, 5.0]'], {}), '([1.0, 5.0])\n', (3013, 3025), True, 'import numpy as np\n'), ((3046, 3068), 'numpy.array', 'np.array', (['[-2.0, -1.0]'], {}), '([-2.0, -1.0])\n', (3054, 3068), True, 'import numpy as np\n'), ((6263, 6298), 'os.getenv', 'os.getenv', (['"""SLURM_ARRAY_TASK_COUNT"""'], {}), "('SLURM_ARRAY_TASK_COUNT')\n", (6272, 6298), False, 'import os\n'), ((6317, 6349), 'os.getenv', 'os.getenv', (['"""SLURM_ARRAY_TASK_ID"""'], {}), "('SLURM_ARRAY_TASK_ID')\n", (6326, 6349), False, 'import os\n'), ((6549, 6559), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6557, 6559), False, 'import sys\n'), ((3104, 3144), 'numpy.linspace', 'np.linspace', (['(0.1)', '(0.8)', '(5)'], {'endpoint': '(False)'}), '(0.1, 0.8, 5, endpoint=False)\n', (3115, 3144), True, 'import numpy as np\n'), ((3146, 3187), 'numpy.linspace', 'np.linspace', (['(0.8)', '(1.2)', '(10)'], {'endpoint': '(False)'}), '(0.8, 1.2, 10, endpoint=False)\n', (3157, 3187), True, 'import numpy as np\n'), ((3189, 3230), 'numpy.linspace', 'np.linspace', (['(1.2)', '(3.0)', '(12)'], {'endpoint': '(False)'}), '(1.2, 3.0, 12, endpoint=False)\n', (3200, 3230), True, 'import numpy as np\n'), ((3232, 3256), 'numpy.linspace', 'np.linspace', (['(3.0)', '(5.0)', '(3)'], {}), '(3.0, 5.0, 3)\n', (3243, 3256), True, 'import numpy as np\n'), ((4306, 4320), 'copy.copy', 'copy', (['datapath'], {}), '(datapath)\n', (4310, 4320), False, 'from copy import copy\n')]
|
import os.path
import numpy as np
import matplotlib.pyplot as plt
from .logger import log
from .act_on_image import ActOnImage
from .bpcs_steg import arr_bpcs_complexity, conjugate, max_bpcs_complexity
from .array_message import get_n_message_grids
from .array_grid import get_next_grid_dims
def histogram_of_complexity(arr, grid_size, alpha, comp_fcn):
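    """Histogram the BPCS complexity of every grid in arr; return the figure,
    the number of grids whose complexity bin satisfies comp_fcn(bin, alpha),
    and the total grid count."""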
log.critical('Creating histograms of image complexity...')
max_complexity = max_bpcs_complexity(*grid_size)
vals = [arr_bpcs_complexity(arr[dims]) for dims in get_next_grid_dims(arr, grid_size)]
fig = plt.figure()
ax = fig.add_subplot(111)
ns, bins, patches = ax.hist(vals, 200, facecolor='red', alpha=0.75)
navail = sum([n for n, bin in zip(ns, bins) if comp_fcn(bin, alpha)])
return fig, navail, sum(ns)
def rand_image_complexity(arr, alpha, comp_fcn, grid_size):
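    """Randomly shuffle the bits of every grid whose complexity satisfies
    comp_fcn(complexity, alpha); return the modified array and the count of
    grids changed."""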
n = 0
for dims in get_next_grid_dims(arr, grid_size):
grid = arr[dims]
if comp_fcn(arr_bpcs_complexity(grid), alpha): # < or >
n += 1
init_grid = np.copy(grid)
np.random.shuffle(init_grid.reshape(-1))
arr[dims] = init_grid
log.critical('Conjugated {0} grids'.format(n))
# histogram_of_complexity(arr, params)
return arr, n
def flip_image_complexity(arr, alpha, comp_fcn, grid_size):
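    """Conjugate every grid whose complexity satisfies comp_fcn(complexity, alpha);
    conjugation maps a grid's complexity c to approximately 1 - c. Returns the
    modified array and the count of grids changed."""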
n = 0
for dims in get_next_grid_dims(arr, grid_size):
grid = arr[dims]
if comp_fcn(arr_bpcs_complexity(grid), alpha): # < or >
n += 1
init_grid = np.copy(grid)
arr[dims] = conjugate(grid)
assert abs((1 - arr_bpcs_complexity(init_grid)) - arr_bpcs_complexity(grid)) < 0.01
assert not(arr[dims].tolist() == init_grid.tolist() and alpha != 0.5)
log.critical('Conjugated {0} grids'.format(n))
# histogram_of_complexity(arr, params)
return arr, n
class HistogramComplexityImage(ActOnImage):
def modify(self, alpha, comp_fcn, grid_size=(8,8)):
hist, navail, ntotal = histogram_of_complexity(self.arr, grid_size, alpha, comp_fcn)
log.critical('{0} of {1} grids available with alpha of {2}'.format(navail, ntotal, alpha))
nbits_per_grid = grid_size[0]*grid_size[1]
nbytes = (get_n_message_grids([nbits_per_grid]*int(navail), int(navail))*nbits_per_grid)/8.0
percent = nbytes*1.0/os.path.getsize(self.infile)
log.critical('Approximately {0} bytes of storage space can fit in this vessel image.'.format(nbytes))
log.critical('{0} byte message would utilize {1:.1%} of the vessel image.'.format(nbytes, percent))
return hist
class ComplexifyImage(ActOnImage):
def modify(self, alpha, grid_size=(8,8)):
new_arr = np.array(self.arr, copy=True)
return rand_image_complexity(new_arr, alpha, lambda x,thresh: x>=thresh, grid_size)
# return flip_image_complexity(new_arr, alpha, lambda x,thresh: x>=thresh, grid_size)
class SimplifyImage(ActOnImage):
def modify(self, alpha, grid_size=(8,8)):
new_arr = np.array(self.arr, copy=True)
return rand_image_complexity(new_arr, alpha, lambda x,thresh: x<thresh, grid_size)
# return flip_image_complexity(new_arr, alpha, lambda x,thresh: x<thresh, grid_size)
def histogram(infile, outfile, alpha, comp_fcn):
x = HistogramComplexityImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8)
hist = x.modify(alpha, comp_fcn)
if outfile is not None:
hist.savefig(outfile)
log.critical('Wrote histogram of image complexity to {0}'.format(outfile))
# plt.show()
def complexify(infile, outfile, alpha):
x = ComplexifyImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8)
arr, stats = x.modify(alpha)
x.write(outfile, arr)
return stats
def simplify(infile, outfile, alpha):
x = SimplifyImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8)
arr, stats = x.modify(alpha)
x.write(outfile, arr)
return stats
def capacity(infile, alpha=0.45, outfile=None):
greater = lambda x,thresh: x>=thresh
histogram(infile, outfile, alpha, greater)
|
[
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.copy"
] |
[((575, 587), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (585, 587), True, 'import matplotlib.pyplot as plt\n'), ((2705, 2734), 'numpy.array', 'np.array', (['self.arr'], {'copy': '(True)'}), '(self.arr, copy=True)\n', (2713, 2734), True, 'import numpy as np\n'), ((3019, 3048), 'numpy.array', 'np.array', (['self.arr'], {'copy': '(True)'}), '(self.arr, copy=True)\n', (3027, 3048), True, 'import numpy as np\n'), ((1052, 1065), 'numpy.copy', 'np.copy', (['grid'], {}), '(grid)\n', (1059, 1065), True, 'import numpy as np\n'), ((1520, 1533), 'numpy.copy', 'np.copy', (['grid'], {}), '(grid)\n', (1527, 1533), True, 'import numpy as np\n')]
|
from __future__ import print_function, division
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "bigfatnoob"
import lib
import argparse
import numpy as np
from joblib import Parallel, delayed
import multiprocessing
import pandas as pd
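# Per-significant-digit probe-count limits, passed as cnt_probe_limit to
# lib.pi_needle_triple below.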
CNT_PROBE_LIMITS = {
1: 178.2,
2: 826.2,
3: 3830.1,
4: 17756.5,
5: 82319.1,
6: 381631.3,
7: 1769243.0,
8: 8202208.0
}
def get_file_args():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--seedInit", type=int, default=None, help="Initial seed of experiment")
parser.add_argument("-d", "--digits", type=int, default=3, help="Max significant digits")
parser.add_argument("-p", "--samples", type=int, default=100, help="Number of samples in the experiment")
return parser.parse_args()
def main():
args = get_file_args()
max_signif_digits = args.digits
samples = args.samples
seed_init = args.seedInit
num_cores = multiprocessing.cpu_count()
if seed_init is None:
seed_init = np.random.randint(0, 2 ** 16)
np.random.seed(seed_init)
print("# Seed Init = %d" % seed_init)
print("# Max Signif Digit = %d" % max_signif_digits)
print("# Samples = %d" % samples)
print("# Cores = %d" % num_cores)
print("# Solver = Needles3")
results = pd.DataFrame(columns=["seedInit", "solverName", "signifDigits", "piHat", "OFtol", "error", "isCensored",
"numThrows", "runtime"])
counter = 1
for signif_digit in range(2, max_signif_digits + 1):
print("### For %d significant digits" % signif_digit)
seeds = np.random.randint(0, 2 ** 16, samples)
signif_results = Parallel(n_jobs=num_cores)(delayed(
lib.pi_needle_triple)(1, 1, cnt_probe_limit=CNT_PROBE_LIMITS[signif_digit],
signif_digits=signif_digit, seed=seed, reject_censored=True) for seed in seeds)
for i in range(len(signif_results)):
results.loc[i + counter] = signif_results[i]
counter += len(signif_results)
results.index.name = 'sampleId'
results.columns.name = results.index.name
txt_file_name = 'results-python/fg_asym_pi_signif_needles3_%d_%d.txt' % (max_signif_digits, seed_init)
html_file_name = 'results-python/fg_asym_pi_signif_needles3_%d_%d.html' % (max_signif_digits, seed_init)
results.to_csv(txt_file_name, sep="\t")
results.to_html(html_file_name)
with open(html_file_name) as f:
html_data = f.read()
html_data += "\n"
with open(html_file_name, "wb") as f:
f.write(html_data)
if __name__ == "__main__":
main()
|
[
"pandas.DataFrame",
"os.path.abspath",
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.random.randint",
"joblib.Parallel",
"joblib.delayed",
"multiprocessing.cpu_count"
] |
[((85, 105), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (100, 105), False, 'import os\n'), ((477, 502), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (500, 502), False, 'import argparse\n'), ((973, 1000), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (998, 1000), False, 'import multiprocessing\n'), ((1073, 1098), 'numpy.random.seed', 'np.random.seed', (['seed_init'], {}), '(seed_init)\n', (1087, 1098), True, 'import numpy as np\n'), ((1309, 1442), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['seedInit', 'solverName', 'signifDigits', 'piHat', 'OFtol', 'error',\n 'isCensored', 'numThrows', 'runtime']"}), "(columns=['seedInit', 'solverName', 'signifDigits', 'piHat',\n 'OFtol', 'error', 'isCensored', 'numThrows', 'runtime'])\n", (1321, 1442), True, 'import pandas as pd\n'), ((1041, 1070), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 16)'], {}), '(0, 2 ** 16)\n', (1058, 1070), True, 'import numpy as np\n'), ((1612, 1650), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2 ** 16)', 'samples'], {}), '(0, 2 ** 16, samples)\n', (1629, 1650), True, 'import numpy as np\n'), ((1672, 1698), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'num_cores'}), '(n_jobs=num_cores)\n', (1680, 1698), False, 'from joblib import Parallel, delayed\n'), ((1699, 1728), 'joblib.delayed', 'delayed', (['lib.pi_needle_triple'], {}), '(lib.pi_needle_triple)\n', (1706, 1728), False, 'from joblib import Parallel, delayed\n')]
|
# coding: utf-8
import cv2
import numpy as np
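# Capture face samples from the webcam: detect the face (and eyes) with Haar
# cascades and, when 'q' is pressed on a sufficiently bright frame, save a
# 220x220 grayscale crop until numeroAmostra samples have been collected.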
classificador = cv2.CascadeClassifier("haarcascade-frontalface-default.xml")
classificadorOlho = cv2.CascadeClassifier("haarcascade-eye.xml")
camera = cv2.VideoCapture(0)
amostra = 1
numeroAmostra = 25
id = input('Enter your identifier: ')
largura, altura = 220, 220
print("Capturando as faces....")
while (True):
conectado, imagem = camera.read()
imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
#print(np.average(imagemCinza))
facesDetectadas = classificador.detectMultiScale(imagemCinza, scaleFactor=1.5, minSize=(150, 150))
for (x, y, l, a) in facesDetectadas:
cv2.rectangle(imagem, (x, y), (x+l, y+a), (0, 0, 255), 2)
regiao = imagem[y:y + a, x:x + l]
regiaoCinzaOlho = cv2.cvtColor(regiao, cv2.COLOR_BGR2GRAY)
olhosDetectados = classificadorOlho.detectMultiScale(regiaoCinzaOlho)
for (ox, oy, ol, oa) in olhosDetectados:
cv2.rectangle(regiao, (ox, oy), (ox + ol, oy + oa), (0, 255, 0), 2)
if cv2.waitKey (l) & 0xff == ord('q'):
if np.average(imagemCinza) > 110:
imagemFace = cv2.resize(imagemCinza[y:y + a, x:x +l],(largura, altura))
cv2.imwrite("fotos/pessoa." + str (id) + "." + str(amostra) + ".jpg", imagemFace)
print("[foto" + str (amostra) + " capturada com sucesso]")
amostra += 1
cv2.imshow("Face", imagem)
cv2.waitKey(1)
if (amostra >= numeroAmostra +1):
break
camera.release()
cv2.destroyAllWindows()
|
[
"numpy.average",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.imshow",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows",
"cv2.resize"
] |
[((63, 123), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade-frontalface-default.xml"""'], {}), "('haarcascade-frontalface-default.xml')\n", (84, 123), False, 'import cv2\n'), ((144, 188), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade-eye.xml"""'], {}), "('haarcascade-eye.xml')\n", (165, 188), False, 'import cv2\n'), ((198, 217), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (214, 217), False, 'import cv2\n'), ((1558, 1581), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1579, 1581), False, 'import cv2\n'), ((423, 463), 'cv2.cvtColor', 'cv2.cvtColor', (['imagem', 'cv2.COLOR_BGR2GRAY'], {}), '(imagem, cv2.COLOR_BGR2GRAY)\n', (435, 463), False, 'import cv2\n'), ((1442, 1468), 'cv2.imshow', 'cv2.imshow', (['"""Face"""', 'imagem'], {}), "('Face', imagem)\n", (1452, 1468), False, 'import cv2\n'), ((1473, 1487), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (1484, 1487), False, 'import cv2\n'), ((653, 714), 'cv2.rectangle', 'cv2.rectangle', (['imagem', '(x, y)', '(x + l, y + a)', '(0, 0, 255)', '(2)'], {}), '(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)\n', (666, 714), False, 'import cv2\n'), ((779, 819), 'cv2.cvtColor', 'cv2.cvtColor', (['regiao', 'cv2.COLOR_BGR2GRAY'], {}), '(regiao, cv2.COLOR_BGR2GRAY)\n', (791, 819), False, 'import cv2\n'), ((960, 1027), 'cv2.rectangle', 'cv2.rectangle', (['regiao', '(ox, oy)', '(ox + ol, oy + oa)', '(0, 255, 0)', '(2)'], {}), '(regiao, (ox, oy), (ox + ol, oy + oa), (0, 255, 0), 2)\n', (973, 1027), False, 'import cv2\n'), ((1044, 1058), 'cv2.waitKey', 'cv2.waitKey', (['l'], {}), '(l)\n', (1055, 1058), False, 'import cv2\n'), ((1099, 1122), 'numpy.average', 'np.average', (['imagemCinza'], {}), '(imagemCinza)\n', (1109, 1122), True, 'import numpy as np\n'), ((1163, 1223), 'cv2.resize', 'cv2.resize', (['imagemCinza[y:y + a, x:x + l]', '(largura, altura)'], {}), '(imagemCinza[y:y + a, x:x + l], (largura, altura))\n', (1173, 1223), False, 'import cv2\n')]
|
"""
ttrvna
Author: <NAME>
Institution: University of Missouri Kansas City
Created: 6/20/2019
Edited: 6/27/2019
Python 3.6.0 64-bit (Anaconda 4.3.0)
This file contains the Ttrvna class
which is initialized when someone wants to do an
experiment with the Tektronix TTR506A VNA.
See VNAandPowSup.py if you want to control
the VNA in conjunction with a Power Supply.
"""
# IMPORTS ===================================================================
import visa
import numpy as np
import matplotlib.pyplot as plt
import time
import csv
import instrument as instr
# Ttrvna ===============================================================
class Ttrvna(instr.Instrument):
"""
The Ttrvna class is used for instances of experiments with the Tektronix
TTR506A VNA where the user may set up an automated design of experiment.
Attributes:
_rm : ResourceManager : instance of visa's Resource Manager class
_instr : Resource : instrument, TTR506A VNA in this case
startFreqSweep : str as SI unit : determines start frequency for frequency sweep
stopFreqSweep : str as SI unit : determines stop frequency for frequency sweep
sweepDelay : str as SI unit : determines delay between each sweep
sParam : str : determines which S Parameter is measured
trials : int > 0 : determines the number of trials/sweeps
format : str : determines format for the data to be outputted into
_measuredRange : list of ints : y values for the eventual output, usually s parameter
_freqDomain : list of ints : x values in frequency for the eventual output
_trial : int : current trial number
"""
def __init__(self,start=None,stop=None,delay=None,sParam=None,trials=None,format='mlogarithmic'):
"""
Constructor that initializes attributes of Ttrvna instance.
By default the data format is 'mlogarithmic'.
Parameters:
start : str as SI unit : determines start frequency for frequency sweep
stop : str as SI unit : determines stop frequency for frequency sweep
            delay : str as SI unit : determines delay between each sweep
sParam : str : determines which S Parameter is measured
trials : int > 0 : determines the number of trials/sweeps
format : str : determines format for the data to be outputted into
"""
self._rm = visa.ResourceManager()
self._instr = self._rm.open_resource('GPIB8::1::INSTR')
if start is not None:
self.setStartSweep(start)
if stop is not None:
self.setStopSweep(stop)
if delay is not None:
self.setSweepDelay(delay)
if sParam is not None:
self.setsParam(sParam)
if trials is not None:
self.setTrials(trials)
self.setFormat(format)
self._measuredRange = None
self._freqDomain = None
self._trial = 1
def setStartSweep(self,start):
"""
Setter for Start of Frequency Sweep
Example start = '50 MHz'
"""
self.isSIUnit(start)
self.startFreqSweep = start
def setStopSweep(self,stop):
"""
Setter for Stop of Frequency Sweep
Example stop = '6 GHz'
"""
self.isSIUnit(stop)
self.stopFreqSweep = stop
def setSweepDelay(self,delay):
"""
Setter for delay between each sweep
Example: delay = '1s'
"""
self.isSIUnit(delay)
self.sweepDelay = delay
def setsParam(self,param):
"""
Setter for the S parameter you would like to measure
        sParam must be one of the following as a string:
S11, S12, S21, S22
Example: param = 'S21'
"""
assert param in ['S11','S12','S21','S22']
self.sParam = param
def setTrials(self,trials):
"""
Setter for number of trials/sweeps you would like to run
Example: trials = 40
"""
assert type(trials) == int
assert trials > 0
self.trials = trials
def setFormat(self,format):
"""
Setter for format of outputted data:
Format must be one of the following as a string:
mlogarithmic, phase, gdelay, slinear, slogarithmic, scomplex,
smith, sadmittance, plinear, plogarithmic, polar, mlinear,
swr, real, imaginary, uphase, pphase
Example: format = mlogarithmic
"""
assert type(format) == str
assert format in ["mlogarithmic", "phase", "gdelay", "slinear", "slogarithmic", "scomplex",
"smith", "sadmittance", "plinear", "plogarithmic", "polar", "mlinear",
"swr", "real", "imaginary", "uphase", "pphase"]
self.format = format
def setMeasuredRange(self):
"""
        Takes a measurement according to the inputted parameters after
initializing data acquisition. After the data is appropriately
modified into a list, it is set as the value for the
_measuredRange attribute.
"""
self._initDataAcquisition()
self._instr.write('calculate1:parameter1:define {}'.format(self.sParam))
time.sleep(1) # delay sometimes needed to ensure commands are used in sequence
self._instr.write('calculate1:selected:format %s' % self.format)
measurement = self._instr.query('calculate1:selected:data:fdata?')
if self.isTwoComponents(): # If two components, create list of containing both components
self._measuredRange = self._listify(measurement)
else: # Otherwise, get rid of zero components that the TTR VNA
measurement = self._listify(measurement) # uses to fill the second/imaginary component of
self._measuredRange = self._removeSecondComponent(measurement) # the measurement
def setFreqDomain(self):
"""
        Used to set the frequency domain based on the start
and stop of the frequency sweep. Uses this as the value
for self._freqDomain
"""
start = self.unitConverter(self.startFreqSweep)
stop = self.unitConverter(self.stopFreqSweep)
if self.isTwoComponents(): # If two components, set the domain for half the range
freqDomain = np.linspace(start,stop, num=((len(self._measuredRange))/2)) # because of 1:2 Domain:Range correspondence
else:
freqDomain = np.linspace(start,stop, num=len(self._measuredRange)) # Otherwise, set domain to be same length as
self._freqDomain = freqDomain # the _measuredRange
def makeSweep(self):
"""
        Takes a sweep measurement of the desired value, with start and
        stop defining the span of the sweep and delay being the time
        between sweeps. Logs data in three ways: text file, plot as
        png, and csv.
        The conditional on the trial counter is used because it usually
        takes the VNA a trial to adjust itself to the commanded settings,
        so the first trial is not recorded until a better way is found.
"""
self._configInst()
self.setMeasuredRange()
self.setFreqDomain()
if self._trial > 1: # Ensures that the first trial is not recorded
self._createPlot() # png
self._logger() # txt
self._csvWriter() # csv
self._trial += 1
def makeSweepUnprocessed(self):
"""
Same as makeSweep, but it does not process the data. It merely returns the data
as a list.
"""
self._configInst()
self.setMeasuredRange()
self.setFreqDomain()
if self.isTwoComponents():
self._getMagnitudes()
self._measuredRange = self._magnitudes
self._trial += 1
return [self._freqDomain,self._measuredRange]
def run(self):
"""
Function that you should call to run an experiment after the
constructor has been called.
Makes a sweep for each trial.
"""
for _ in range(self.trials):
self.makeSweep()
time.sleep(self.unitConverter(self.sweepDelay))
print("Done!")
# HELPER FUNCTIONS ==============================================
def _initDataAcquisition(self):
"""
        Initializes data acquisition for the instrument.
"""
self._instr.write('initiate:immediate')
self._instr.query('*opc?')
def _configInst(self):
"""
Configures the instrument to the constants specified
by the user.
Precondition: startFreqSweep < stopFreqSweep
"""
        assert self.unitConverter(self.startFreqSweep) < self.unitConverter(self.stopFreqSweep)  # compare numeric values, not strings
self._instr.timeout = 10000
self._instr.encoding = 'latin_1'
self._instr.write_termination = None
self._instr.read_termination = '\n'
if self.trials is None or self._trial == 1:
print(self._instr.query('*idn?'))
self._instr.write('*rst') # turns instrument settings to factory default
self._instr.write('*cls') # Clears these analyzer status data structures:
# Event Queue, Status Byte Register (except the MAV bit), Standard Event Status Register (SESR)
self._instr.write('abort') # Aborts the current measurement and changes the trigger sequence to idle state for all channel
self._instr.write('display:enable 1')
self._instr.write('sense1:frequency:start {}'.format(self.unitConverter(self.startFreqSweep)))
self._instr.write('sense1:frequency:stop {}'.format(self.unitConverter(self.stopFreqSweep)))
self._instr.write('sense1:sweep:delay {}'.format(self.unitConverter(self.sweepDelay)))
def _createPlot(self):
"""
        Creates a plot from the frequency domain and measured range, then saves
the plot in Graphs/ with a file name corresponding to
the date and time.
"""
fig = plt.figure(1, figsize=(20, 10))
ax = fig.add_subplot(111, facecolor='k')
if self.isTwoComponents():
ax.plot(self._freqDomain,self._magnitudes,'y')
lower = min(self._magnitudes)
upper = max(self._magnitudes)
else:
ax.plot(self._freqDomain, self._measuredRange, 'y') # just plot _freqDomain
lower = min(self._measuredRange) # against _measuredRange
upper = max(self._measuredRange)
ax.set_title('Amplitude vs Frequency')
ax.set_ylabel('Amplitude (dBm)')
ax.set_xlabel('Freq (Hz)')
ax.set_xlim(self._freqDomain[0],self._freqDomain[-1])
ax.set_ylim(lower-(0.2*(upper-lower)),upper+(0.2*(upper-lower))) # Scaled to fit range
# Save Plot
filenameG = self.getDateFormatted() + ".png"
filenameG = "Graphs/" + filenameG
plt.savefig(filenameG) # Plot saved in directory named Graphs located in same directory as pyTekVNA
plt.clf()
def _logger(self):
"""
        Writes the frequency domain and measured range to a text log
        in Logs/ with a filename corresponding to its
        timestamp.
"""
# Create filename for log
filenameF = self.getDateFormatted() + ".txt"
filenameF = "Logs/" + filenameF
f = open(filenameF, "a+") # Log saved in directory named logs located in same directory as this file
# Fill log with contents
if self.isTwoComponents():
for i in range(len(self._freqDomain)):
line = str(i) + ": " + str(self._freqDomain[i])+ "\t" + \
str(self._measuredRange[2*i]) + "\t" + str(self._measuredRange[(2*i)+1]) # If two components, log each
f.write(line) # pair on same line against
f.write('\n') # frequency
else:
for i in range(len(self._freqDomain)): # Otherwise, log 1:1 domain:range
line = str(i) + ": " + str(self._freqDomain[i]) + "\t" + str(self._measuredRange[i])
f.write(line)
f.write('\n')
def _csvWriter(self):
"""
        Writes the frequency domain and measured range to a csv file by
        making the data into a table first. Titled based on the date.
The CSV is comma delimited.
"""
# Make data into table
table = []
if self.isTwoComponents(): # Same as _logger()
for i in range(len(self._freqDomain)): # If two components, log each
table.append([self._freqDomain[i],self._measuredRange[2*i],self._measuredRange[(2*i)+1]]) # pair on same line against
else: # frequency
for i in range(len(self._freqDomain)):
table.append([self._freqDomain[i],self._measuredRange[i]]) # Otherwise, log 1:1 domain:range
# Write to CSV
filename = 'CSVs/' + self.getDateFormatted() + '.csv'
with open(filename, 'w', newline='') as csvfile:
dataWriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for i in range(len(table)):
dataWriter.writerow(table[i])
def isTwoComponents(self):
"""
Determines if the measured output is one or
two components based on format
        Return: True if the format is two-component
"""
if self.format in ["slinear","slogarithmic","scomplex",
"smith","sadmittance","plinear",
"plogarithmic","polar"]:
return True
else:
return False
def _inverseFFT(self):
"""
Calculates the inverse fourier transform of the measured range.
Precondition: self.isTwoComponents() == True
Return: invFFT as list of complex values
"""
assert self.isTwoComponents() == True
return(np.fft.ifft(self._measuredRange))
@staticmethod
def _listify(initial):
"""
        Takes a comma-separated string and breaks each component
        into its own element in a list. Then, it
        returns the list as float elements.
Return: listified version of initial
Precondition: initial is a string with commas and numbers between commas
Parameters:
initial : str : this is what is listified
"""
assert type(initial) == str
assert ',' in initial
listified = []
while ',' in initial:
pos = initial.find(',')
listified.append(initial[0:pos])
initial = initial[pos+1:]
if ',' not in initial:
listified.append(initial)
final = []
for item in listified:
final.append(float(item))
return final
@staticmethod
def _removeSecondComponent(values):
"""
Removes the second component in a list of complex values assuming
that every other value starting from index one ought to be removed.
Return: list with second component / imaginary values removed
Parameters:
values : list : the complex values are removed from this list
"""
assert type(values) == list
newList = []
for i in range(len(values)):
if (i%2) == 0:
newList.append(values[i])
return newList
def _getMagnitudes(self):
"""
Gets magnitude of measured range if measured range is complex and stores
it in self._magnitudes as a list
"""
assert self.isTwoComponents()
self._magnitudes = []
realPart = []
complexPartTemp = []
complexPart = []
for i in range(len(self._measuredRange)):
if (i%2) == 1:
complexPartTemp.append(self._measuredRange[i])
else:
realPart.append(self._measuredRange[i])
for i in range(len(complexPartTemp)):
complexPart.append(complex(str(complexPartTemp[i])+'j'))
for i in range(len(realPart)):
self._magnitudes.append(abs(realPart[i]+complexPart[i]))
# EXECUTION ============================================
if __name__ == "__main__":
test = Ttrvna(start='50 MHz', stop='6 GHz', delay='8s', sParam='S21',trials=1,format='smith')
test.run()
test._inverseFFT()
|
[
"numpy.fft.ifft",
"csv.writer",
"matplotlib.pyplot.clf",
"time.sleep",
"matplotlib.pyplot.figure",
"visa.ResourceManager",
"matplotlib.pyplot.savefig"
] |
[((2778, 2800), 'visa.ResourceManager', 'visa.ResourceManager', ([], {}), '()\n', (2798, 2800), False, 'import visa\n'), ((5718, 5731), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5728, 5731), False, 'import time\n'), ((10873, 10904), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(20, 10)'}), '(1, figsize=(20, 10))\n', (10883, 10904), True, 'import matplotlib.pyplot as plt\n'), ((12099, 12121), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filenameG'], {}), '(filenameG)\n', (12110, 12121), True, 'import matplotlib.pyplot as plt\n'), ((12226, 12235), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (12233, 12235), True, 'import matplotlib.pyplot as plt\n'), ((15676, 15708), 'numpy.fft.ifft', 'np.fft.ifft', (['self._measuredRange'], {}), '(self._measuredRange)\n', (15687, 15708), True, 'import numpy as np\n'), ((14754, 14815), 'csv.writer', 'csv.writer', (['csvfile'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_MINIMAL'}), "(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\n", (14764, 14815), False, 'import csv\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the
# Apode Project (https://github.com/mchalela/apode).
# Copyright (c) 2020, <NAME> and <NAME>
# License: MIT
# Full Text: https://github.com/ngrion/apode/blob/master/LICENSE.txt
from apode import ApodeData
from apode import datasets
from apode.concentration import ConcentrationMeasures
from apode.inequality import InequalityMeasures
from apode.polarization import PolarizationMeasures
from apode.poverty import PovertyMeasures
import numpy as np
import pandas as pd
import pytest
def test_df_converter():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
assert isinstance(ad.data, pd.DataFrame)
def test_invalid():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(AttributeError):
ad.poverty("foo")
def test_empty_data():
df = pd.DataFrame({"x" ", []"})
with pytest.raises(ValueError):
ApodeData(df, "x")
def test_income_column_validator():
random = np.random.RandomState(seed=42)
x = random.uniform(size=300)
df1 = pd.DataFrame({"x": x})
with pytest.raises(ValueError):
ApodeData(df1, income_column="y")
def test_call_poverty():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pline = 0.3
pov = PovertyMeasures(ad)
assert pov.idf.data.equals(ad.data)
assert pov.headcount(pline) == ad.poverty.headcount(pline)
def test_call_inequality():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
ineq = InequalityMeasures(ad)
assert ineq.idf.data.equals(ad.data)
assert ineq.gini() == ad.inequality.gini()
def test_call_polarization():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
pol = PolarizationMeasures(ad)
assert pol.idf.data.equals(ad.data)
assert pol.wolfson() == ad.polarization.wolfson()
def test_call_concentration():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
conc = ConcentrationMeasures(ad)
assert conc.idf.data.equals(ad.data)
assert conc.rosenbluth() == ad.concentration.rosenbluth()
def test_getitem_numeric_slices():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
expected1 = ad.data[2:5]
result1 = ad[2:5].data
expected2 = ad.data[:-1]
result2 = ad[:-1].data
expected3 = ad.data[:]
result3 = ad[:].data
assert expected1.equals(result1)
assert expected2.equals(result2)
assert expected3.equals(result3)
def test_getitem_column_slice():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pytest.raises(AttributeError):
ad["x"]
with pytest.raises(KeyError):
ad["y"]
with pytest.raises(KeyError):
ad["income_column"]
def test_getattr():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
assert ad.shape == ad.data.shape
np.testing.assert_array_equal(ad.sum(), ad.data.sum())
def test_repr():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pd.option_context("display.show_dimensions", False):
df_body = repr(ad.data).splitlines()
footer = ad._get_footer()
expected = "\n".join(df_body + [footer])
assert repr(ad) == expected
def test_repr_html():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
with pd.option_context("display.show_dimensions", False):
df_html = ad.data._repr_html_()
ad_id = id(ad)
footer = ad._get_footer(html=True)
parts = [
f'<div class="apode-data-container" id={ad_id}>',
df_html,
footer,
"</div>",
]
expected = "".join(parts)
assert ad._repr_html_() == expected
def test_dir():
ad = datasets.make_uniform(seed=42, size=300, mu=1, nbin=None)
for a in dir(ad):
assert hasattr(ad, a)
|
[
"pandas.DataFrame",
"pandas.option_context",
"apode.datasets.make_uniform",
"apode.inequality.InequalityMeasures",
"apode.ApodeData",
"numpy.random.RandomState",
"pytest.raises",
"apode.polarization.PolarizationMeasures",
"apode.concentration.ConcentrationMeasures",
"apode.poverty.PovertyMeasures"
] |
[((599, 656), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (620, 656), False, 'from apode import datasets\n'), ((733, 790), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (754, 790), False, 'from apode import datasets\n'), ((891, 914), 'pandas.DataFrame', 'pd.DataFrame', (["{'x, []'}"], {}), "({'x, []'})\n", (903, 914), True, 'import pandas as pd\n'), ((1032, 1062), 'numpy.random.RandomState', 'np.random.RandomState', ([], {'seed': '(42)'}), '(seed=42)\n', (1053, 1062), True, 'import numpy as np\n'), ((1106, 1128), 'pandas.DataFrame', 'pd.DataFrame', (["{'x': x}"], {}), "({'x': x})\n", (1118, 1128), True, 'import pandas as pd\n'), ((1243, 1300), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (1264, 1300), False, 'from apode import datasets\n'), ((1327, 1346), 'apode.poverty.PovertyMeasures', 'PovertyMeasures', (['ad'], {}), '(ad)\n', (1342, 1346), False, 'from apode.poverty import PovertyMeasures\n'), ((1489, 1546), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (1510, 1546), False, 'from apode import datasets\n'), ((1558, 1580), 'apode.inequality.InequalityMeasures', 'InequalityMeasures', (['ad'], {}), '(ad)\n', (1576, 1580), False, 'from apode.inequality import InequalityMeasures\n'), ((1710, 1767), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (1731, 1767), False, 'from apode import datasets\n'), ((1778, 1802), 'apode.polarization.PolarizationMeasures', 'PolarizationMeasures', (['ad'], {}), '(ad)\n', (1798, 1802), False, 'from apode.polarization import PolarizationMeasures\n'), ((1939, 1996), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (1960, 1996), False, 'from apode import datasets\n'), ((2008, 2033), 'apode.concentration.ConcentrationMeasures', 'ConcentrationMeasures', (['ad'], {}), '(ad)\n', (2029, 2033), False, 'from apode.concentration import ConcentrationMeasures\n'), ((2183, 2240), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (2204, 2240), False, 'from apode import datasets\n'), ((2560, 2617), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (2581, 2617), False, 'from apode import datasets\n'), ((2817, 2874), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (2838, 2874), False, 'from apode import datasets\n'), ((2999, 3056), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (3020, 3056), False, 'from apode import datasets\n'), ((3304, 3361), 'apode.datasets.make_uniform', 
'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (3325, 3361), False, 'from apode import datasets\n'), ((3748, 3805), 'apode.datasets.make_uniform', 'datasets.make_uniform', ([], {'seed': '(42)', 'size': '(300)', 'mu': '(1)', 'nbin': 'None'}), '(seed=42, size=300, mu=1, nbin=None)\n', (3769, 3805), False, 'from apode import datasets\n'), ((800, 829), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (813, 829), False, 'import pytest\n'), ((927, 952), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (940, 952), False, 'import pytest\n'), ((962, 980), 'apode.ApodeData', 'ApodeData', (['df', '"""x"""'], {}), "(df, 'x')\n", (971, 980), False, 'from apode import ApodeData\n'), ((1138, 1163), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1151, 1163), False, 'import pytest\n'), ((1173, 1206), 'apode.ApodeData', 'ApodeData', (['df1'], {'income_column': '"""y"""'}), "(df1, income_column='y')\n", (1182, 1206), False, 'from apode import ApodeData\n'), ((2627, 2656), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2640, 2656), False, 'import pytest\n'), ((2683, 2706), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2696, 2706), False, 'import pytest\n'), ((2733, 2756), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (2746, 2756), False, 'import pytest\n'), ((3066, 3117), 'pandas.option_context', 'pd.option_context', (['"""display.show_dimensions"""', '(False)'], {}), "('display.show_dimensions', False)\n", (3083, 3117), True, 'import pandas as pd\n'), ((3371, 3422), 'pandas.option_context', 'pd.option_context', (['"""display.show_dimensions"""', '(False)'], {}), "('display.show_dimensions', False)\n", (3388, 3422), True, 'import pandas as pd\n')]
|
import numpy as np
from nltk import wordpunct_tokenize
import nltk
import itertools
import operator
import sklearn
import re, string
import math
SENTENCE_START_TOKEN = "sentence_<PASSWORD>"
SENTENCE_END_TOKEN = "sentence_<PASSWORD>"
UNKNOWN_TOKEN = "<PASSWORD>"
def load_data(loc='./data/'):
trainloc = loc + '20_news_group_sentences.txt'
sentences = []
with open(trainloc, 'r', encoding='utf8') as f:
for line in f:
sentences.append("%s %s %s" % (SENTENCE_START_TOKEN, line, SENTENCE_END_TOKEN))
return sentences
def build_dictionary(loc='./data/', vocabulary_size=-1):
trainloc = loc + '20_news_group_sentences.txt'
document_frequency = {}
total_document = 0
with open(trainloc, 'r', encoding='utf8') as f:
for line in f:
sentence = my_tokenizer(line)
for token in set(sentence):
if token in document_frequency:
document_frequency[token] += 1
else:
document_frequency[token] = 1
total_document += 1
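    # Convert document counts to inverse document frequency: log(N / df),
    # so rarer tokens receive larger weights.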
for key, value in document_frequency.items():
document_frequency[key] = math.log(total_document / document_frequency[key])
vocab = sorted(document_frequency.items(), key=operator.itemgetter(1), reverse=True)
word_to_index = {}
index_to_word = {}
word_to_index[SENTENCE_START_TOKEN] = 0
word_to_index[SENTENCE_END_TOKEN] = 1
word_to_index[UNKNOWN_TOKEN] = 2
index_to_word[0] = SENTENCE_START_TOKEN
index_to_word[1] = SENTENCE_END_TOKEN
index_to_word[2] = UNKNOWN_TOKEN
counter = 3
for key, value in vocab:
if len(key) < 4:
continue
elif counter == vocabulary_size:
break
word_to_index[key] = counter
index_to_word[counter] = key
counter += 1
return word_to_index, index_to_word
def my_tokenizer(input):
token_list = []
tokens = wordpunct_tokenize(input.lower())
token_list.extend([x for x in tokens if not re.fullmatch('[' + string.punctuation + ']+', x)])
return token_list
def get_train_data(vocabulary_size):
word_to_index, index_to_word = build_dictionary(vocabulary_size=vocabulary_size)
sentences = load_data()
sentences_tokenized = [my_tokenizer(sent) for sent in sentences]
for i, sent in enumerate(sentences_tokenized):
sentences_tokenized[i] = [w if w in word_to_index else UNKNOWN_TOKEN for w in sent]
sentences_indices = []
for sentence in sentences_tokenized:
sentences_indices.append([word_to_index[word] for word in sentence])
return sentences_indices, word_to_index, index_to_word
def get_train_data_reversed(vocabulary_size):
sentences_indices, word_to_index, index_to_word = get_train_data(vocabulary_size)
sentences_indices_reversed = []
for index_list in sentences_indices:
temp = []
temp.extend(index_list)
temp.reverse()
sentences_indices_reversed.append(temp)
return sentences_indices_reversed, word_to_index, index_to_word
def get_train_sentences(vocabulary_size):
sentences_indices, word_to_index, index_to_word = get_train_data(vocabulary_size)
all_sentences = []
all_sentences.extend(sentences_indices)
x_train = np.asarray([[w for w in sentence[:-1]] for sentence in all_sentences])
y_train = np.asarray([[w for w in sentence[1:]] for sentence in all_sentences])
return x_train, y_train, word_to_index, index_to_word
def get_train_sentences_reversed(vocabulary_size):
sentences_indices_reversed, word_to_index, index_to_word = get_train_data_reversed(vocabulary_size)
all_sentences = []
all_sentences.extend(sentences_indices_reversed)
x_train = np.asarray([[w for w in sentence[:-1]] for sentence in all_sentences])
y_train = np.asarray([[w for w in sentence[1:]] for sentence in all_sentences])
return x_train, y_train, word_to_index, index_to_word
|
[
"math.log",
"numpy.asarray",
"operator.itemgetter",
"re.fullmatch"
] |
[((3283, 3353), 'numpy.asarray', 'np.asarray', (['[[w for w in sentence[:-1]] for sentence in all_sentences]'], {}), '([[w for w in sentence[:-1]] for sentence in all_sentences])\n', (3293, 3353), True, 'import numpy as np\n'), ((3368, 3437), 'numpy.asarray', 'np.asarray', (['[[w for w in sentence[1:]] for sentence in all_sentences]'], {}), '([[w for w in sentence[1:]] for sentence in all_sentences])\n', (3378, 3437), True, 'import numpy as np\n'), ((3745, 3815), 'numpy.asarray', 'np.asarray', (['[[w for w in sentence[:-1]] for sentence in all_sentences]'], {}), '([[w for w in sentence[:-1]] for sentence in all_sentences])\n', (3755, 3815), True, 'import numpy as np\n'), ((3830, 3899), 'numpy.asarray', 'np.asarray', (['[[w for w in sentence[1:]] for sentence in all_sentences]'], {}), '([[w for w in sentence[1:]] for sentence in all_sentences])\n', (3840, 3899), True, 'import numpy as np\n'), ((1162, 1212), 'math.log', 'math.log', (['(total_document / document_frequency[key])'], {}), '(total_document / document_frequency[key])\n', (1170, 1212), False, 'import math\n'), ((1265, 1287), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (1284, 1287), False, 'import operator\n'), ((2025, 2073), 're.fullmatch', 're.fullmatch', (["('[' + string.punctuation + ']+')", 'x'], {}), "('[' + string.punctuation + ']+', x)\n", (2037, 2073), False, 'import re, string\n')]
|
import numpy as np
import matplotlib.pyplot as plt
plt.xlabel("time")
plt.ylabel("Rms")
plt.yscale("log")
directory = './pyplot/'
colors = ['r', 'g', 'b']
N = ['256', '512', '1024']
for n, col in zip(N, colors):
time, rms = np.loadtxt(directory + 'pde_' + n + '.txt', unpack = True)
plt.plot(time, rms, marker = ".",linestyle = "none", c = col)
plt.legend([r'$2^8$', r'$2^9$', r'$2^{10}$'])
plt.savefig('./output/pde_rms.pdf');
|
[
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((55, 73), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time"""'], {}), "('time')\n", (65, 73), True, 'import matplotlib.pyplot as plt\n'), ((74, 91), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Rms"""'], {}), "('Rms')\n", (84, 91), True, 'import matplotlib.pyplot as plt\n'), ((92, 109), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (102, 109), True, 'import matplotlib.pyplot as plt\n'), ((355, 397), 'matplotlib.pyplot.legend', 'plt.legend', (["['$2^8$', '$2^9$', '$2^{10}$']"], {}), "(['$2^8$', '$2^9$', '$2^{10}$'])\n", (365, 397), True, 'import matplotlib.pyplot as plt\n'), ((401, 436), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./output/pde_rms.pdf"""'], {}), "('./output/pde_rms.pdf')\n", (412, 436), True, 'import matplotlib.pyplot as plt\n'), ((231, 287), 'numpy.loadtxt', 'np.loadtxt', (["(directory + 'pde_' + n + '.txt')"], {'unpack': '(True)'}), "(directory + 'pde_' + n + '.txt', unpack=True)\n", (241, 287), True, 'import numpy as np\n'), ((291, 347), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'rms'], {'marker': '"""."""', 'linestyle': '"""none"""', 'c': 'col'}), "(time, rms, marker='.', linestyle='none', c=col)\n", (299, 347), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import imagezmq
import imutils
import cv2
import time
import threading
# initialize the ImageHub object
imageHub = imagezmq.ImageHub()
thread_imageHub = imagezmq.ImageHub(open_port='tcp://*:5556')
thread_frame = None
def thread_recv():
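    # Receive a single frame from the second ImageHub (port 5556) on a background
    # thread and store it in the module-level thread_frame for the main loop.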
global rpiName, thread_frame
(rpiName, thread_frame) = thread_imageHub.recv_image()
thread_imageHub.send_reply(b'OK')
print('here')
# start looping over all the frames
time.sleep(10)
start = time.time()
num_of_frames = 0
while True:
# receive RPi name and frame from the RPi and acknowledge
# the receipt
th = threading.Thread(target=thread_recv)
th.start()
(rpiName, frame) = imageHub.recv_image()
# print(time.time() - start)
# print(frame.shape)
imageHub.send_reply(b'OK')
th.join()
# frame = imutils.resize(frame, width=400)
num_of_frames += 1
cv2.imshow('frame', np.vstack((thread_frame, frame)))
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
# do a bit of cleanup
fps = num_of_frames / (time.time() - start)
print("fps is: ")
print(fps)
cv2.destroyAllWindows()
|
[
"threading.Thread",
"cv2.waitKey",
"imagezmq.ImageHub",
"time.time",
"time.sleep",
"cv2.destroyAllWindows",
"numpy.vstack"
] |
[((136, 155), 'imagezmq.ImageHub', 'imagezmq.ImageHub', ([], {}), '()\n', (153, 155), False, 'import imagezmq\n'), ((174, 217), 'imagezmq.ImageHub', 'imagezmq.ImageHub', ([], {'open_port': '"""tcp://*:5556"""'}), "(open_port='tcp://*:5556')\n", (191, 217), False, 'import imagezmq\n'), ((443, 457), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (453, 457), False, 'import time\n'), ((466, 477), 'time.time', 'time.time', ([], {}), '()\n', (475, 477), False, 'import time\n'), ((1097, 1120), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1118, 1120), False, 'import cv2\n'), ((598, 634), 'threading.Thread', 'threading.Thread', ([], {'target': 'thread_recv'}), '(target=thread_recv)\n', (614, 634), False, 'import threading\n'), ((895, 927), 'numpy.vstack', 'np.vstack', (['(thread_frame, frame)'], {}), '((thread_frame, frame))\n', (904, 927), True, 'import numpy as np\n'), ((939, 953), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (950, 953), False, 'import cv2\n'), ((1047, 1058), 'time.time', 'time.time', ([], {}), '()\n', (1056, 1058), False, 'import time\n')]
|
import sys
sys.path.append('../')
sys.path.append('../apex')
import torch
import numpy as np
from nltk.tokenize import word_tokenize
from sklearn.metrics import precision_recall_fscore_support
from tqdm import tqdm
import argparse
from bert_nli import BertNLIModel
from utils.nli_data_reader import NLIDataReader
def evaluate(model, test_data, checkpoint, mute=False, test_bs=10):
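    """Run the model over test_data and report accuracy plus per-class
    precision/recall/F1 (labels: 0 contradiction, 1 entailment, 2 neutral)."""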
model.eval()
sent_pairs = [test_data[i].get_texts() for i in range(len(test_data))]
all_labels = [test_data[i].get_label() for i in range(len(test_data))]
with torch.no_grad():
_, probs = model(sent_pairs,checkpoint,bs=test_bs)
all_predict = [np.argmax(pp) for pp in probs]
assert len(all_predict) == len(all_labels)
acc = len([i for i in range(len(all_labels)) if all_predict[i]==all_labels[i]])*1./len(all_labels)
prf = precision_recall_fscore_support(all_labels, all_predict, average=None, labels=[0,1,2])
if not mute:
print('==>acc<==', acc)
print('label meanings: 0: contradiction, 1: entail, 2: neutral')
print('==>precision-recall-f1<==\n', prf)
return acc
def parse_args():
ap = argparse.ArgumentParser("arguments for bert-nli evaluation")
ap.add_argument('-b','--batch_size',type=int,default=100,help='batch size')
ap.add_argument('-g','--gpu',type=int,default=1,help='run the model on gpu (1) or not (0)')
ap.add_argument('-cp','--checkpoint',type=int,default=0,help='run the model with checkpointing (1) or not (0)')
ap.add_argument('-tm','--trained_model',type=str,default='default',help='path to the trained model you want to test; if set as "default", it will find in output xx.state_dict, where xx is the bert-type you specified')
ap.add_argument('-bt','--bert_type',type=str,default='bert-large',help='model you want to test; make sure this is consistent with your trained model')
ap.add_argument('--hans',type=int,default=0,help='use hans dataset (1) or not (0)')
args = ap.parse_args()
return args.batch_size, args.gpu, args.trained_model, args.checkpoint, args.bert_type, args.hans
if __name__ == '__main__':
batch_size, gpu, mpath, checkpoint, bert_type, hans = parse_args()
if mpath == 'default': mpath = 'output/{}.state_dict'.format(bert_type)
gpu = bool(gpu)
hans = bool(hans)
checkpoint = bool(checkpoint)
print('=====Arguments=====')
print('bert type:\t{}'.format(bert_type))
print('trained model path:\t{}'.format(mpath))
print('gpu:\t{}'.format(gpu))
print('checkpoint:\t{}'.format(checkpoint))
print('batch size:\t{}'.format(batch_size))
print('hans data:\t{}'.format(hans))
# Read the dataset
nli_reader = NLIDataReader('./datasets/AllNLI')
test_data = nli_reader.get_examples('dev.gz') #,max_examples=50)
if hans:
nli_reader = NLIDataReader('./datasets/Hans')
test_data += nli_reader.get_hans_examples('heuristics_evaluation_set.txt')
model = BertNLIModel(model_path=mpath,batch_size=batch_size,bert_type=bert_type)
print('test data size: {}'.format(len(test_data)))
evaluate(model,test_data,checkpoint,test_bs=batch_size)
|
[
"sys.path.append",
"argparse.ArgumentParser",
"numpy.argmax",
"utils.nli_data_reader.NLIDataReader",
"bert_nli.BertNLIModel",
"torch.no_grad",
"sklearn.metrics.precision_recall_fscore_support"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((34, 60), 'sys.path.append', 'sys.path.append', (['"""../apex"""'], {}), "('../apex')\n", (49, 60), False, 'import sys\n'), ((848, 940), 'sklearn.metrics.precision_recall_fscore_support', 'precision_recall_fscore_support', (['all_labels', 'all_predict'], {'average': 'None', 'labels': '[0, 1, 2]'}), '(all_labels, all_predict, average=None,\n labels=[0, 1, 2])\n', (879, 940), False, 'from sklearn.metrics import precision_recall_fscore_support\n'), ((1153, 1213), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""arguments for bert-nli evaluation"""'], {}), "('arguments for bert-nli evaluation')\n", (1176, 1213), False, 'import argparse\n'), ((2695, 2729), 'utils.nli_data_reader.NLIDataReader', 'NLIDataReader', (['"""./datasets/AllNLI"""'], {}), "('./datasets/AllNLI')\n", (2708, 2729), False, 'from utils.nli_data_reader import NLIDataReader\n'), ((2963, 3037), 'bert_nli.BertNLIModel', 'BertNLIModel', ([], {'model_path': 'mpath', 'batch_size': 'batch_size', 'bert_type': 'bert_type'}), '(model_path=mpath, batch_size=batch_size, bert_type=bert_type)\n', (2975, 3037), False, 'from bert_nli import BertNLIModel\n'), ((561, 576), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (574, 576), False, 'import torch\n'), ((656, 669), 'numpy.argmax', 'np.argmax', (['pp'], {}), '(pp)\n', (665, 669), True, 'import numpy as np\n'), ((2834, 2866), 'utils.nli_data_reader.NLIDataReader', 'NLIDataReader', (['"""./datasets/Hans"""'], {}), "('./datasets/Hans')\n", (2847, 2866), False, 'from utils.nli_data_reader import NLIDataReader\n')]
|
import numpy as np
def step_function(x):
    # Heaviside step activation: 1 where x > 0, otherwise 0.
    return np.array(x > 0, dtype=int)
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
def relu(x):
    return np.maximum(0, x)
def cross_entropy_error(y, t):
    # Small offset keeps np.log away from log(0).
    delta = 1e-7
    return -np.sum(t * np.log(y + delta))
def softmax(a):
    # Subtracting the maximum keeps np.exp numerically stable.
    c = np.max(a)
    exp_a = np.exp(a - c)
    sum_exp_a = np.sum(exp_a)
    y = exp_a / sum_exp_a
    return y
def numerical_gradient(f, x):
    # Central-difference approximation of the derivative of f at x.
    delta = 1e-4
    return (f(x + delta) - f(x - delta)) / (delta * 2)
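if __name__ == '__main__':
    # Quick sanity check (illustrative only, not part of the original snippet):
    # the central difference computed by numerical_gradient should match the
    # analytic derivative of the sigmoid, sigmoid'(x) = sigmoid(x) * (1 - sigmoid(x)),
    # which equals 0.25 at x = 0.
    approx = numerical_gradient(sigmoid, 0.0)
    exact = sigmoid(0.0) * (1 - sigmoid(0.0))
    print(approx, exact)  # both values are approximately 0.25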
|
[
"numpy.sum",
"numpy.maximum",
"numpy.log",
"numpy.max",
"numpy.array",
"numpy.exp"
] |
[((53, 82), 'numpy.array', 'np.array', (['(x > 0)'], {'dtype': 'np.int'}), '(x > 0, dtype=np.int)\n', (61, 82), True, 'import numpy as np\n'), ((159, 175), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (169, 175), True, 'import numpy as np\n'), ((293, 302), 'numpy.max', 'np.max', (['a'], {}), '(a)\n', (299, 302), True, 'import numpy as np\n'), ((315, 328), 'numpy.exp', 'np.exp', (['(a - c)'], {}), '(a - c)\n', (321, 328), True, 'import numpy as np\n'), ((345, 358), 'numpy.sum', 'np.sum', (['exp_a'], {}), '(exp_a)\n', (351, 358), True, 'import numpy as np\n'), ((121, 131), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (127, 131), True, 'import numpy as np\n'), ((249, 266), 'numpy.log', 'np.log', (['(y + delta)'], {}), '(y + delta)\n', (255, 266), True, 'import numpy as np\n')]
|
#!/usr/bin/python
"""Bayesian model-based change detection for input-output sequence data
The Bayesian change-point detection model (BCDM) class implements a recursive
algorithm for partitioning a sequence of real-valued input-output data into
non-overlapping segments. The segment boundaries are chosen under the
assumption that, within each segment, the data follow a multi-variate linear
model.
Segmentation is carried out in an online fashion by recursively updating a set
of hypotheses. The hypotheses capture the belief about the current segment,
e.g. its duration and the linear relationship between inputs and outputs, given
all the data so far. Each time a new pair of data is received, the hypotheses
are propagated and re-weighted to reflect this new knowledge.
.. codeauthor:: <NAME> <<EMAIL>>
.. codeauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
from numpy import linalg
from numpy import random
from scipy import special
class MatrixVariateNormalInvGamma(object):
"""Matrix-variate normal, matrix-variate inverse gamma distribution
The matrix-variate normal, inverse-gamma distribution is the conjugate
prior for a matrix-variate normal distribution. As a result the
distribution can be used in Bayesian estimation of the location and scale
parameters of the matrix-variate normal distribution.
"""
def __init__(self, mu, omega, sigma, eta):
# Get size of data.
m, n = np.shape(mu)
self.__m, self.__n = m, n
# Check that the location parameter is a matrix of finite numbers.
if not (np.ndim(mu) == 2 and
np.shape(mu) == (m, n) and
not np.isnan(mu).any() and
np.isfinite(mu).all()):
msg = 'The location parameter must be a matrix of finite numbers.'
raise Exception(msg)
# Check that the scale parameter is a symmetric, positive-definite
# matrix.
if not (np.ndim(omega) == 2 and
np.shape(omega) == (m, m) and
not np.isnan(omega).any() and
np.isfinite(omega).all() and
np.allclose(np.transpose(omega), omega) and
linalg.det(omega) > 0.0):
msg = 'The scale parameter must be a symmetric, positive-definite'
msg += ' matrix.'
raise Exception(msg)
# Check that the dispersion parameter is a symmetric, positive-definite
# matrix.
if not (np.ndim(sigma) == 2 and
np.shape(sigma) == (n, n) and
not np.isnan(sigma).any() and
np.isfinite(sigma).all() and
np.allclose(np.transpose(sigma), sigma) and
linalg.det(sigma) > 0.0):
msg = 'The noise parameter must be a symmetric, positive-definite'
msg += ' matrix.'
raise Exception(msg)
# Check that the shape parameter is a number greater than one minus the
# number of degrees of freedom.
if not (np.isscalar(eta) and
not np.isnan(eta) and
np.isfinite(eta) and eta > n - 1.0):
msg = 'The shape parameter must be greater than one minus the'
msg += ' degrees of freedom.'
raise Exception(msg)
# Allocate space for storing the matrix of product statistics.
self.__prod = np.zeros([m + n, m + n])
# Initialize the statistics with the parameters of the prior
# distribution.
x = np.dot(omega, mu)
self.__prod[:m, :m] = omega
self.__prod[:m, m:] = x
self.__prod[m:, :m] = x.T
self.__prod[m:, m:] = np.dot(mu.T, x) + eta * sigma
self.__weight = eta
def update(self, X, Y):
"""Update the sufficient statistics given observed data.
The sufficient statistics are the only parameters required to describe
the shape of the distribution. Initially, the sufficient statistics
contain no information apart from that implied by the prior
distribution. As data arrive, the statistics are updated incrementally
in order to reflect this new knowledge. Performing updates allows the
sufficient statistics to summarise all information contained in the
data observed so far.
"""
# (Equation 5a, b)
#
# | XX XY |
# | YX YY |
#
if np.ndim(X) > 1:
k, m = np.shape(X)
x = np.dot(X.T, Y)
# Update the statistics given a block of data (in the following
# order: XX, XY, YX, YY)
self.__prod[:m, :m] += np.dot(X.T, X)
self.__prod[:m, m:] += x
self.__prod[m:, :m] += x.T
self.__prod[m:, m:] += np.dot(Y.T, Y)
self.__weight += k
else:
m = np.size(X)
x = np.outer(X, Y)
# Update the statistics given a single datum.
self.__prod[:m, :m] += np.outer(X, X)
self.__prod[:m, m:] += x
self.__prod[m:, :m] += x.T
self.__prod[m:, m:] += np.outer(Y, Y)
self.__weight += 1
def log_constant(self):
m, n = self.__m, self.__n
# Note usage of the log-determinant 'trick':
#
# log(det(A)) = 2*sum(log(diag(chol(A))))
#
d = np.diag(linalg.cholesky(self.__prod))
w = self.__weight
# Evaluate the log-normalization constant.
# (Equation 8)
return special.gammaln(0.5*(w - np.arange(n))).sum() - \
n * np.log(d[:m]).sum() - \
w * np.log(d[m:] / np.sqrt(w)).sum() - \
n * (0.5 * w) * np.log(0.5 * w)
def parameters(self):
"""Return the posterior parameters.
All the information content of the data is summarized by the sufficient
statistics. As a result the posterior parameters are a function of the
sufficient statistics. This is a consequence of the conjugacy of the
matrix-variate Gaussian-inverse-Gamma distribution.
"""
m = self.__m
s = linalg.cholesky(self.__prod).transpose()
w = self.__weight
# Compute the parameters of the posterior distribution.
return linalg.solve(s[:m, :m], s[:m, m:]), \
np.dot(s[:m, :m].transpose(), s[:m, :m]), \
np.dot(s[m:, m:].transpose(), s[m:, m:]) / w, \
w
def rand(self):
m, n = self.__m, self.__n
s = linalg.cholesky(self.__prod).transpose()
w = self.__weight
# Compute the parameters of the posterior distribution.
mu = linalg.solve(s[:m, :m], s[:m, m:])
omega = np.dot(s[:m, :m].transpose(), s[:m, :m])
sigma = np.dot(s[m:, m:].transpose(), s[m:, m:]) / w
eta = w
# Simulate the marginal Wishart distribution.
f = linalg.solve(np.diag(np.sqrt(2.0*random.gamma(
(eta - np.arange(n))/2.0))) + np.tril(random.randn(n, n), -1),
np.sqrt(eta)*linalg.cholesky(sigma).transpose())
b = np.dot(f.transpose(), f)
# Simulate the conditional Gauss distribution.
a = mu + linalg.solve(linalg.cholesky(omega).transpose(),
np.dot(random.randn(m, n),
linalg.cholesky(b).transpose()))
return a, b
class Bcdm(object):
"""Bayesian change detection model.
Args:
mu (numpy.array): (M x N) location parameters of the prior distribution.
omega (numpy.array): (M x M) scale parameters of the prior distribution.
sigma (numpy.array): (N x N) dispersion parameters of the prior distribution.
eta (float): shape parameter of the prior distribution.
alg (string): Specifies the algorithm to use. Choose either 'sumprod'
for the sum-product algorithm or 'maxprod' for the
max-product algorithm. If the sum-product algorithm is
selected, the posterior probabilities of the segmentation
hypotheses will be calculated. If the max-product
algorithm is selected, the most likely sequence
segmentation will be calculated.
        ratefun (float): Relative chance of a new segment being
generated. ``ratefun`` is a value between 0 and
1. Segments are MORE likely to be created with values
closer to zero. Segments are LESS likely to form with
values closer to 1. Alternatively, ratefun can be set
to an executable hazard function. The hazard function
must accept non-negative integers and return
non-negative floating-point numbers.
basisfunc (callable): Feature functions for basis function
expansion. Feature functions provide additional
flexibility by mapping the predictor variables to
an intermmediate feature space, thus allowing the
user to model non-linear relationships.
minprob (float): Minimum probability required for a
hypothesis. Hypotheses with insignificant support
(probabilities below this value) will be pruned.
maxhypot (int): Maximum number of segmentation hypotheses to
consider. After each update, pruning will take place to
                        limit the number of hypotheses. If set to ``None``,
                        pruning will NOT take place automatically after updates; however,
pruning can be initiated manually by calling
:py:meth:`.trim`.
Raises:
        Exception: If any of the inputs are of an incorrect type.
"""
def __init__(self, mu=None, omega=None, sigma=None, eta=None,
alg='sumprod', ratefun=0.1, basisfunc=None, minprob=1.0e-6,
maxhypot=20):
        # The inference algorithm must be either sum-product or max-product.
if alg.lower() not in ['sumprod', 'maxprod']:
msg = "The input 'alg' must be either 'sumprod' or 'maxprod'."
raise Exception(msg)
else:
self.__alg__ = alg.lower()
# Store number of dimensions in the predictor (independent/input
# variable) and response (dependent/output variable) variables.
self.__m = None
self.__n = None
# Allocate variables for matrix variate, normal inverse gamma
# distributions.
self.__mu = None
self.__omega = None
self.__sigma = None
self.__eta = None
# Set prior for the location parameter.
if mu is not None:
self.__mu = mu
# Set prior for the scale parameter.
if omega is not None:
self.__omega = omega
# Set prior for the dispersion/noise parameter.
if sigma is not None:
self.__sigma = sigma
# Set prior for the shape parameter.
if eta is not None:
self.__eta = eta
# Ensure algorithm initialises on first call to update.
self.__initialised = False
# If 'maxhypot' is set to none, no hypotheses will be trimmed.
        if maxhypot is None or maxhypot > 0:
self.__maximum_hypotheses = maxhypot
else:
msg = "The input 'maxhypot' must be an integer greater than zero."
raise Exception(msg)
if minprob > 0:
self.__minimum_probability = minprob
else:
msg = "The input 'minprob' must be a float greater than zero."
raise Exception(msg)
# Allocate variables for tracking segments.
self.__hypotheses = list()
self.__counts = list()
self.__probabilities = list()
# Store basis and hazard function.
self.__basisfunc = basisfunc if callable(basisfunc) else lambda x: x
self.__ratefun = ratefun if callable(ratefun) else lambda x: ratefun
def __initialise_algorithm(self, m, n):
"""Initialise the Bcdm algorithm."""
# Ensure input dimensions are consistent.
if self.__m is None:
self.__m = m
elif self.__m != m:
msg = 'Expected {} dimensions in the predictor variable.'.format(m)
raise Exception(msg)
# Ensure output dimensions are consistent.
if self.__n is None:
self.__n = n
elif self.__n != n:
msg = 'Expected {} dimensions in the response variable.'.format(n)
raise Exception(msg)
# Set uninformative prior for the location parameter.
if self.__mu is None:
self.__mu = np.zeros([m, n])
# Set uninformative prior for the scale parameter.
if self.__omega is None:
self.__omega = np.eye(m)
# Set uninformative prior for the dispersion/noise parameter.
if self.__sigma is None:
self.__sigma = np.eye(n)
# Set uninformative prior for the shape parameter.
if self.__eta is None:
self.__eta = n
# Create the initial hypothesis, which states that the first segment is
# about to begin.
self.__add_new_hypothesis(0.0)
def __soft_max(self, x, y):
return max(x, y) + np.log1p(np.exp(-abs(x - y)))
def __add_new_hypothesis(self, log_likelihood, basisfunc=None):
"""Function for spawning new hypothesis"""
# Set basis function.
if basisfunc is None:
basisfunc = self.__basisfunc
# Create new Bayesian linear model (using supplied priors).
stat = MatrixVariateNormalInvGamma(self.__mu,
self.__omega,
self.__sigma,
self.__eta)
# Add a new hypothesis, which states that a new segment is about to
# begin.
self.__hypotheses.append({'count': 0,
'log_probability': log_likelihood,
'distribution': stat,
'log_constant': stat.log_constant(),
'basisfunc': basisfunc})
def update(self, X, Y, basisfunc=None):
"""Update model with a single observation.
When new input-output data is available, the model can be updated using
this method. As more and more data are collected, the number of
hypotheses grows, increasing the computational complexity. By default
hypotheses are pruned at the end of each update (see
:py:meth:`.trim`.). To disable hypotheses trimming, initialise the
class with ``maxhypot`` set to ``None``.
Args:
X (numpy.array): Observed (1 x M) input data (predictor variable).
Y (numpy.array): Observed (1 x N) output data (response variable).
"""
# Initialise algorithm on first call to update. This allows the
# algorithm to configure itself to the size of the first input/output
# data if no hyper-parameters have been specified.
if not self.__initialised:
init_basis = self.__basisfunc if basisfunc is None else basisfunc
x = init_basis(X)
m = x.shape[1] if np.ndim(x) > 1 else X.size
n = Y.shape[1] if np.ndim(Y) > 1 else Y.size
self.__initialise_algorithm(m, n)
self.__initialised = True
# Get size of data.
k = X.shape[0] if np.ndim(X) > 1 else X.size
m, n = self.__m, self.__n
# Allocate variables for the dynamic programming pass.
loglik = -np.inf
logmax = -np.inf
logsum = -np.inf
ind = np.nan
# Update hypotheses by updating each matrix variate, normal inverse
# gamma distribution over the linear models.
for hypothesis in self.__hypotheses:
# Update the sufficient statistics.
hypothesis['distribution'].update(hypothesis['basisfunc'](X), Y)
# Compute the log-normalization constant after the update
# (posterior parameter distribution).
# (Equation 8)
n_o = hypothesis['log_constant']
n_k = hypothesis['log_constant'] = hypothesis['distribution'].log_constant()
# Evaluate the log-density of the predictive distribution.
# (Equation 16)
log_density = n_k - n_o - k * (0.5 * m * n) * np.log(2.0 * np.pi)
# Increment the counter.
hypothesis['count'] += 1
# Accumulate the log-likelihood of the data.
# (Equation 17)
hazard = self.__ratefun(hypothesis['count'])
aux = np.log(hazard) + log_density + hypothesis['log_probability']
loglik = self.__soft_max(loglik, aux)
# Keep track of the highest, log-likelihood.
if aux > logmax:
logmax, ind = aux, hypothesis['count']
# Update and accumulate the log-probabilities.
hypothesis['log_probability'] += np.log1p(-hazard) + log_density
logsum = self.__soft_max(logsum, hypothesis['log_probability'])
# In the max-product algorithm, keep track of the most likely
# hypotheses.
if self.__alg__ == 'maxprod':
loglik = logmax
self.__counts.append(ind)
# Add a new hypothesis, which states that the next segment is about to
# begin.
self.__add_new_hypothesis(loglik, basisfunc)
# Normalize the hypotheses so that their probabilities sum to one.
logsum = self.__soft_max(logsum, loglik)
for hypothesis in self.__hypotheses:
hypothesis['log_probability'] -= logsum
# Automatically trim hypothesis on each update if requested.
if self.__maximum_hypotheses is not None:
self.trim_hypotheses(minprob=self.__minimum_probability,
maxhypot=self.__maximum_hypotheses)
# In the sum-product algorithm, keep track of the probabilities.
if self.__alg__ == 'sumprod':
iteration = list()
for hypothesis in self.__hypotheses:
iteration.append((hypothesis['count'],
hypothesis['log_probability']))
self.__probabilities.append(iteration)
def trim_hypotheses(self, minprob=1.0e-6, maxhypot=20):
"""Prune hypotheses to limit computational complexity.
The computational complexity of the algorithm can be managed by
limiting the number of hypotheses maintained. This method limits the
number of hypotheses maintained by the model by:
1) Removing any hypotheses with a support (probability) less than
``minprob``.
          2) Preserving the ``maxhypot`` most likely hypotheses and
discarding the rest.
"""
# Skip pruning if less hypotheses exist than the maximum allowed.
if len(self.__hypotheses) <= maxhypot:
return
# Sort the hypotheses in decreasing log probability order.
self.__hypotheses.sort(key=lambda dct: -dct['log_probability'])
# Store the indices of likely hypotheses.
minprob = np.log(minprob)
index = [i for i, hypot in enumerate(self.__hypotheses)
if hypot['log_probability'] > minprob]
# Trim the hypotheses.
index = index[:maxhypot] if len(index) >= maxhypot else index
self.__hypotheses = [self.__hypotheses[i] for i in index]
# NOTE: This final ordering can preserve the original order of the
# hypotheses. Interestingly, the algorithm specified in update
# does not require that the hypotheses be ordered! This sort can
# safely be ignored.
# self.__hypotheses.sort(key=lambda dct: dct['index'])
# Normalize the hypotheses so that their probabilities sum to one.
logsum = -np.inf
for hypot in self.__hypotheses:
logsum = self.__soft_max(logsum, hypot['log_probability'])
for hypot in self.__hypotheses:
hypot['log_probability'] -= logsum
def infer(self):
"""Return posterior probabilities OR sequence segmentation.
If the MAX-PRODUCT algorithm is selected, this method returns the most
likely sequence segmentation as a list of integers. Each integer in the
list marks where a segment begins.
If the SUM-PRODUCT algorithm is selected, this method returns the
posterior probabilities of the segmentation hypotheses as a numpy
array. Rows in the array represent hypotheses and columns in the array
represent data points in the time-series.
Returns:
object: This method returns the inference results. In the case of
the MAX-PRODUCT algorithm, the method returns the most
likely segmentation. In the case of the SUM-PRODUCT
algorithm, this method returns the posterior probabilities
of the segmentation hypotheses.
"""
# In the max-product algorithm, the most likely hypotheses are
# tracked. Recover the most likely segment boundaries by performing a
# back-trace.
if self.__alg__ == 'maxprod':
# Find the most likely hypothesis.
max_hypothesis = max(self.__hypotheses,
key=lambda dct: dct['log_probability'])
# Find the best sequence segmentation given all the data so far.
segment_boundaries = [len(self.__counts) - 1, ]
index = segment_boundaries[0] - 1
count = max_hypothesis['count'] - 1
while index > 0:
index -= count
segment_boundaries.insert(0, index)
count = self.__counts[index - 1]
return segment_boundaries
# In the sum-product algorithm, the segment probabilities are
# tracked. Recover the segment probabilities by formatting the stored
# history.
else:
k = len(self.__probabilities)
segment_probabilities = np.zeros((k + 1, k + 1))
segment_probabilities[0, 0] = 1.0
# Update hypotheses probabilities.
for i in range(len(self.__probabilities)):
for (j, probability) in self.__probabilities[i]:
segment_probabilities[j, i + 1] = np.exp(probability)
# A segment always occurs at the beginning of the dataset.
segment_probabilities[0, 0] = 1.0
return segment_probabilities
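if __name__ == '__main__':
    # Minimal usage sketch (illustrative only, not part of the original module):
    # segment a synthetic input-output sequence that switches between two linear
    # models halfway through. The matrices, noise level, and rate parameter below
    # are arbitrary assumptions chosen for the demonstration.
    random.seed(0)
    A1 = np.array([[1.0, 0.0], [0.0, 1.0], [0.5, -0.5]])
    A2 = np.array([[-1.0, 0.5], [0.3, 2.0], [0.0, 1.0]])
    bcdm = Bcdm(alg='maxprod', ratefun=0.05)
    for t in range(100):
        x = random.randn(3)
        y = np.dot(x, A1 if t < 50 else A2) + 0.01 * random.randn(2)
        bcdm.update(x, y)
    # With the max-product algorithm, infer() returns the indices at which the
    # most likely segments begin; a boundary should appear near t = 50.
    print(bcdm.infer())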
|
[
"numpy.isnan",
"numpy.shape",
"numpy.arange",
"numpy.exp",
"numpy.linalg.solve",
"numpy.random.randn",
"numpy.ndim",
"numpy.isfinite",
"numpy.transpose",
"numpy.linalg.det",
"numpy.log1p",
"numpy.linalg.cholesky",
"numpy.size",
"numpy.dot",
"numpy.outer",
"numpy.log",
"numpy.isscalar",
"numpy.zeros",
"numpy.eye",
"numpy.sqrt"
] |
[((1442, 1454), 'numpy.shape', 'np.shape', (['mu'], {}), '(mu)\n', (1450, 1454), True, 'import numpy as np\n'), ((3368, 3392), 'numpy.zeros', 'np.zeros', (['[m + n, m + n]'], {}), '([m + n, m + n])\n', (3376, 3392), True, 'import numpy as np\n'), ((3499, 3516), 'numpy.dot', 'np.dot', (['omega', 'mu'], {}), '(omega, mu)\n', (3505, 3516), True, 'import numpy as np\n'), ((6659, 6693), 'numpy.linalg.solve', 'linalg.solve', (['s[:m, :m]', 's[:m, m:]'], {}), '(s[:m, :m], s[:m, m:])\n', (6671, 6693), False, 'from numpy import linalg\n'), ((19536, 19551), 'numpy.log', 'np.log', (['minprob'], {}), '(minprob)\n', (19542, 19551), True, 'import numpy as np\n'), ((3649, 3664), 'numpy.dot', 'np.dot', (['mu.T', 'x'], {}), '(mu.T, x)\n', (3655, 3664), True, 'import numpy as np\n'), ((4414, 4424), 'numpy.ndim', 'np.ndim', (['X'], {}), '(X)\n', (4421, 4424), True, 'import numpy as np\n'), ((4449, 4460), 'numpy.shape', 'np.shape', (['X'], {}), '(X)\n', (4457, 4460), True, 'import numpy as np\n'), ((4477, 4491), 'numpy.dot', 'np.dot', (['X.T', 'Y'], {}), '(X.T, Y)\n', (4483, 4491), True, 'import numpy as np\n'), ((4641, 4655), 'numpy.dot', 'np.dot', (['X.T', 'X'], {}), '(X.T, X)\n', (4647, 4655), True, 'import numpy as np\n'), ((4767, 4781), 'numpy.dot', 'np.dot', (['Y.T', 'Y'], {}), '(Y.T, Y)\n', (4773, 4781), True, 'import numpy as np\n'), ((4844, 4854), 'numpy.size', 'np.size', (['X'], {}), '(X)\n', (4851, 4854), True, 'import numpy as np\n'), ((4871, 4885), 'numpy.outer', 'np.outer', (['X', 'Y'], {}), '(X, Y)\n', (4879, 4885), True, 'import numpy as np\n'), ((4980, 4994), 'numpy.outer', 'np.outer', (['X', 'X'], {}), '(X, X)\n', (4988, 4994), True, 'import numpy as np\n'), ((5106, 5120), 'numpy.outer', 'np.outer', (['Y', 'Y'], {}), '(Y, Y)\n', (5114, 5120), True, 'import numpy as np\n'), ((5364, 5392), 'numpy.linalg.cholesky', 'linalg.cholesky', (['self.__prod'], {}), '(self.__prod)\n', (5379, 5392), False, 'from numpy import linalg\n'), ((6268, 6302), 'numpy.linalg.solve', 'linalg.solve', (['s[:m, :m]', 's[:m, m:]'], {}), '(s[:m, :m], s[:m, m:])\n', (6280, 6302), False, 'from numpy import linalg\n'), ((12896, 12912), 'numpy.zeros', 'np.zeros', (['[m, n]'], {}), '([m, n])\n', (12904, 12912), True, 'import numpy as np\n'), ((13033, 13042), 'numpy.eye', 'np.eye', (['m'], {}), '(m)\n', (13039, 13042), True, 'import numpy as np\n'), ((13174, 13183), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (13180, 13183), True, 'import numpy as np\n'), ((22502, 22526), 'numpy.zeros', 'np.zeros', (['(k + 1, k + 1)'], {}), '((k + 1, k + 1))\n', (22510, 22526), True, 'import numpy as np\n'), ((3012, 3028), 'numpy.isscalar', 'np.isscalar', (['eta'], {}), '(eta)\n', (3023, 3028), True, 'import numpy as np\n'), ((3087, 3103), 'numpy.isfinite', 'np.isfinite', (['eta'], {}), '(eta)\n', (3098, 3103), True, 'import numpy as np\n'), ((5690, 5705), 'numpy.log', 'np.log', (['(0.5 * w)'], {}), '(0.5 * w)\n', (5696, 5705), True, 'import numpy as np\n'), ((6121, 6149), 'numpy.linalg.cholesky', 'linalg.cholesky', (['self.__prod'], {}), '(self.__prod)\n', (6136, 6149), False, 'from numpy import linalg\n'), ((6514, 6542), 'numpy.linalg.cholesky', 'linalg.cholesky', (['self.__prod'], {}), '(self.__prod)\n', (6529, 6542), False, 'from numpy import linalg\n'), ((7042, 7054), 'numpy.sqrt', 'np.sqrt', (['eta'], {}), '(eta)\n', (7049, 7054), True, 'import numpy as np\n'), ((15751, 15761), 'numpy.ndim', 'np.ndim', (['X'], {}), '(X)\n', (15758, 15761), True, 'import numpy as np\n'), ((17327, 17344), 'numpy.log1p', 'np.log1p', (['(-hazard)'], {}), 
'(-hazard)\n', (17335, 17344), True, 'import numpy as np\n'), ((1581, 1592), 'numpy.ndim', 'np.ndim', (['mu'], {}), '(mu)\n', (1588, 1592), True, 'import numpy as np\n'), ((1618, 1630), 'numpy.shape', 'np.shape', (['mu'], {}), '(mu)\n', (1626, 1630), True, 'import numpy as np\n'), ((1950, 1964), 'numpy.ndim', 'np.ndim', (['omega'], {}), '(omega)\n', (1957, 1964), True, 'import numpy as np\n'), ((1990, 2005), 'numpy.shape', 'np.shape', (['omega'], {}), '(omega)\n', (1998, 2005), True, 'import numpy as np\n'), ((2139, 2158), 'numpy.transpose', 'np.transpose', (['omega'], {}), '(omega)\n', (2151, 2158), True, 'import numpy as np\n'), ((2187, 2204), 'numpy.linalg.det', 'linalg.det', (['omega'], {}), '(omega)\n', (2197, 2204), False, 'from numpy import linalg\n'), ((2470, 2484), 'numpy.ndim', 'np.ndim', (['sigma'], {}), '(sigma)\n', (2477, 2484), True, 'import numpy as np\n'), ((2510, 2525), 'numpy.shape', 'np.shape', (['sigma'], {}), '(sigma)\n', (2518, 2525), True, 'import numpy as np\n'), ((2659, 2678), 'numpy.transpose', 'np.transpose', (['sigma'], {}), '(sigma)\n', (2671, 2678), True, 'import numpy as np\n'), ((2707, 2724), 'numpy.linalg.det', 'linalg.det', (['sigma'], {}), '(sigma)\n', (2717, 2724), False, 'from numpy import linalg\n'), ((3053, 3066), 'numpy.isnan', 'np.isnan', (['eta'], {}), '(eta)\n', (3061, 3066), True, 'import numpy as np\n'), ((6992, 7010), 'numpy.random.randn', 'random.randn', (['n', 'n'], {}), '(n, n)\n', (7004, 7010), False, 'from numpy import random\n'), ((7287, 7305), 'numpy.random.randn', 'random.randn', (['m', 'n'], {}), '(m, n)\n', (7299, 7305), False, 'from numpy import random\n'), ((15528, 15538), 'numpy.ndim', 'np.ndim', (['x'], {}), '(x)\n', (15535, 15538), True, 'import numpy as np\n'), ((15585, 15595), 'numpy.ndim', 'np.ndim', (['Y'], {}), '(Y)\n', (15592, 15595), True, 'import numpy as np\n'), ((16713, 16732), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (16719, 16732), True, 'import numpy as np\n'), ((16969, 16983), 'numpy.log', 'np.log', (['hazard'], {}), '(hazard)\n', (16975, 16983), True, 'import numpy as np\n'), ((22795, 22814), 'numpy.exp', 'np.exp', (['probability'], {}), '(probability)\n', (22801, 22814), True, 'import numpy as np\n'), ((1704, 1719), 'numpy.isfinite', 'np.isfinite', (['mu'], {}), '(mu)\n', (1715, 1719), True, 'import numpy as np\n'), ((2082, 2100), 'numpy.isfinite', 'np.isfinite', (['omega'], {}), '(omega)\n', (2093, 2100), True, 'import numpy as np\n'), ((2602, 2620), 'numpy.isfinite', 'np.isfinite', (['sigma'], {}), '(sigma)\n', (2613, 2620), True, 'import numpy as np\n'), ((7055, 7077), 'numpy.linalg.cholesky', 'linalg.cholesky', (['sigma'], {}), '(sigma)\n', (7070, 7077), False, 'from numpy import linalg\n'), ((7214, 7236), 'numpy.linalg.cholesky', 'linalg.cholesky', (['omega'], {}), '(omega)\n', (7229, 7236), False, 'from numpy import linalg\n'), ((1665, 1677), 'numpy.isnan', 'np.isnan', (['mu'], {}), '(mu)\n', (1673, 1677), True, 'import numpy as np\n'), ((2040, 2055), 'numpy.isnan', 'np.isnan', (['omega'], {}), '(omega)\n', (2048, 2055), True, 'import numpy as np\n'), ((2560, 2575), 'numpy.isnan', 'np.isnan', (['sigma'], {}), '(sigma)\n', (2568, 2575), True, 'import numpy as np\n'), ((7344, 7362), 'numpy.linalg.cholesky', 'linalg.cholesky', (['b'], {}), '(b)\n', (7359, 7362), False, 'from numpy import linalg\n'), ((5579, 5592), 'numpy.log', 'np.log', (['d[:m]'], {}), '(d[:m])\n', (5585, 5592), True, 'import numpy as np\n'), ((5637, 5647), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (5644, 5647), 
True, 'import numpy as np\n'), ((5535, 5547), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5544, 5547), True, 'import numpy as np\n'), ((6961, 6973), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6970, 6973), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.figure
from typing import List, Sequence, Dict, Tuple, Union
from biplane_kine.graphing import plot_utils
from .common_graph_utils import make_interactive
from .smoothing_graph_utils import marker_graph_init, marker_graph_add
from .kine_graph_utils import kine_graph_init, kine_graph_add, plot_marker_cluster_avail
class RawSmoothedKineTorsoPlotter:
"""Plotter for torso kinematics derived from labeled, filled, and smoothed skin marker data.
Attributes
----------
torso_pos_labeled: numpy.ndarray (N, 3)
Torso position trajectory derived from labeled (raw) skin marker data
torso_eul_labeled: numpy.ndarray (N, 3)
Torso orientation trajectory (expressed as an Euler angle sequence) derived from labeled (raw) skin marker data
torso_pos_filled: numpy.ndarray (N, 3)
Torso position trajectory derived from filled (raw) skin marker data
torso_eul_filled: numpy.ndarray (N, 3)
Torso orientation trajectory (expressed as an Euler angle sequence) derived from filled (raw) skin marker data
torso_pos_smoothed: numpy.ndarray (N, 3)
Torso position trajectory derived from smoothed (raw) skin marker data
torso_eul_smoothed: numpy.ndarray (N, 3)
Torso orientation trajectory (expressed as an Euler angle sequence) derived from smoothed (raw) skin marker data
frame_nums: numpy.ndarray (N,)
Vicon frame numbers associated with torso trajectory
trial_name: str
Name of trial being plotted
"""
def __init__(self, trial_name: str, torso_pos_labeled: np.ndarray, torso_eul_labeled: np.ndarray,
torso_pos_filled: np.ndarray, torso_eul_filled: np.ndarray, torso_pos_smoothed: np.ndarray,
torso_eul_smoothed: np.ndarray, frame_nums: np.ndarray):
self.torso_pos_labeled = torso_pos_labeled
self.torso_eul_labeled = torso_eul_labeled
self.torso_pos_filled = torso_pos_filled
self.torso_eul_filled = torso_eul_filled
self.torso_pos_smoothed = torso_pos_smoothed
self.torso_eul_smoothed = torso_eul_smoothed
self.frame_nums = frame_nums
self.trial_name = trial_name
def plot(self) -> List[matplotlib.figure.Figure]:
"""Plot torso position and orientation trajectory as derived from labeled (raw), filled, and smoothed skin
marker data.
Figures:
1. Torso position broken out into 3 separate subplots for each spatial dimension
2. Torso orientation broken out into 3 separate subplots for each spatial dimension
3. Torso position plotted on one axes with different colors for each spatial dimension
4. Torso orientation plotted on one axes with different colors for each spatial dimension
"""
figs = []
# Figure 1, position in 3 subplots
pos_fig_sub = self.plot_subplots(0, self.trial_name + ' Torso Intrinsic Position', 'Position (mm)',
self.torso_pos_labeled, self.torso_pos_filled, self.torso_pos_smoothed)
figs.append(pos_fig_sub)
# Figure 2, orientation in 3 subplots
eul_fig_sub = self.plot_subplots(1, self.trial_name + ' Torso Intrinsic Euler Angles', 'Angle (deg)',
self.torso_eul_labeled, self.torso_eul_filled, self.torso_eul_smoothed)
figs.append(eul_fig_sub)
# Figure 3, position in one axes
pos_fig_one = self.plot_one_axes(2, self.trial_name + ' Torso Intrinsic Position', 'Position (mm)',
self.torso_pos_labeled, self.torso_pos_filled, self.torso_pos_smoothed,
{'labeled': 'Labeled (X)', 'filled': 'Filled (Y)', 'smoothed': 'Smoothed (Z)'})
figs.append(pos_fig_one)
        # Figure 4, orientation in one axes
eul_fig_one = self.plot_one_axes(3, self.trial_name + ' Torso Intrinsic Euler Angles', 'Angle (deg)',
self.torso_eul_labeled, self.torso_eul_filled, self.torso_eul_smoothed,
{'labeled': 'Labeled (Flex/Ext)', 'filled': 'Filled (Lat Flex)',
'smoothed': 'Smoothed (Axial)'})
figs.append(eul_fig_one)
return figs
def plot_subplots(self, fig_num: int, title: str, y_label: str, labeled: np.ndarray, filled: np.ndarray,
smoothed: np.ndarray) -> matplotlib.figure.Figure:
"""Plot torso position or orientation into 3 separate subplots for each spatial dimension."""
fig = plt.figure(fig_num)
axs = fig.subplots(3, 1, sharex=True)
labeled_lines = marker_graph_init(axs, labeled, y_label, self.frame_nums, color='blue')
filled_lines = marker_graph_add(axs, filled, self.frame_nums, color='red')
smoothed_lines = marker_graph_add(axs, smoothed, self.frame_nums, color='green')
plt.tight_layout()
fig.suptitle(title)
fig.legend((labeled_lines[0], filled_lines[0], smoothed_lines[0]), ('Labeled', 'Filled', 'Smoothed'),
ncol=3, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='upper left')
make_interactive()
return fig
def plot_one_axes(self, fig_num: int, title: str, y_label: str, labeled: np.ndarray, filled: np.ndarray,
smoothed: np.ndarray, legend_entries: Dict[str, str]) -> matplotlib.figure.Figure:
"""Plot torso position or orientation on one axes with different colors for each spatial dimension."""
fig = plt.figure(fig_num)
ax = fig.subplots(1, 1)
labeled_lines = kine_graph_init(ax, labeled, y_label, self.frame_nums, [{'ls': '', 'marker': 'o', 'ms': 2,
'fillstyle': 'none', 'mew': 0.5}] * 3)
ax.set_prop_cycle(None)
filled_lines = kine_graph_add(ax, filled, self.frame_nums, [{'ls': '-', 'lw': 0.75}] * 3)
ax.set_prop_cycle(None)
smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)
plt.tight_layout()
fig.suptitle(title, x=0.7)
fig.legend((labeled_lines[0], smoothed_lines[2], filled_lines[1]),
(legend_entries['labeled'], legend_entries['smoothed'], legend_entries['filled']),
ncol=2, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='upper left')
make_interactive()
return fig
class MarkerClusterAvailPlotter:
"""Plotter for visually determining when markers in a marker clustser are present in a trial.
Attributes
----------
marker_data: numpy.ndarray (M, N, 3)
marker position data for all M markers and N frames in a trial
marker_names: List of str
list of marker names in marker cluster
vicon_endpts: array_like (2,)
The frame indices (endpoints) of the Vicon trial that correspond to the endpoints of the reciprocal
biplane fluoroscopy trial.
"""
def __init__(self, marker_data: np.ndarray, marker_names: Sequence[str], vicon_endpts: np.ndarray, trial_name: str):
self.marker_data = marker_data
self.marker_names = marker_names
self.vicon_endpts = vicon_endpts
self.trial_name = trial_name
self.frame_nums = np.arange(self.marker_data.shape[1]) + 1
def plot(self) -> List[matplotlib.figure.Figure]:
fig = plt.figure()
ax = fig.subplots(1, 1)
present_lines, absent_lines = plot_marker_cluster_avail(ax, self.marker_data, self.frame_nums,
self.marker_names, self.vicon_endpts)
plt.tight_layout()
fig.suptitle(self.trial_name)
plt.subplots_adjust(top=0.95)
fig.legend((present_lines[0], absent_lines[0]), ('Present', 'Absent'), ncol=2, handlelength=0.75,
handletextpad=0.5, columnspacing=1.0, loc='upper right')
make_interactive()
return [fig]
class MarkerClusterFillPlotter(MarkerClusterAvailPlotter):
"""Plotter for visually demonstrating filled gaps and the markers utilized to fill them.
Attributes
----------
gaps_filled: dict of marker names to gaps
Dictionary containing the gaps that were filled for each marker
source_markers: dict of marker names to source markers
Dictionary containing the source markers that were utilized to fill gaps for each marker
filled_data: dict of marker names to filled marker trajectories
Dictionary containing the filled marker trajectory for each marker
sfs_data: dict of marker names to smoothed/filled/smoothed marker trajectories
Dictionary containing the smoothed/filled/smoothed marker trajectory for each marker
"""
def __init__(self, trial_name: str, marker_data: np.ndarray, marker_names: Sequence[str],
gaps_filled: Dict[str, Sequence[Tuple[int, int]]], source_markers: Dict[str, Sequence[str]],
filled_data: Dict[str, np.ndarray], sfs_data: Dict[str, np.ndarray], vicon_endpts: np.ndarray):
super().__init__(marker_data, marker_names, vicon_endpts, trial_name)
self.gaps_filled = gaps_filled
self.source_markers = source_markers
self.filled_data = filled_data
self.sfs_data = sfs_data
def plot(self) -> List[matplotlib.figure.Figure]:
fig = super().plot()[0]
ax_fig = fig.axes[0]
# add gap demarcation lines and source marker names for marker that was filled
for (idx, marker_name) in enumerate(self.marker_names):
if marker_name in self.gaps_filled:
gaps = self.gaps_filled[marker_name]
for gap in gaps:
ax_fig.vlines([gap[0] + 1, gap[1]], ymin=(idx + 1) - 0.2, ymax=(idx + 1) + 0.2, linewidths=6,
colors=(1, 0.5, 0))
max_gap_idx = np.argmax([gap[1] - gap[0] for gap in gaps])
ax_fig.text((gaps[max_gap_idx][0] + gaps[max_gap_idx][1])/2, idx + 1 + 0.25,
','.join(self.source_markers[marker_name]), horizontalalignment='center',
verticalalignment='bottom', fontweight='bold')
# create a figure for each marker that was filled
figs = self.plot_filled_trajectory(2)
return [fig] + figs
def plot_filled_trajectory(self, fig_start) -> List[matplotlib.figure.Figure]:
figs = []
for (idx, (marker_name, filled_marker_data)) in enumerate(self.filled_data.items()):
fig = plt.figure(fig_start + idx)
ax = fig.subplots(3, 1)
filled_lines = marker_graph_init(ax, filled_marker_data, 'Position (mm)', self.frame_nums, color='red')
smoothed_lines = marker_graph_add(ax, self.marker_data[self.marker_names.index(marker_name)],
self.frame_nums, color='blue')
sfs_lines = marker_graph_add(ax, self.sfs_data[marker_name], self.frame_nums, color='green')
highlight_sfs = np.full_like(self.sfs_data[marker_name], np.nan)
gaps = self.gaps_filled[marker_name]
for gap in gaps:
highlight_sfs[gap[0]:gap[1], :] = self.sfs_data[marker_name][gap[0]:gap[1], :]
high_sfs_lines = marker_graph_add(ax, highlight_sfs, self.frame_nums, color='orange')
for sub_ax in ax:
sub_ax.set_xlim(left=1)
plt.tight_layout()
fig.suptitle(self.trial_name + ' ' + marker_name, x=0.75)
fig.legend((filled_lines[0], smoothed_lines[0], sfs_lines[0], high_sfs_lines[0]),
('Filled Raw', 'Smoothed', 'SFS', 'Filled Smoothed'), ncol=4, handlelength=0.75,
handletextpad=0.25, columnspacing=0.5, loc='lower left')
make_interactive()
figs.append(fig)
return figs
class TorsoTrajComparisonPlotter:
"""Torso trajectory plotter comparing previously filled, smoothed, smoothed/filled, and smoothed/filled/smoothed
torso kinematics.
Attributes
----------
trial_name: str
Name of trial
prev_filled: tuple of numpy.ndarray (N, 3)
Torso position and Euler angles derived from marker position data that had been filled in Vicon
smoothed: tuple of numpy.ndarray (N, 3)
Torso position and Euler angles derived from smoothed marker position data
filled: tuple of numpy.ndarray (N, 3)
        Torso position and Euler angles derived from smoothed then filled marker position data
sfs: tuple of numpy.ndarray (N, 3)
        Torso position and Euler angles derived from smoothed, filled, then smoothed again (lightly)
marker position data
frame_nums: numpy.ndarray (N,)
Frame numbers for the trial
vicon_endpts: array_like (2,)
The frame indices (endpoints) of the Vicon trial that correspond to the endpoints of the reciprocal
biplane fluoroscopy trial.
"""
def __init__(self, trial_name, prev_filled, smoothed, filled, sfs, vicon_endpts):
self.trial_name = trial_name
self.prev_filled = prev_filled
self.smoothed = smoothed
self.filled = filled
self.sfs = sfs
self.frame_nums = np.arange(self.sfs[0].shape[0]) + 1
self.vicon_endpts = vicon_endpts
def plot(self) -> List[matplotlib.figure.Figure]:
"""Plot torso position and orientation trajectory as derived from previously filled, smoothed,
smoothed then filled, and smoothed/filled/smoothed skin marker position.
Figures:
1. Torso position broken out into 3 separate subplots for each spatial dimension
2. Torso orientation broken out into 3 separate subplots for each spatial dimension
"""
figs = []
# Figure 1: Position
fig = self.plot_kine_var(1, self.trial_name, ('X (mm)', 'Y (mm)', 'Z (mm)'), self.prev_filled[0],
self.smoothed[0], self.filled[0], self.sfs[0])
figs.append(fig)
# Figure 2: Orientation
fig = self.plot_kine_var(2, self.trial_name, ('Flex/Ext (deg)', 'Lat Flex (deg)', 'Axial (deg)'),
self.prev_filled[1], self.smoothed[1], self.filled[1], self.sfs[1])
figs.append(fig)
return figs
def plot_kine_var(self, fig_num: int, title: str, y_labels: Sequence[str], prev_filled: np.ndarray,
smoothed: np.ndarray, filled: np.ndarray, sfs: np.ndarray) -> matplotlib.figure.Figure:
"""Plot torso position or orientation on one axes with different colors for each spatial dimension."""
fig = plt.figure(fig_num)
ax = fig.subplots(3, 1)
prev_filled_lines = marker_graph_init(ax, prev_filled, '', self.frame_nums, color='red')
smoothed_lines = marker_graph_add(ax, smoothed, self.frame_nums, color='blue')
smoothed_filled_lines = marker_graph_add(ax, filled, self.frame_nums, ls=':', lw=2, color='green')
sfs_lines = marker_graph_add(ax, sfs, self.frame_nums, color='green')
for idx, sub_ax in enumerate(ax):
plot_utils.update_ylabel(sub_ax, y_labels[idx], font_size=10)
sub_ax.axvline(self.vicon_endpts[0])
sub_ax.axvline(self.vicon_endpts[1])
sub_ax.set_xlim(left=1)
plt.tight_layout()
fig.suptitle(title)
fig.legend((prev_filled_lines[0], smoothed_lines[0], smoothed_filled_lines[0], sfs_lines[0]),
('Prev Filled', 'Smoothed', 'Smoothed/Filled', 'SFS'),
ncol=4, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='lower left')
make_interactive()
return fig
class RawSmoothSegmentPlotter:
"""Plotter for torso kinematics derived from labeled, filled, and smoothed skin marker data.
Attributes
----------
pos_raw: numpy.ndarray (N, 3)
Raw position trajectory
eul_raw: numpy.ndarray (N, 3)
Raw orientation trajectory (expressed as an Euler angle sequence)
pos_smooth: numpy.ndarray (N, 3)
Smoothed position trajectory
eul_smooth: numpy.ndarray (N, 3)
Smoothed orientation trajectory (expressed as an Euler angle sequence)
vel: numpy.ndarray (N, 3)
Smoothed linear velocity
ang_vel: numpy.ndarray (N, 3)
Smoothed angular velocity
frame_nums: numpy.ndarray (N,)
Biplane fluoroscopy frame numbers
trial_name: str
Name of trial being plotted
segment_name: str
Name of segment being plotted
euler_legend: List of str
Legend specifying the sequence of Euler rotation names
fig_num_start: int
Starting figure number
"""
def __init__(self, trial_name: str, segment_name: str, pos_raw: np.ndarray, eul_raw: np.ndarray,
pos_smooth: np.ndarray, eul_smooth: np.ndarray, vel: np.ndarray, ang_vel: np.ndarray,
frame_nums: np.ndarray, euler_legend: Sequence[str], pos_legend: Union[Sequence[str], None] = None,
fig_num_start: int = 0):
self.pos_raw = pos_raw
self.eul_raw = eul_raw
self.pos_smooth = pos_smooth
self.eul_smooth = eul_smooth
self.vel = vel
self.ang_vel = ang_vel
self.frame_nums = frame_nums
self.trial_name = trial_name
self.segment_name = segment_name
self.euler_legend = euler_legend
self.pos_legend = ['X', 'Y', 'Z'] if pos_legend is None else pos_legend
self.fig_num_start = fig_num_start
def plot(self) -> List[matplotlib.figure.Figure]:
"""Plot raw and smoothed position and orientation.
Figures:
1. Position broken out into 3 separate subplots for each spatial dimension
2. Orientation broken out into 3 separate subplots for each spatial dimension
3. Velocity broken out into 3 separate subplots for each spatial dimension
4. Angular velocity broken out into 3 separate subplots for each spatial dimension
5. Position plotted on one axes with different colors for each spatial dimension
6. Orientation plotted on one axes with different colors for each spatial dimension
7. Velocity plotted on one axes with different colors for each spatial dimension
8. Angular velocity plotted on one axes with different colors for each spatial dimension
"""
figs = []
title_prefix = self.trial_name + ' ' + self.segment_name + ' '
# Figure 1, position in 3 subplots
pos_fig_sub = self.plot_subplots(self.fig_num_start, title_prefix + 'Position (mm)', self.pos_raw,
self.pos_smooth, self.pos_legend)
figs.append(pos_fig_sub)
# Figure 2, orientation in 3 subplots
eul_fig_sub = self.plot_subplots(self.fig_num_start + 1, title_prefix + 'Euler Angles (deg)', self.eul_raw,
self.eul_smooth, self.euler_legend)
figs.append(eul_fig_sub)
# Figure 3, velocity in 3 subplots
vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 2, title_prefix + 'Velocity', 'Velocity (mm/s)',
self.vel)
figs.append(vel_fig_sub)
# Figure 4, angular velocity in 3 subplots
ang_vel_fig_sub = self.plot_subplots_vel(self.fig_num_start + 3, title_prefix + 'Angular Velocity',
'Angular Velocity (deg/s)', self.ang_vel)
figs.append(ang_vel_fig_sub)
# Figure 5, position in one axes
pos_fig_one = self.plot_one_axes(self.fig_num_start + 4, title_prefix + 'Position', 'Position (mm)',
self.pos_raw, self.pos_smooth, self.pos_legend)
figs.append(pos_fig_one)
# Figure 6, orientation in one axes
eul_fig_one = self.plot_one_axes(self.fig_num_start + 5, title_prefix + 'Euler Angles', 'Angle (deg)',
self.eul_raw, self.eul_smooth, self.euler_legend)
figs.append(eul_fig_one)
# Figure 7, velocity in one axes
vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 6, title_prefix + 'Velocity', 'Velocity (mm/s)',
self.vel, self.pos_legend)
figs.append(vel_fig_one)
# Figure 8, angular velocity in one axes
ang_vel_fig_one = self.plot_one_axes_vel(self.fig_num_start + 7, title_prefix + 'Angular Velocity',
'Angular Velocity (deg/s)', self.ang_vel, self.pos_legend)
figs.append(ang_vel_fig_one)
return figs
def plot_subplots(self, fig_num: int, title: str, raw: np.ndarray, smoothed: np.ndarray,
axes_lbl_entries: Sequence[str]) -> matplotlib.figure.Figure:
"""Plot position or orientation into 3 separate subplots for each spatial dimension."""
fig = plt.figure(fig_num)
axs = fig.subplots(3, 1, sharex=True)
raw_lines = marker_graph_init(axs, raw, '', self.frame_nums, color='red')
for idx, ax in enumerate(axs):
plot_utils.update_ylabel(ax, axes_lbl_entries[idx], font_size=10)
smoothed_lines = marker_graph_add(axs, smoothed, self.frame_nums, color='green')
plt.tight_layout()
plt.subplots_adjust(top=0.94)
fig.suptitle(title)
fig.legend((raw_lines[0], smoothed_lines[0]), ('Raw', 'Smoothed'), ncol=2, handlelength=0.75,
handletextpad=0.25, columnspacing=0.5, loc='lower left')
make_interactive()
return fig
def plot_one_axes(self, fig_num: int, title: str, y_label: str, raw: np.ndarray, smoothed: np.ndarray,
legend_entries: Sequence[str]) -> matplotlib.figure.Figure:
"""Plot position or orientation on one axes with different colors for each spatial dimension."""
fig = plt.figure(fig_num)
ax = fig.subplots(1, 1)
raw_lines = kine_graph_init(ax, raw, y_label, self.frame_nums, [{'ls': ':', 'lw': 2}] * 3)
ax.set_prop_cycle(None)
smoothed_lines = kine_graph_add(ax, smoothed, self.frame_nums, [{'ls': '-'}] * 3)
plt.tight_layout()
fig.suptitle(title, x=0.7)
legend_text = ('Raw (' + legend_entries[0] + ')', 'Smoothed (' + legend_entries[1] + ')',
'Smoothed (' + legend_entries[2] + ')')
fig.legend((raw_lines[0], smoothed_lines[1], smoothed_lines[2]), legend_text, ncol=3, handlelength=0.75,
handletextpad=0.25, columnspacing=0.5, loc='lower left')
make_interactive()
return fig
def plot_subplots_vel(self, fig_num: int, title: str, y_label: str, vel: np.ndarray) -> matplotlib.figure.Figure:
"""Plot velocity into 3 separate subplots for each spatial dimension."""
fig = plt.figure(fig_num)
axs = fig.subplots(3, 1, sharex=True)
marker_graph_init(axs, vel, y_label, self.frame_nums, color='blue')
plt.tight_layout()
fig.suptitle(title)
make_interactive()
return fig
def plot_one_axes_vel(self, fig_num: int, title: str, y_label: str, vel: np.ndarray,
legend_entries: Sequence[str]) -> matplotlib.figure.Figure:
"""Plot velocity on one axes with different colors for each spatial dimension."""
fig = plt.figure(fig_num)
ax = fig.subplots(1, 1)
kine_graph_init(ax, vel, y_label, self.frame_nums, [{}] * 3)
plt.tight_layout()
fig.suptitle(title, x=0.7)
fig.legend(legend_entries, ncol=3, handlelength=0.75, handletextpad=0.25, columnspacing=0.5, loc='lower left')
make_interactive()
return fig
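if __name__ == '__main__':
    # Minimal usage sketch (illustrative only): the trial name and trajectories
    # below are synthetic, and running this assumes the biplane_kine package and
    # its graphing helpers are importable. Array shapes follow the class
    # docstring: six (N, 3) trajectories plus an (N,) vector of frame numbers.
    n_frames = 100
    frame_nums = np.arange(n_frames) + 1

    def fake_traj():
        return np.random.randn(n_frames, 3)

    plotter = RawSmoothedKineTorsoPlotter(
        'demo_trial', fake_traj(), fake_traj(), fake_traj(), fake_traj(),
        fake_traj(), fake_traj(), frame_nums)
    figs = plotter.plot()  # returns the four figures described in plot()
    plt.show()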
|
[
"numpy.full_like",
"numpy.argmax",
"biplane_kine.graphing.plot_utils.update_ylabel",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.tight_layout"
] |
[((4662, 4681), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (4672, 4681), True, 'import matplotlib.pyplot as plt\n'), ((5004, 5022), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5020, 5022), True, 'import matplotlib.pyplot as plt\n'), ((5650, 5669), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (5660, 5669), True, 'import matplotlib.pyplot as plt\n'), ((6197, 6215), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (6213, 6215), True, 'import matplotlib.pyplot as plt\n'), ((7531, 7543), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7541, 7543), True, 'import matplotlib.pyplot as plt\n'), ((7789, 7807), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (7805, 7807), True, 'import matplotlib.pyplot as plt\n'), ((7854, 7883), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)'}), '(top=0.95)\n', (7873, 7883), True, 'import matplotlib.pyplot as plt\n'), ((14830, 14849), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (14840, 14849), True, 'import matplotlib.pyplot as plt\n'), ((15509, 15527), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (15525, 15527), True, 'import matplotlib.pyplot as plt\n'), ((21155, 21174), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (21165, 21174), True, 'import matplotlib.pyplot as plt\n'), ((21517, 21535), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21533, 21535), True, 'import matplotlib.pyplot as plt\n'), ((21544, 21573), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.94)'}), '(top=0.94)\n', (21563, 21573), True, 'import matplotlib.pyplot as plt\n'), ((22135, 22154), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (22145, 22154), True, 'import matplotlib.pyplot as plt\n'), ((22416, 22434), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (22432, 22434), True, 'import matplotlib.pyplot as plt\n'), ((23080, 23099), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (23090, 23099), True, 'import matplotlib.pyplot as plt\n'), ((23230, 23248), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23246, 23248), True, 'import matplotlib.pyplot as plt\n'), ((23603, 23622), 'matplotlib.pyplot.figure', 'plt.figure', (['fig_num'], {}), '(fig_num)\n', (23613, 23622), True, 'import matplotlib.pyplot as plt\n'), ((23732, 23750), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23748, 23750), True, 'import matplotlib.pyplot as plt\n'), ((7421, 7457), 'numpy.arange', 'np.arange', (['self.marker_data.shape[1]'], {}), '(self.marker_data.shape[1])\n', (7430, 7457), True, 'import numpy as np\n'), ((10713, 10740), 'matplotlib.pyplot.figure', 'plt.figure', (['(fig_start + idx)'], {}), '(fig_start + idx)\n', (10723, 10740), True, 'import matplotlib.pyplot as plt\n'), ((11209, 11257), 'numpy.full_like', 'np.full_like', (['self.sfs_data[marker_name]', 'np.nan'], {}), '(self.sfs_data[marker_name], np.nan)\n', (11221, 11257), True, 'import numpy as np\n'), ((11611, 11629), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11627, 11629), True, 'import matplotlib.pyplot as plt\n'), ((13419, 13450), 'numpy.arange', 'np.arange', (['self.sfs[0].shape[0]'], {}), '(self.sfs[0].shape[0])\n', (13428, 13450), True, 'import numpy as np\n'), 
((15305, 15366), 'biplane_kine.graphing.plot_utils.update_ylabel', 'plot_utils.update_ylabel', (['sub_ax', 'y_labels[idx]'], {'font_size': '(10)'}), '(sub_ax, y_labels[idx], font_size=10)\n', (15329, 15366), False, 'from biplane_kine.graphing import plot_utils\n'), ((21354, 21419), 'biplane_kine.graphing.plot_utils.update_ylabel', 'plot_utils.update_ylabel', (['ax', 'axes_lbl_entries[idx]'], {'font_size': '(10)'}), '(ax, axes_lbl_entries[idx], font_size=10)\n', (21378, 21419), False, 'from biplane_kine.graphing import plot_utils\n'), ((10051, 10097), 'numpy.argmax', 'np.argmax', (['[(gap[1] - gap[0]) for gap in gaps]'], {}), '([(gap[1] - gap[0]) for gap in gaps])\n', (10060, 10097), True, 'import numpy as np\n')]
|
from numpy import exp, array, random, dot
class NeuralNetworkIA():
def __init__(self):
# self.input = x
# Seed the random number generator, so it generates the same numbers
# every time the program runs.
# random.seed(1)
# We model a single neuron, with 3 input connections and 1 output connection.
# We assign random weights to a 3 x 1 matrix, with values in the range -1 to 1
# and mean 0.
self.synaptic_weights = 2 * random.random((3, 1)) - 1
# The Sigmoid function, which describes an S shaped curve.
# We pass the weighted sum of the inputs through this function to
# normalise them between 0 and 1.
def __sigmoid(self, x):
return 1 / (1 + exp(-x))
# The derivative of the Sigmoid function.
# This is the gradient of the Sigmoid curve.
# It indicates how confident we are about the existing weight.
def __sigmoid_derivative(self, x):
return x * (1 - x)
# We train the neural network through a process of trial and error.
# Adjusting the synaptic weights each time.
def train(self, training_set_inputs, training_set_outputs, number_of_training_iterations):
for iteration in range(number_of_training_iterations):
# Pass the training set through our neural network (a single neuron).
output = self.think(training_set_inputs)
# Calculate the error (The difference between the desired output
# and the predicted output).
error = training_set_outputs - output
# Multiply the error by the input and again by the gradient of the Sigmoid curve.
# This means less confident weights are adjusted more.
# This means inputs, which are zero, do not cause changes to the weights.
adjustment = dot(training_set_inputs.T, error * self.__sigmoid_derivative(output))
# Adjust the weights.
self.synaptic_weights += adjustment
# The neural network thinks.
def think(self, inputs):
# Pass inputs through our neural network (our single neuron).
return self.__sigmoid(dot(inputs, self.synaptic_weights))
def trainingInputData(self):
trainInput = []
for training in range(1000):
arr = []
for i in range(3):
arr.append(random.randint(0, 2))
trainInput.append(arr)
return trainInput
def trainingOutputData(self, input):
trainOutput = []
for i in input:
mp = ((i[0] * 1) + (i[1] * 2) + (i[2] * 3)) / 3
fixed = float((int(i[0]) * 0.33333333333) + (int(i[1]) * 0.33333333333) + (int(i[2]) * 0.33333333333))
print(fixed)
if fixed < 0.34:
trainOutput.append(0)
elif fixed < 0.67:
trainOutput.append(1)
else:
trainOutput.append(1)
return trainOutput
if __name__ == "__main__":
    # Initialise a single-neuron neural network.
neural_network = NeuralNetworkIA()
print("Random starting synaptic weights: ")
print(neural_network.synaptic_weights)
# The training set. We have 4 examples, each consisting of 3 input values
# and 1 output value.
# training_set_inputs = array([[0, 0, 0], [3, 3, 3], [2, 0, 2], [0, 3, 2]])
train = neural_network.trainingInputData()
test = neural_network.trainingOutputData(train)
print("train: ")
print(train)
print("test: ")
print(test)
    # Inputs as a (1000, 3) matrix and outputs as a (1000, 1) column vector so
    # the dot products in train() line up with the (3, 1) weight matrix.
    training_set_inputs = array(train)
    training_set_outputs = array([test]).T
# training_set_outputs = array([[0, 0, 1, 1]]).T
# Train the neural network using a training set.
# Do it 10,000 times and make small adjustments each time.
neural_network.train(training_set_inputs, training_set_outputs, 1000)
print("New synaptic weights after training: ")
print(neural_network.synaptic_weights)
# Test the neural network with a new situation.
print("Considering new situation [1, 0, 0] -> ?: ")
print(neural_network.think(array([1, 0, 0])))
|
[
"numpy.random.randint",
"numpy.random.random",
"numpy.array",
"numpy.exp",
"numpy.dot"
] |
[((3573, 3587), 'numpy.array', 'array', (['[train]'], {}), '([train])\n', (3578, 3587), False, 'from numpy import exp, array, random, dot\n'), ((2151, 2185), 'numpy.dot', 'dot', (['inputs', 'self.synaptic_weights'], {}), '(inputs, self.synaptic_weights)\n', (2154, 2185), False, 'from numpy import exp, array, random, dot\n'), ((4099, 4115), 'numpy.array', 'array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (4104, 4115), False, 'from numpy import exp, array, random, dot\n'), ((491, 512), 'numpy.random.random', 'random.random', (['(3, 1)'], {}), '((3, 1))\n', (504, 512), False, 'from numpy import exp, array, random, dot\n'), ((741, 748), 'numpy.exp', 'exp', (['(-x)'], {}), '(-x)\n', (744, 748), False, 'from numpy import exp, array, random, dot\n'), ((2364, 2384), 'numpy.random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (2378, 2384), False, 'from numpy import exp, array, random, dot\n')]
|
import os
import nltk
import spacy
import gensim
import numpy as np
from nltk.stem import WordNetLemmatizer
from nltk.stem.porter import PorterStemmer
from gensim.test.utils import datapath
class TopicModeling:
"""
    Topic modeling class with a coherence score of 0.52.
    Because an unsupervised learning approach is used, no formal accuracy
    tests were performed; the checks that were carried out manually
    gave good results.
    Given a text, it estimates which topic the text most likely
    belongs to, along with a probability.
"""
def __init__(self):
"""
Class constructor. Starts the main objects/attributes
for its operation.
"""
self.stemmer = PorterStemmer()
self.nlp = spacy.load("pt_core_news_sm")
self.nlp.Defaults.stop_words |= {
"tudo",
"coisa",
"toda",
"tava",
"pessoal",
"dessa",
"resolvido",
"aqui",
"gente",
"tá",
"né",
"calendário",
"jpb",
"agora",
"voltar",
"lá",
"hoje",
"aí",
"ainda",
"então",
"vai",
"porque",
"moradores",
"fazer",
"rua",
"bairro",
"prefeitura",
"todo",
"vamos",
"problema",
"fica",
"ver",
"tô",
}
self.stop_words_spacy = self.nlp.Defaults.stop_words
np.random.seed(2018)
nltk.download("wordnet")
self.allowed_postags = ["NOUN", "ADJ", "PRON"]
        # Load the model.
ROOT = os.path.abspath(os.path.dirname(__file__))
fname = datapath(ROOT + "/modelo/meu_lda_model")
self.model = gensim.models.LdaMulticore.load(fname=fname)
self.topics = {}
self.represent_topics(
[0, 1, 2, 3], ["Sanitation", "Traffic", "Construction", "Several"]
)
def pre_processing(self, text):
"""
        Method that performs the pre-processing of a text:
        - Removes stop words.
        - Removes words that are location entities.
        - Puts all text in lower case.
        - Performs lemmatization of the words.
        - Removes words that are not nouns, adjectives or pronouns.
Params:
----------
text : String
- Text that will undergo pre-processing.
Return:
----------
doc_out : List
- List of words that have gone through pre-processing.
"""
doc_out = []
doc = self.nlp(text)
location_entities = [entity for entity in doc.ents if entity.label_ == "LOC"]
for token in doc:
if (
token.text not in self.stop_words_spacy
and len(token.text) > 3
and token.pos_ in self.allowed_postags
and not self.is_entities_loc(token.text, location_entities)
):
doc_out.append(self.lemmatization(token.text))
return doc_out
def lemmatization(self, word):
"""
        Method that lemmatizes a word and then stems it.
        Params:
        ----------
        word : String
            - Word that will be lemmatized and stemmed.
        Return:
        ----------
        word : String
            - The lemmatized and stemmed word.
"""
return self.stemmer.stem(WordNetLemmatizer().lemmatize(word, pos="v"))
def is_entities_loc(self, word, location_entities):
"""
Method that check if the word is entity of location.
Params:
----------
word : String
- Word.
location_entities : List
- List of location entities recognized by spacy.
Return:
----------
True: If the word is a location entity.\n
        False: Otherwise.
"""
for entity in location_entities:
if entity.text.lower() == word.lower():
return True
return False
def print_keywords(self, max_number_words=None):
"""
Method that will print the keywords for each of topics in the model.
Params:
----------
max_number_words: Int
- Maximum number of words that representing a topic to be returned
Return:
----------
topics : List
- List of keywords for topics in the model.
"""
if max_number_words is None:
max_number_words = 5
topics = []
for topic in self.model.print_topics(-1, max_number_words):
topics.append(topic)
return topics
def print_topics(self):
"""
Method that will print each of topics in the model.
Return:
----------
topics : List
- List of topics in the model.
"""
return self.topics
def represent_topics(self, ids_topics, names_topics):
"""
Method that will set the values to the topics.
NOTE: The two must come in the same order, name in position 0
is from id in position 0.
Params:
----------
ids_topics: List
- List of ids in the topics.
names_topics: List
- List of names of topics.
"""
for id_topic, name in zip(ids_topics, names_topics):
self.topics[id_topic] = name
def get_topic(self, id_topic):
"""
Method that returns the representation of the topic.
Params:
----------
id_topic: Int
- Integer that represent the topic.
Return:
----------
topics: String
- The name that represent the topic with the `id_topic`.
"""
return self.topics[id_topic]
def rate_text(self, text):
"""
Method that will return from which topic the text is more
likely to belong to.
Params:
----------
text : String
- Text that will to be evaluate.
Return:
----------
result : List
- List of tuples with id of topic that the text belongs
and probability.
"""
bow_vector = self.model.id2word.doc2bow(self.pre_processing(text))
result = self.model.get_document_topics(bow_vector)
result = sorted(result, reverse=True, key=lambda t: t[1])
return result
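# Usage sketch (hypothetical text; assumes the pretrained LDA files under
# ./modelo and the spacy model "pt_core_news_sm" are installed):
#   tm = TopicModeling()
#   scores = tm.rate_text("Moradores reclamam de esgoto a céu aberto no bairro")
#   best_topic_id, probability = scores[0]
#   print(tm.get_topic(best_topic_id), probability)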
|
[
"numpy.random.seed",
"nltk.stem.WordNetLemmatizer",
"gensim.models.LdaMulticore.load",
"os.path.dirname",
"nltk.download",
"spacy.load",
"nltk.stem.porter.PorterStemmer",
"gensim.test.utils.datapath"
] |
[((713, 728), 'nltk.stem.porter.PorterStemmer', 'PorterStemmer', ([], {}), '()\n', (726, 728), False, 'from nltk.stem.porter import PorterStemmer\n'), ((748, 777), 'spacy.load', 'spacy.load', (['"""pt_core_news_sm"""'], {}), "('pt_core_news_sm')\n", (758, 777), False, 'import spacy\n'), ((1588, 1608), 'numpy.random.seed', 'np.random.seed', (['(2018)'], {}), '(2018)\n', (1602, 1608), True, 'import numpy as np\n'), ((1617, 1641), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (1630, 1641), False, 'import nltk\n'), ((1799, 1839), 'gensim.test.utils.datapath', 'datapath', (["(ROOT + '/modelo/meu_lda_model')"], {}), "(ROOT + '/modelo/meu_lda_model')\n", (1807, 1839), False, 'from gensim.test.utils import datapath\n'), ((1861, 1905), 'gensim.models.LdaMulticore.load', 'gensim.models.LdaMulticore.load', ([], {'fname': 'fname'}), '(fname=fname)\n', (1892, 1905), False, 'import gensim\n'), ((1756, 1781), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1771, 1781), False, 'import os\n'), ((3549, 3568), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (3566, 3568), False, 'from nltk.stem import WordNetLemmatizer\n')]
|
### Adapt from ###
from _util import *
import numpy as np
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
import collections
import os
os.environ["OMP_NUM_THREADS"] = "1"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OPENBLAS_NUM_THREADS"] = "1"
os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
import _cartpole as cartpole
reload(cartpole)
env = cartpole.CartPoleEnv(e_max = 1000)
class softmax_policy():
def __init__(self, pi, tau):
self.pi = pi
self.tau = tau
def sample_A(self, S):
probs = self.get_A_prob(S)
c = probs.cumsum(axis=1)
u = np.random.rand(len(c), 1)
As = (u < c).argmax(axis=1)
As = np.squeeze(As)
return As
def get_A(self, S):
return self.sample_A(S)
def get_A_prob(self, S):
S = np.atleast_2d(S)
Qs = self.pi.model(S).numpy()
probs = softmax(Qs / self.tau, axis = 1)
return probs
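# Note on sample_A: actions are drawn by inverse-CDF sampling of the softmax
# probabilities. probs.cumsum(axis=1) gives the per-row CDF and
# (u < c).argmax(axis=1) picks the first index whose cumulative mass exceeds
# the uniform draw u. Tiny illustration (numbers made up):
#   probs = [[0.2, 0.5, 0.3]] -> c = [[0.2, 0.7, 1.0]]; u = 0.65 -> action 1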
class GymEval():
def __init__(self, random_pi = False):
self.random_pi = random_pi
self.seed = 42
def eval_policy(self, pi, gamma, init_states = None, rep = 1000):
rewards = self.simu_trajs_para(pi, rep = rep, T = 500
, burn_in = None, init_states = init_states
, return_rewards = True)
Vs = []
for i in range(rep):
V = sum(r * gamma ** t for t, r in enumerate(rewards[i]))
Vs.append(V)
V_true = np.mean(Vs)
std_true_value = np.std(Vs) / np.sqrt(len(Vs))
printR("value = {:.4f} with std {:.4f}".format(V_true, std_true_value))
return V_true
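    # eval_policy estimates the policy value by Monte Carlo over `rep` rollouts:
    #   V(pi) ~= (1/rep) * sum_i sum_t gamma**t * r[i, t]
    # and reports the standard error of that mean, np.std(Vs) / sqrt(rep).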
def simu_trajs(self, pi, rep = 100, T = 1000
, burn_in = None, init_states = None, return_rewards = False):
########
envs = [cartpole.CartPoleEnv(e_max = 1000) for i in range(rep)]
Ss = randn(rep, 4)
for i in range(rep):
S = envs[i].reset()
if init_states is not None:
init_S = init_states[i]
envs[i].state = init_S
Ss[i] = init_S
else:
Ss[i] = S
trajs = [[] for i in range(rep)]
rewards = [[] for i in range(rep)]
############
for t in range(T):
np.random.seed(self.seed)
self.seed += 1
if t * 2 % T == 0:
print("simu {}% DONE!".format(str(t / T * 100)))
if self.random_pi:
As = pi.sample_A(Ss)
else:
As = pi.get_A(Ss)
for i in range(rep):
SS, reward, done, _ = envs[i].step(As[i])
SARS = [Ss[i].copy(), As[i], reward, SS.copy()]
Ss[i] = SS
trajs[i].append(SARS)
rewards[i].append(reward)
############
if return_rewards:
return rewards
if burn_in is not None:
trajs = [traj[burn_in:] for traj in trajs]
return trajs
def simu_trajs_para(self, pi, rep = 100, T = 1000
, burn_in = None, init_states = None, return_rewards = False):
########
env = cartpole.CartPoleEnv(e_max = 1000)
Ss = env.reset_multiple(rep)
Ss = Ss.T
if init_states is not None:
env.states = init_states.T
Ss = init_states
trajs = [[] for i in range(rep)]
rewards = [[] for i in range(rep)]
############
for t in range(T):
np.random.seed(self.seed)
self.seed += 1
if t * 2 % T == 0:
print("simu {}% DONE!".format(str(t / T * 100)))
if self.random_pi:
As = pi.sample_A(Ss)
else:
As = pi.get_A(Ss)
SSs, Rs, _, _ = env.step_multiple(As)
for i in range(rep):
SARS = [Ss[i].copy(), As[i], Rs[i], SSs.T[i].copy()]
trajs[i].append(SARS)
rewards[i].append(Rs[i])
Ss = SSs.T
############
if return_rewards:
return rewards
if burn_in is not None:
trajs = [traj[burn_in:] for traj in trajs]
return trajs
def get_init_S_from_trajs(self, trajs, n_init = 1000):
np.random.seed(42)
states = np.array([item[0] for traj in trajs for item in traj])
return states[np.random.choice(len(states), n_init)]
############################################################################################################################################################################################################################################################################################################################################################
|
[
"numpy.random.seed",
"_cartpole.CartPoleEnv",
"numpy.std",
"tensorflow.config.experimental.set_memory_growth",
"numpy.mean",
"numpy.array",
"numpy.squeeze",
"tensorflow.config.experimental.list_physical_devices",
"numpy.atleast_2d"
] |
[((89, 140), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (133, 140), True, 'import tensorflow as tf\n'), ((580, 612), '_cartpole.CartPoleEnv', 'cartpole.CartPoleEnv', ([], {'e_max': '(1000)'}), '(e_max=1000)\n', (600, 612), True, 'import _cartpole as cartpole\n'), ((919, 933), 'numpy.squeeze', 'np.squeeze', (['As'], {}), '(As)\n', (929, 933), True, 'import numpy as np\n'), ((1050, 1066), 'numpy.atleast_2d', 'np.atleast_2d', (['S'], {}), '(S)\n', (1063, 1066), True, 'import numpy as np\n'), ((1704, 1715), 'numpy.mean', 'np.mean', (['Vs'], {}), '(Vs)\n', (1711, 1715), True, 'import numpy as np\n'), ((3412, 3444), '_cartpole.CartPoleEnv', 'cartpole.CartPoleEnv', ([], {'e_max': '(1000)'}), '(e_max=1000)\n', (3432, 3444), True, 'import _cartpole as cartpole\n'), ((4529, 4547), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (4543, 4547), True, 'import numpy as np\n'), ((4565, 4619), 'numpy.array', 'np.array', (['[item[0] for traj in trajs for item in traj]'], {}), '([item[0] for traj in trajs for item in traj])\n', (4573, 4619), True, 'import numpy as np\n'), ((196, 247), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (236, 247), True, 'import tensorflow as tf\n'), ((1741, 1751), 'numpy.std', 'np.std', (['Vs'], {}), '(Vs)\n', (1747, 1751), True, 'import numpy as np\n'), ((2043, 2075), '_cartpole.CartPoleEnv', 'cartpole.CartPoleEnv', ([], {'e_max': '(1000)'}), '(e_max=1000)\n', (2063, 2075), True, 'import _cartpole as cartpole\n'), ((2525, 2550), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (2539, 2550), True, 'import numpy as np\n'), ((3750, 3775), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (3764, 3775), True, 'import numpy as np\n')]
|
# MIT License
#
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from numpy import empty
import numpy as np
from fdistance import fmanhattan_distance
from fdistance import fl2_distance
from fdistance import fp_distance_integer, fp_distance_double
from farad import atomic_arad_l2_distance_all
PTP = {\
1 :[1,1] ,2: [1,8]#Row1
,3 :[2,1] ,4: [2,2]#Row2\
,5 :[2,3] ,6: [2,4] ,7 :[2,5] ,8 :[2,6] ,9 :[2,7] ,10 :[2,8]\
,11 :[3,1] ,12: [3,2]#Row3\
,13 :[3,3] ,14: [3,4] ,15 :[3,5] ,16 :[3,6] ,17 :[3,7] ,18 :[3,8]\
,19 :[4,1] ,20: [4,2]#Row4\
,31 :[4,3] ,32: [4,4] ,33 :[4,5] ,34 :[4,6] ,35 :[4,7] ,36 :[4,8]\
,21 :[4,9] ,22: [4,10],23 :[4,11],24 :[4,12],25 :[4,13],26 :[4,14],27 :[4,15],28 :[4,16],29 :[4,17],30 :[4,18]\
,37 :[5,1] ,38: [5,2]#Row5\
,49 :[5,3] ,50: [5,4] ,51 :[5,5] ,52 :[5,6] ,53 :[5,7] ,54 :[5,8]\
,39 :[5,9] ,40: [5,10],41 :[5,11],42 :[5,12],43 :[5,13],44 :[5,14],45 :[5,15],46 :[5,16],47 :[5,17],48 :[5,18]\
,55 :[6,1] ,56: [6,2]#Row6\
,81 :[6,3] ,82: [6,4] ,83 :[6,5] ,84 :[6,6] ,85 :[6,7] ,86 :[6,8]
,72: [6,10],73 :[6,11],74 :[6,12],75 :[6,13],76 :[6,14],77 :[6,15],78 :[6,16],79 :[6,17],80 :[6,18]\
,57 :[6,19],58: [6,20],59 :[6,21],60 :[6,22],61 :[6,23],62 :[6,24],63 :[6,25],64 :[6,26],65 :[6,27],66 :[6,28],67 :[6,29],68 :[6,30],69 :[6,31],70 :[6,32],71 :[6,33]\
,87 :[7,1] ,88: [7,2]#Row7\
,113:[7,3] ,114:[7,4] ,115:[7,5] ,116:[7,6] ,117:[7,7] ,118:[7,8]\
,104:[7,10],105:[7,11],106:[7,12],107:[7,13],108:[7,14],109:[7,15],110:[7,16],111:[7,17],112:[7,18]\
    ,89 :[7,19],90: [7,20],91 :[7,21],92 :[7,22],93 :[7,23],94 :[7,24],95 :[7,25],96 :[7,26],97 :[7,27],98 :[7,28],99 :[7,29],100:[7,30],101:[7,31],102:[7,32],103:[7,33]}
def manhattan_distance(A, B):
if len(A.shape) != 2 or len(B.shape) != 2:
raise ValueError('expected matrices of dimension=2')
if B.shape[0] != A.shape[0]:
raise ValueError('expected matrices containing vectors of same size')
na = A.shape[1]
nb = B.shape[1]
D = empty((na, nb), order='F')
fmanhattan_distance(A, B, D)
return D
def l2_distance(A, B):
if len(A.shape) != 2 or len(B.shape) != 2:
raise ValueError('expected matrices of dimension=2')
if B.shape[0] != A.shape[0]:
raise ValueError('expected matrices containing vectors of same size')
na = A.shape[1]
nb = B.shape[1]
D = empty((na, nb), order='F')
fl2_distance(A, B, D)
return D
def p_distance(A, B, p=2):
if len(A.shape) != 2 or len(B.shape) != 2:
raise ValueError('expected matrices of dimension=2')
if B.shape[0] != A.shape[0]:
raise ValueError('expected matrices containing vectors of same size')
na = A.shape[1]
nb = B.shape[1]
D = empty((na, nb), order='F')
if (type(p) == type(1)):
if (p == 2):
fl2_distance(A, B, D)
else:
fp_distance_integer(A, B, D, p)
elif (type(p) == type(1.0)):
fp_distance_double(A, B, D, p)
else:
raise ValueError('expected exponent of integer or float type')
return D
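# A plain-NumPy sketch of what the Fortran kernels above compute: distances
# between the *columns* of A and B. The helper below is illustrative only
# (not part of the Fortran-backed API) and is handy as a slow cross-check.
def _l2_distance_numpy(A, B):
    """Reference implementation: D[i, j] = ||A[:, i] - B[:, j]||_2."""
    diff = A[:, :, None] - B[:, None, :]      # shape (d, na, nb)
    return np.sqrt((diff ** 2).sum(axis=0))  # shape (na, nb)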
def get_l2_distance_arad(X1, X2, Z1, Z2, \
width=0.2, cut_distance=6.0, r_width=1.0, c_width=0.5):
""" Calculates the Gaussian distance matrix D for atomic ARAD for two
sets of molecules
    D is calculated using an OpenMP parallel Fortran routine.
Arguments:
==============
X1 -- np.array of ARAD descriptors for molecules in set 1.
X2 -- np.array of ARAD descriptors for molecules in set 2.
Z1 -- List of lists of nuclear charges for molecules in set 1.
Z2 -- List of lists of nuclear charges for molecules in set 2.
Keyword arguments:
width --
cut_distance --
r_width --
c_width --
Returns:
==============
D -- The distance matrices for each sigma (4D-array, Nmol1 x Nmol2 x Natom1 x Natoms2)
"""
amax = X1.shape[1]
    assert X1.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 1"
    assert X2.shape[1] == amax, "ERROR: Check ARAD descriptor sizes! code = 2"
    assert X2.shape[3] == amax, "ERROR: Check ARAD descriptor sizes! code = 3"
    nm1 = len(Z1)
    nm2 = len(Z2)
    assert X1.shape[0] == nm1, "ERROR: Check ARAD descriptor sizes! code = 4"
    assert X2.shape[0] == nm2, "ERROR: Check ARAD descriptor sizes! code = 5"
N1 = []
for Z in Z1:
N1.append(len(Z))
N2 = []
for Z in Z2:
N2.append(len(Z))
N1 = np.array(N1,dtype=np.int32)
N2 = np.array(N2,dtype=np.int32)
c1 = []
for charges in Z1:
c1.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z1_arad = np.zeros((nm1,amax,2))
for i in range(nm1):
for j, z in enumerate(c1[i]):
Z1_arad[i,j] = z
c2 = []
for charges in Z2:
c2.append(np.array([PTP[int(q)] for q in charges], dtype=np.int32))
Z2_arad = np.zeros((nm2,amax,2))
for i in range(nm2):
for j, z in enumerate(c2[i]):
Z2_arad[i,j] = z
return atomic_arad_l2_distance_all(X1, X2, Z1_arad, Z2_arad, N1, N2, \
nm1, nm2, width, cut_distance, r_width, c_width, amax)
|
[
"fdistance.fl2_distance",
"fdistance.fp_distance_double",
"numpy.empty",
"numpy.zeros",
"fdistance.fmanhattan_distance",
"fdistance.fp_distance_integer",
"numpy.array",
"farad.atomic_arad_l2_distance_all"
] |
[((3186, 3212), 'numpy.empty', 'empty', (['(na, nb)'], {'order': '"""F"""'}), "((na, nb), order='F')\n", (3191, 3212), False, 'from numpy import empty\n'), ((3218, 3246), 'fdistance.fmanhattan_distance', 'fmanhattan_distance', (['A', 'B', 'D'], {}), '(A, B, D)\n', (3237, 3246), False, 'from fdistance import fmanhattan_distance\n'), ((3556, 3582), 'numpy.empty', 'empty', (['(na, nb)'], {'order': '"""F"""'}), "((na, nb), order='F')\n", (3561, 3582), False, 'from numpy import empty\n'), ((3588, 3609), 'fdistance.fl2_distance', 'fl2_distance', (['A', 'B', 'D'], {}), '(A, B, D)\n', (3600, 3609), False, 'from fdistance import fl2_distance\n'), ((3923, 3949), 'numpy.empty', 'empty', (['(na, nb)'], {'order': '"""F"""'}), "((na, nb), order='F')\n", (3928, 3949), False, 'from numpy import empty\n'), ((5683, 5711), 'numpy.array', 'np.array', (['N1'], {'dtype': 'np.int32'}), '(N1, dtype=np.int32)\n', (5691, 5711), True, 'import numpy as np\n'), ((5720, 5748), 'numpy.array', 'np.array', (['N2'], {'dtype': 'np.int32'}), '(N2, dtype=np.int32)\n', (5728, 5748), True, 'import numpy as np\n'), ((5875, 5899), 'numpy.zeros', 'np.zeros', (['(nm1, amax, 2)'], {}), '((nm1, amax, 2))\n', (5883, 5899), True, 'import numpy as np\n'), ((6118, 6142), 'numpy.zeros', 'np.zeros', (['(nm2, amax, 2)'], {}), '((nm2, amax, 2))\n', (6126, 6142), True, 'import numpy as np\n'), ((6246, 6366), 'farad.atomic_arad_l2_distance_all', 'atomic_arad_l2_distance_all', (['X1', 'X2', 'Z1_arad', 'Z2_arad', 'N1', 'N2', 'nm1', 'nm2', 'width', 'cut_distance', 'r_width', 'c_width', 'amax'], {}), '(X1, X2, Z1_arad, Z2_arad, N1, N2, nm1, nm2,\n width, cut_distance, r_width, c_width, amax)\n', (6273, 6366), False, 'from farad import atomic_arad_l2_distance_all\n'), ((4014, 4035), 'fdistance.fl2_distance', 'fl2_distance', (['A', 'B', 'D'], {}), '(A, B, D)\n', (4026, 4035), False, 'from fdistance import fl2_distance\n'), ((4062, 4093), 'fdistance.fp_distance_integer', 'fp_distance_integer', (['A', 'B', 'D', 'p'], {}), '(A, B, D, p)\n', (4081, 4093), False, 'from fdistance import fp_distance_integer, fp_distance_double\n'), ((4136, 4166), 'fdistance.fp_distance_double', 'fp_distance_double', (['A', 'B', 'D', 'p'], {}), '(A, B, D, p)\n', (4154, 4166), False, 'from fdistance import fp_distance_integer, fp_distance_double\n')]
|
#!/usr/bin/env python3
from numpy import array, append, iinfo, sin, cos, linspace, int16, pi, zeros_like, ones_like, max as npmax
from scipy.io.wavfile import write
from scipy.signal import square, chirp
# import matplotlib.pyplot as plt
max_amplitude = iinfo(int16).max
A5 = 880.
A4 = 440.
A3 = 220.
A2 = 110.
samplerate = 44100
duration_s = 10
t = linspace(0., duration_s, num=duration_s * samplerate)
# def cos_waveform(f_hz, duration_s=1, amplitude=max_amplitude):
# return amplitude * cos(2 * pi * f_hz * t[:duration_s * samplerate])
# def sweep_amplitude(f_hz, from_A, to_A, duration_s=1):
# amplitude = linspace(from_A, to_A, num=duration_s * samplerate)
# return amplitude * cos(2 * pi * f_hz * t[:duration_s * samplerate])
# def sweep_hz(from_hz, to_hz, duration_s=1, method='linear'):
# return max_amplitude * chirp(
# t[:duration_s*samplerate],
# f0=from_hz,
# f1=to_hz,
# t1=t[:duration_s*samplerate][-1],
# method=method, # ‘linear’ | ‘quadratic’ | ‘logarithmic’ | ‘hyperbolic’
# )
def waveform(f_hz, A=max_amplitude, duration_s=1, to_A=None, to_hz=None, method='linear'):
to_hz = to_hz if to_hz is not None else f_hz
amplitude = linspace(A, to_A, num=int(duration_s * samplerate)) if to_A is not None else A
return amplitude * chirp(
t[:int(duration_s*samplerate)],
f0=f_hz,
f1=to_hz,
t1=t[:int(duration_s*samplerate)][-1],
method=method, # ‘linear’ | ‘quadratic’ | ‘logarithmic’ | ‘hyperbolic’
)
def append_all(*signals):
    # Concatenate signals; use a local name that does not shadow waveform().
    out = array([])
    for signal in signals:
        out = append(out, signal)
    return out
signal = append_all(
waveform(A2, A=0, to_A=max_amplitude, duration_s=0.1),
waveform(A2, duration_s=2),
waveform(A2, to_hz=A3, duration_s=5),
waveform(A3, duration_s=2),
waveform(A3, to_A=0, duration_s=0.1),
)
# signal = append_all(
# waveform(A3, A=0, to_A=max_amplitude, duration_s=0.05),
# waveform(A3, to_hz=A2, to_A=0, duration_s=5),
# waveform(A2, to_hz=A3, A=0, to_A=max_amplitude, duration_s=5),
# waveform(A3, to_A=0, duration_s=0.05),
# )
# center = 2 * samplerate
# plt.plot(t[center-1000:center+1000], signal[center-1000:center+1000])
# plt.savefig('waveform.png')
# plt.show()
write("example.wav", samplerate, signal.astype(int16))
|
[
"numpy.append",
"numpy.array",
"numpy.iinfo",
"numpy.linspace"
] |
[((357, 411), 'numpy.linspace', 'linspace', (['(0.0)', 'duration_s'], {'num': '(duration_s * samplerate)'}), '(0.0, duration_s, num=duration_s * samplerate)\n', (365, 411), False, 'from numpy import array, append, iinfo, sin, cos, linspace, int16, pi, zeros_like, ones_like, max as npmax\n'), ((257, 269), 'numpy.iinfo', 'iinfo', (['int16'], {}), '(int16)\n', (262, 269), False, 'from numpy import array, append, iinfo, sin, cos, linspace, int16, pi, zeros_like, ones_like, max as npmax\n'), ((1580, 1589), 'numpy.array', 'array', (['[]'], {}), '([])\n', (1585, 1589), False, 'from numpy import array, append, iinfo, sin, cos, linspace, int16, pi, zeros_like, ones_like, max as npmax\n'), ((1636, 1660), 'numpy.append', 'append', (['waveform', 'signal'], {}), '(waveform, signal)\n', (1642, 1660), False, 'from numpy import array, append, iinfo, sin, cos, linspace, int16, pi, zeros_like, ones_like, max as npmax\n')]
|
"""Analyze the trained models."""
import sys
import os
import pickle
import warnings
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
import torch
from torch.utils.tensorboard import SummaryWriter
rootpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(rootpath)
import tools
from tools import nicename, save_fig
from datasets.dataset_utils import get_dataset
from models.model_utils import get_model
mpl.rcParams['font.size'] = 7
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['ps.fonttype'] = 42
mpl.rcParams['font.family'] = 'arial'
mpl.rcParams['figure.facecolor'] = 'None'
figpath = os.path.join(rootpath, 'figures')
def _get_ax_args(xkey, ykey):
rect = (0.3, 0.35, 0.5, 0.55)
ax_args = {}
return rect, ax_args
def plot_progress(save_path, select=None, exclude=None,
legend_key=None, x_range=None, ykeys=None, ax_args=None):
"""Plot progress through training."""
def _plot_progress(xkey, ykey, modeldirs):
"""Plot progress for one xkey and ykey pair."""
if ax_args is None:
rect, ax_args_ = _get_ax_args(xkey, ykey)
else:
rect = [0.25, 0.3, 0.6, 0.5]
ax_args_ = ax_args
n_model = len(modeldirs)
logs = [tools.load_log(d) for d in modeldirs]
cfgs = [tools.load_config(d) for d in modeldirs]
figsize = (3.5, 2)
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(rect, **ax_args_)
colors = [cm.cool(x) for x in np.linspace(0, 1, n_model)]
for i in range(n_model):
x, y, c = logs[i][xkey], logs[i][ykey], colors[i]
if x_range:
x, y = x[x_range[0]: x_range[1]], y[x_range[0]: x_range[1]]
ax.plot(x, y, color=c, linewidth=1)
ax.text(x[-1]*1.05, y[-1], nicename(y[-1], mode=ykey), color=c)
if legend_key is not None:
# Check if legend_key is string or tuple
legend_key_ = [legend_key] if isinstance(legend_key, str) else legend_key
legends = []
            # Loop over curves/models
for i in range(n_model):
cfg = tools.flatten_nested_dict(cfgs[i])
l_list = []
# Loop over possible tuple of legend keys
for lk in legend_key_:
# TODO: make more general
if lk in cfg:
nn = nicename(cfg[lk], mode=lk)
elif lk == 'plasticnet': #this is a hack
lk = 'plasticnet.network'
                        nn = nicename(cfg[lk], mode=lk)
elif any([k.startswith(lk) for k in cfg]):
                        nn = ' '.join([nicename(cfg[k], mode=k)
                                       for k in cfg if k.startswith(lk)])
else: #should not execute, but won't break if it does, just makes ugly figure
warnings.warn('Key {} not found in log from {}'.format(lk, save_path))
nn = 'Unknown key: {}'.format(lk)
l_list.append(nn)
legends.append(', '.join(l_list))
title = ', '.join([nicename(lk) for lk in legend_key_])
ax.legend(legends, fontsize=7, frameon=False, ncol=2, loc='best')
plt.title(title, fontsize=7)
ax.set_xlabel(nicename(xkey))
ax.set_ylabel(nicename(ykey))
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
if x_range:
ax.set_xlim([x_range[0], x_range[1]])
else:
ax.set_xlim([-1, logs[0][xkey][-1]])
figname = '_' + ykey
if select:
for k, v in select.items():
figname += k + '_' + str(v) + '_'
if x_range:
figname += '_x_range_' + str(x_range[1])
save_fig(save_path, figname)
modeldirs = tools.get_modeldirs(save_path, select_dict=select,
exclude_dict=exclude)
if not modeldirs:
print('No model to plot progress')
return
if ykeys is None:
ykeys = []
if ykeys == 'all':
log = tools.load_log(modeldirs[0])
ykeys = [k for k, v in log.items() if v.shape == log['steps'].shape]
if isinstance(ykeys, str):
ykeys = [ykeys]
for plot_var in ykeys:
_plot_progress('steps', plot_var, modeldirs)
def get_errorbar(x):
"""Get errorbar.
Args:
x: list of lists.
Returns:
x_mean: list of mean values
x_err: array (2, N) lower error and upper error for 95% conf interval
"""
x_mean = [np.mean(x0) for x0 in x]
x_err = np.zeros((2, len(x_mean)))
for i, a in enumerate(x):
bootstrap = [np.mean(np.random.choice(a, size=len(a))) for _ in
range(100)]
x_err[0, i] = x_mean[i] - np.percentile(bootstrap, 2.5)
x_err[1, i] = np.percentile(bootstrap, 97.5) - x_mean[i]
return x_mean, x_err
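# Example of get_errorbar on made-up data (three conditions, a few repeats each):
#   means, errs = get_errorbar([[0.8, 0.9, 0.85], [0.6, 0.7], [0.4, 0.45, 0.5]])
#   plt.errorbar(range(3), means, yerr=errs)
# errs[0, i] / errs[1, i] are the distances from the mean down/up to the
# bootstrapped 2.5th / 97.5th percentiles, i.e. an approximate 95% CI.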
def plot_results(path, xkey, ykey, loop_key=None, select=None,
logx=None, logy=False, figsize=None, ax_args=None,
plot_args=None, ax_box=None, res=None, string='',
plot_actual_value=True):
"""Plot results for varying parameters experiments.
Args:
path: str, model save path
xkey: str, key for the x-axis variable
ykey: str, key for the y-axis variable
loop_key: str, key for the value to loop around
select: dict, dict of parameters to select
logx: bool, if True, use log x-axis
        logy: bool, if True, use log y-axis
"""
if isinstance(ykey, str):
ykeys = [ykey]
else:
ykeys = ykey
if res is None:
res = tools.load_results(path, select=select)
tmp = res[xkey][0]
xkey_is_string = isinstance(tmp, str) or tmp is None
if plot_args is None:
plot_args = {}
# Unique sorted xkey values
xvals = sorted(set(res[xkey]))
if logx is None:
logx = False
if figsize is None:
if xkey == 'lr':
figsize = (4.5, 1.5)
else:
figsize = (1.5, 1.2)
def _plot_results(ykey):
# Default ax_args and other values, based on x and y keys
rect, ax_args_ = _get_ax_args(xkey, ykey)
if ax_args:
ax_args_.update(ax_args)
if ax_box is not None:
rect = ax_box
fig = plt.figure(figsize=figsize)
ax = fig.add_axes(rect, **ax_args_)
if loop_key:
loop_vals = np.unique(res[loop_key])
colors = [cm.cool(x) for x in np.linspace(0, 1, len(loop_vals))]
for loop_val, color in zip(loop_vals, colors):
ind = res[loop_key] == loop_val
x_plot = res[xkey][ind]
y_plot = res[ykey][ind]
if logx:
x_plot = np.log(x_plot)
if logy:
y_plot = np.log(y_plot)
# x_plot = [str(x).rsplit('/', 1)[-1] for x in x_plot]
ax.plot(x_plot, y_plot, 'o-', markersize=3, color=color,
label=nicename(loop_val, mode=loop_key), **plot_args)
else:
# Organize
yvals = list()
yvals_all = list()
for xval in xvals:
yval_tmp = [res[ykey][i] for i, r in enumerate(res[xkey]) if
r == xval]
yval_tmp = np.array(yval_tmp)
yval_tmp = yval_tmp.flatten()
if logy:
yval_tmp = np.log(yval_tmp)
yvals.append(np.mean(yval_tmp))
yvals_all.append(yval_tmp)
y_mean, y_error = get_errorbar(yvals_all)
if xkey_is_string:
x_plot = np.arange(len(xvals))
else:
if logx:
x_plot = np.log(np.array(xvals))
else:
x_plot = xvals
# ax.plot(x_plot, y_mean, fmt='o-', markersize=3, **plot_args)
ax.errorbar(x_plot, y_mean, yerr=y_error, fmt='o-', markersize=3,
**plot_args)
if plot_actual_value:
for x, y in zip(x_plot, y_mean):
if y > ax.get_ylim()[-1]:
continue
ytext = '{:0.2f}'.format(y)
ax.text(x, y, ytext, fontsize=6,
horizontalalignment='center',
verticalalignment='bottom')
if 'xticks' in ax_args_.keys():
xticks = ax_args_['xticks']
ax.set_xticks(xticks)
else:
xticks = x_plot
xticklabels = [nicename(x, mode=xkey) for x in xvals]
ax.set_xticks(xticks)
ax.set_xticklabels(xticklabels)
# ax.set_xticks(xticks)
# if not xkey_is_string:
# x_span = xticks[-1] - xticks[0]
# ax.set_xlim([xticks[0]-x_span*0.05, xticks[-1]+x_span*0.05])
# ax.set_xticklabels(xticklabels)
if 'yticks' in ax_args_.keys():
yticks = ax_args_['yticks']
if logy:
ax.set_yticks(np.log(yticks))
ax.set_yticklabels(yticks)
else:
ax.set_yticks(yticks)
else:
plt.locator_params(axis='y', nbins=3)
ax.set_xlabel(nicename(xkey))
ax.set_ylabel(nicename(ykey))
if loop_key:
l = ax.legend(loc=1, bbox_to_anchor=(1.0, 0.5), fontsize= 7,
frameon=False, ncol=2)
l.set_title(nicename(loop_key))
figname = '_' + ykey + '_vs_' + xkey
if loop_key:
figname += '_vary' + loop_key
if select:
for k, v in select.items():
if isinstance(v, list):
v = [x.rsplit('/',1)[-1] for x in v]
v = str('__'.join(v))
else:
v = str(v)
figname += k + '_' + v + '__'
figname += string
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
tools.save_fig(path, figname)
for ykey in ykeys:
_plot_results(ykey)
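# Usage sketch for plot_results (path and keys are hypothetical and depend on
# how the experiments were configured and saved):
#   plot_results('./files/my_experiment', xkey='lr', ykey='acc')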
def plot_value_in_time(X, name, save_path=None):
"""Plot X with imshow."""
fig = plt.figure(figsize=(3, 2.5))
plt.imshow(X.T, aspect='auto')
plt.title(name)
plt.xlabel('Position')
plt.ylabel(name)
plt.colorbar()
if save_path is not None:
save_fig(save_path, 'analyze_'+name)
def plot_matrix_in_time(X, name, save_path=None, n_plot=5):
"""Plot X at each time point
args:
X: (n_time, .., ..)
"""
n_time = X.shape[0]
n_time_plot = np.linspace(0, n_time - 1, n_plot, dtype=int)
vlim = np.max(np.abs(X))
fig, axes = plt.subplots(1, n_plot, sharey=True, figsize=(7, 1.5))
for i, ind in enumerate(n_time_plot):
ax = axes[i]
im = ax.imshow(X[ind], aspect='auto', vmin=-vlim, vmax=vlim)
ax.set_title('Time {:d}'.format(ind))
# f.colorbar(im, ax=ax)
fig.subplots_adjust(right=0.93)
cbar_ax = fig.add_axes([0.95, 0.15, 0.015, 0.7])
fig.colorbar(im, cax=cbar_ax)
fig.suptitle(name, y=1.08)
if save_path is not None:
save_fig(save_path, 'analyze_' + name)
def evaluate_run(modeldir=None, update_config=None, model=None, custom_data=None,
n_batch=1, analyze=False, load=True, load_hebb=True,
reset_hebb=True, fname=None, save_pickle=False, save_path=None):
"""Evaluate the network for batches of data post training.
Args:
modeldir: if None, then do not load models
update_config: optional update of config loaded from save_path
model: if not None, use this model and disregard modeldir and
update_config
custom_data: optional. Output of dataset.generate(), for evaluating net
on a particular instance of the dataset (must have n_batch=1)
n_batch: number of dataset batch to run
analyze: if True, network in analyze mode
load_hebb: if True, load Hebbian weights
reset_hebb: if True, reset Hebbian weights for each batch
fname: str, filename to store
save_pickle: if True, save results to pickle file
save_path: path to save results
"""
if modeldir is None:
config = dict()
else:
config = tools.load_config(modeldir)
if update_config is not None:
config = tools.nested_update(config, update_config)
# Training networks
device = 'cuda' if torch.cuda.is_available() else 'cpu'
if custom_data is None:
dataset = get_dataset(config['dataset'], verbose=False)
output_size = dataset.output_dim
else:
assert n_batch==1, "Only one batch of custom_data can be passed in"
seq_len, output_size = custom_data['target'].shape
if model is None:
net = get_model(config)
if load and 'save_path' in config:
model_path = os.path.join(config['save_path'], 'model.pt')
net.load(model_path, map_location=device, load_hebb=load_hebb)
else:
net = model
if analyze:
net.analyze()
    # torch.no_grad() used as a bare statement has no effect; disable gradient
    # tracking explicitly for the evaluation loop instead.
    torch.set_grad_enabled(False)
# TODO: Consider a different way to store
results = defaultdict(list)
for batch in range(n_batch):
if reset_hebb:
for m in net.modules():
if 'reset_hebb' in dir(m):
m.reset_hebb()
if custom_data is None:
data = dataset.generate()
else:
data = custom_data
results['data'].append(data)
for key, val in data.items():
data[key] = torch.from_numpy(val).float().to(device)
# Add batch dimension
data['input'] = data['input'].unsqueeze(1)
if 'modu_input' in data.keys():
data['modu_input'] = data['modu_input'].unsqueeze(1)
if 'modu_input' in data.keys():
# TODO: This forces all networks to accept modu_input, fix?
if 'input_heteroassociative' in data.keys():
target = data['input_heteroassociative']
else:
target = None
outputs, rnn_out = net(input=data['input'],
modu_input=data['modu_input'],
target=target)
else:
outputs, rnn_out = net(input=data['input'])
outputs = outputs.view(-1, output_size)
outputs = torch.sign(outputs)
results['outputs'].append(outputs)
# Get acc at recall times
match = (outputs == data['target']).float()
acc = match.mean(dim=1)
results['acc'].append(acc)
if analyze:
results.update(net.writer_dict())
results['config'] = config
if save_pickle:
if fname is None:
fname = 'evaluate_run.pkl'
elif fname[-4:] != '.pkl':
fname = fname + '.pkl'
if save_path is None:
if modeldir is not None:
save_path = modeldir
else:
raise ValueError('No save_path or modeldir provided')
with open(os.path.join(save_path, fname), 'wb') as f:
pickle.dump(results, f)
return dict(results) #don't want defaultdict, may hide bugs downstream
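# Usage sketch (the model directory is hypothetical; it must contain the saved
# config and model.pt from training):
#   res = evaluate_run(modeldir='./files/example_run', n_batch=10)
#   mean_acc = np.mean([acc.mean().item() for acc in res['acc']])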
if __name__ == '__main__':
pass
|
[
"matplotlib.pyplot.title",
"pickle.dump",
"numpy.abs",
"tools.nicename",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"tools.save_fig",
"numpy.mean",
"tools.get_modeldirs",
"torch.no_grad",
"os.path.join",
"numpy.unique",
"sys.path.append",
"os.path.abspath",
"matplotlib.pyplot.locator_params",
"matplotlib.pyplot.imshow",
"tools.load_results",
"matplotlib.cm.cool",
"matplotlib.pyplot.colorbar",
"torch.sign",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"models.model_utils.get_model",
"tools.load_config",
"tools.load_log",
"numpy.percentile",
"torch.cuda.is_available",
"datasets.dataset_utils.get_dataset",
"matplotlib.pyplot.ylabel",
"torch.from_numpy",
"numpy.log",
"tools.nested_update",
"numpy.array",
"tools.flatten_nested_dict",
"matplotlib.pyplot.xlabel"
] |
[((360, 385), 'sys.path.append', 'sys.path.append', (['rootpath'], {}), '(rootpath)\n', (375, 385), False, 'import sys\n'), ((715, 748), 'os.path.join', 'os.path.join', (['rootpath', '"""figures"""'], {}), "(rootpath, 'figures')\n", (727, 748), False, 'import os\n'), ((4097, 4169), 'tools.get_modeldirs', 'tools.get_modeldirs', (['save_path'], {'select_dict': 'select', 'exclude_dict': 'exclude'}), '(save_path, select_dict=select, exclude_dict=exclude)\n', (4116, 4169), False, 'import tools\n'), ((10660, 10688), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(3, 2.5)'}), '(figsize=(3, 2.5))\n', (10670, 10688), True, 'import matplotlib.pyplot as plt\n'), ((10693, 10723), 'matplotlib.pyplot.imshow', 'plt.imshow', (['X.T'], {'aspect': '"""auto"""'}), "(X.T, aspect='auto')\n", (10703, 10723), True, 'import matplotlib.pyplot as plt\n'), ((10728, 10743), 'matplotlib.pyplot.title', 'plt.title', (['name'], {}), '(name)\n', (10737, 10743), True, 'import matplotlib.pyplot as plt\n'), ((10748, 10770), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Position"""'], {}), "('Position')\n", (10758, 10770), True, 'import matplotlib.pyplot as plt\n'), ((10775, 10791), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['name'], {}), '(name)\n', (10785, 10791), True, 'import matplotlib.pyplot as plt\n'), ((10796, 10810), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (10808, 10810), True, 'import matplotlib.pyplot as plt\n'), ((11070, 11115), 'numpy.linspace', 'np.linspace', (['(0)', '(n_time - 1)', 'n_plot'], {'dtype': 'int'}), '(0, n_time - 1, n_plot, dtype=int)\n', (11081, 11115), True, 'import numpy as np\n'), ((11161, 11215), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'n_plot'], {'sharey': '(True)', 'figsize': '(7, 1.5)'}), '(1, n_plot, sharey=True, figsize=(7, 1.5))\n', (11173, 11215), True, 'import matplotlib.pyplot as plt\n'), ((13571, 13586), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13584, 13586), False, 'import torch\n'), ((13648, 13665), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (13659, 13665), False, 'from collections import defaultdict\n'), ((332, 357), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (347, 357), False, 'import os\n'), ((1493, 1520), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1503, 1520), True, 'import matplotlib.pyplot as plt\n'), ((4051, 4079), 'tools.save_fig', 'save_fig', (['save_path', 'figname'], {}), '(save_path, figname)\n', (4059, 4079), False, 'from tools import nicename, save_fig\n'), ((4366, 4394), 'tools.load_log', 'tools.load_log', (['modeldirs[0]'], {}), '(modeldirs[0])\n', (4380, 4394), False, 'import tools\n'), ((4840, 4851), 'numpy.mean', 'np.mean', (['x0'], {}), '(x0)\n', (4847, 4851), True, 'import numpy as np\n'), ((5953, 5992), 'tools.load_results', 'tools.load_results', (['path'], {'select': 'select'}), '(path, select=select)\n', (5971, 5992), False, 'import tools\n'), ((6640, 6667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (6650, 6667), True, 'import matplotlib.pyplot as plt\n'), ((10487, 10516), 'tools.save_fig', 'tools.save_fig', (['path', 'figname'], {}), '(path, figname)\n', (10501, 10516), False, 'import tools\n'), ((10849, 10887), 'tools.save_fig', 'save_fig', (['save_path', "('analyze_' + name)"], {}), "(save_path, 'analyze_' + name)\n", (10857, 10887), False, 'from tools import nicename, save_fig\n'), ((11134, 11143), 'numpy.abs', 'np.abs', 
(['X'], {}), '(X)\n', (11140, 11143), True, 'import numpy as np\n'), ((11614, 11652), 'tools.save_fig', 'save_fig', (['save_path', "('analyze_' + name)"], {}), "(save_path, 'analyze_' + name)\n", (11622, 11652), False, 'from tools import nicename, save_fig\n'), ((12768, 12795), 'tools.load_config', 'tools.load_config', (['modeldir'], {}), '(modeldir)\n', (12785, 12795), False, 'import tools\n'), ((12847, 12889), 'tools.nested_update', 'tools.nested_update', (['config', 'update_config'], {}), '(config, update_config)\n', (12866, 12889), False, 'import tools\n'), ((12938, 12963), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12961, 12963), False, 'import torch\n'), ((13021, 13066), 'datasets.dataset_utils.get_dataset', 'get_dataset', (["config['dataset']"], {'verbose': '(False)'}), "(config['dataset'], verbose=False)\n", (13032, 13066), False, 'from datasets.dataset_utils import get_dataset\n'), ((13290, 13307), 'models.model_utils.get_model', 'get_model', (['config'], {}), '(config)\n', (13299, 13307), False, 'from models.model_utils import get_model\n'), ((14863, 14882), 'torch.sign', 'torch.sign', (['outputs'], {}), '(outputs)\n', (14873, 14882), False, 'import torch\n'), ((1356, 1373), 'tools.load_log', 'tools.load_log', (['d'], {}), '(d)\n', (1370, 1373), False, 'import tools\n'), ((1410, 1430), 'tools.load_config', 'tools.load_config', (['d'], {}), '(d)\n', (1427, 1430), False, 'import tools\n'), ((1584, 1594), 'matplotlib.cm.cool', 'cm.cool', (['x'], {}), '(x)\n', (1591, 1594), False, 'from matplotlib import cm\n'), ((3410, 3438), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(7)'}), '(title, fontsize=7)\n', (3419, 3438), True, 'import matplotlib.pyplot as plt\n'), ((3462, 3476), 'tools.nicename', 'nicename', (['xkey'], {}), '(xkey)\n', (3470, 3476), False, 'from tools import nicename, save_fig\n'), ((3500, 3514), 'tools.nicename', 'nicename', (['ykey'], {}), '(ykey)\n', (3508, 3514), False, 'from tools import nicename, save_fig\n'), ((5073, 5102), 'numpy.percentile', 'np.percentile', (['bootstrap', '(2.5)'], {}), '(bootstrap, 2.5)\n', (5086, 5102), True, 'import numpy as np\n'), ((5125, 5155), 'numpy.percentile', 'np.percentile', (['bootstrap', '(97.5)'], {}), '(bootstrap, 97.5)\n', (5138, 5155), True, 'import numpy as np\n'), ((6757, 6781), 'numpy.unique', 'np.unique', (['res[loop_key]'], {}), '(res[loop_key])\n', (6766, 6781), True, 'import numpy as np\n'), ((9563, 9600), 'matplotlib.pyplot.locator_params', 'plt.locator_params', ([], {'axis': '"""y"""', 'nbins': '(3)'}), "(axis='y', nbins=3)\n", (9581, 9600), True, 'import matplotlib.pyplot as plt\n'), ((9624, 9638), 'tools.nicename', 'nicename', (['xkey'], {}), '(xkey)\n', (9632, 9638), False, 'from tools import nicename, save_fig\n'), ((9662, 9676), 'tools.nicename', 'nicename', (['ykey'], {}), '(ykey)\n', (9670, 9676), False, 'from tools import nicename, save_fig\n'), ((13376, 13421), 'os.path.join', 'os.path.join', (["config['save_path']", '"""model.pt"""'], {}), "(config['save_path'], 'model.pt')\n", (13388, 13421), False, 'import os\n'), ((15595, 15618), 'pickle.dump', 'pickle.dump', (['results', 'f'], {}), '(results, f)\n', (15606, 15618), False, 'import pickle\n'), ((1604, 1630), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'n_model'], {}), '(0, 1, n_model)\n', (1615, 1630), True, 'import numpy as np\n'), ((1915, 1941), 'tools.nicename', 'nicename', (['y[-1]'], {'mode': 'ykey'}), '(y[-1], mode=ykey)\n', (1923, 1941), False, 'from tools import nicename, save_fig\n'), ((2249, 
2283), 'tools.flatten_nested_dict', 'tools.flatten_nested_dict', (['cfgs[i]'], {}), '(cfgs[i])\n', (2274, 2283), False, 'import tools\n'), ((6804, 6814), 'matplotlib.cm.cool', 'cm.cool', (['x'], {}), '(x)\n', (6811, 6814), False, 'from matplotlib import cm\n'), ((7675, 7693), 'numpy.array', 'np.array', (['yval_tmp'], {}), '(yval_tmp)\n', (7683, 7693), True, 'import numpy as np\n'), ((8944, 8966), 'tools.nicename', 'nicename', (['x'], {'mode': 'xkey'}), '(x, mode=xkey)\n', (8952, 8966), False, 'from tools import nicename, save_fig\n'), ((9846, 9864), 'tools.nicename', 'nicename', (['loop_key'], {}), '(loop_key)\n', (9854, 9864), False, 'from tools import nicename, save_fig\n'), ((15539, 15569), 'os.path.join', 'os.path.join', (['save_path', 'fname'], {}), '(save_path, fname)\n', (15551, 15569), False, 'import os\n'), ((3282, 3294), 'tools.nicename', 'nicename', (['lk'], {}), '(lk)\n', (3290, 3294), False, 'from tools import nicename, save_fig\n'), ((7100, 7114), 'numpy.log', 'np.log', (['x_plot'], {}), '(x_plot)\n', (7106, 7114), True, 'import numpy as np\n'), ((7169, 7183), 'numpy.log', 'np.log', (['y_plot'], {}), '(y_plot)\n', (7175, 7183), True, 'import numpy as np\n'), ((7796, 7812), 'numpy.log', 'np.log', (['yval_tmp'], {}), '(yval_tmp)\n', (7802, 7812), True, 'import numpy as np\n'), ((7843, 7860), 'numpy.mean', 'np.mean', (['yval_tmp'], {}), '(yval_tmp)\n', (7850, 7860), True, 'import numpy as np\n'), ((9422, 9436), 'numpy.log', 'np.log', (['yticks'], {}), '(yticks)\n', (9428, 9436), True, 'import numpy as np\n'), ((2518, 2544), 'tools.nicename', 'nicename', (['cfg[lk]'], {'mode': 'lk'}), '(cfg[lk], mode=lk)\n', (2526, 2544), False, 'from tools import nicename, save_fig\n'), ((7358, 7391), 'tools.nicename', 'nicename', (['loop_val'], {'mode': 'loop_key'}), '(loop_val, mode=loop_key)\n', (7366, 7391), False, 'from tools import nicename, save_fig\n'), ((8117, 8132), 'numpy.array', 'np.array', (['xvals'], {}), '(xvals)\n', (8125, 8132), True, 'import numpy as np\n'), ((2685, 2710), 'tools.nicename', 'nicename', (['cfg[i]'], {'mode': 'lk'}), '(cfg[i], mode=lk)\n', (2693, 2710), False, 'from tools import nicename, save_fig\n'), ((14052, 14073), 'torch.from_numpy', 'torch.from_numpy', (['val'], {}), '(val)\n', (14068, 14073), False, 'import torch\n'), ((2813, 2837), 'tools.nicename', 'nicename', (['cfg[i]'], {'mode': 'k'}), '(cfg[i], mode=k)\n', (2821, 2837), False, 'from tools import nicename, save_fig\n')]
|
#
# UNIVERSIDADE FEDERAL DE PERNAMBUCO -- UFPE (http://www.ufpe.br)
# CENTRO DE INFORMÁTICA -- CIn (http://www.cin.ufpe.br)
# Av. Jornalista <NAME>, s/n - Cidade Universitária (Campus Recife)
# 50.740-560 - Recife - PE - BRAZIL
#
# Copyright (C) 2018 <NAME> (<EMAIL>)
#
# Created on: 2018-05-26
# @author: <NAME>
# @contact: <EMAIL>
# @license: MIT
from functools import reduce
import numpy as np
from feature import Feature
class Rule:
def __init__(self, ft=set(), target=None):
'''
Constructor
'''
self.__feat = ft
self.__target = target
self.__objs = None
def __call__(self, data):
self.__objs = reduce(lambda x,y: np.logical_and(x,y), map(lambda x: x(data),self.__feat))
return self.__objs
def __add__(self, elem):
r = Rule(self.feat.copy(),self.target)
if isinstance(elem, Feature):
r.feat.update([elem])
elif isinstance(elem, Rule):
assert(self.target==elem.target)
r.feat.update(elem.feat)
else:
raise ValueError("Invalid type of param elem: {}\n Should either be Feature or Rule.".format(type(elem)))
return r
def __iadd__(self, elem):
if isinstance(elem, Feature):
self.feat.update([elem])
elif isinstance(elem, Rule):
assert(self.target==elem.target)
self.feat.update(elem.feat)
else:
raise ValueError("Invalid type of param elem: {}\n Should either be Feature or Rule.".format(type(elem)))
return self
def __str__(self):
return " \u2227 ".join(map(str,self.feat)) + " \u2192 " + str(self.target)
def __repr__(self):
return str(self)
def _get_feat(self):
return self.__feat
def _get_target(self):
return self.__target
def _get_objs(self):
return self.__objs
def _set_feat(self, value):
self.__feat = set(value)
def _set_target(self, value):
self.__target = value
feat = property(_get_feat, _set_feat, None, "The antecedent of this rule.")
target = property(_get_target, _set_target, None, "The consequent of this rule.")
objs = property(_get_objs, None, None, "The set of examples that satisfy the rule")
if __name__ == '__main__':
import pandas as pd
from textwrap import fill
from reader import construct_features
df = pd.read_csv("../test/data/abalone.data",header=None)
names = ["sex", "length", "diameter", "height", "whole", "shucked", "viscera", "shell", "rings"]
df.columns = names
ft = list(construct_features(df, 'rings', 10))
r = Rule()
r.feat = [ft[0],ft[10]]
r.target = 10
print(r(df))
print(r)
r2 = Rule()
r2.feat = [ft[1],ft[9],ft[11]]
r2.target = 10
print(r2)
print(r + r2)
r+=r2
print(r)
|
[
"pandas.read_csv",
"numpy.logical_and",
"reader.construct_features"
] |
[((2475, 2528), 'pandas.read_csv', 'pd.read_csv', (['"""../test/data/abalone.data"""'], {'header': 'None'}), "('../test/data/abalone.data', header=None)\n", (2486, 2528), True, 'import pandas as pd\n'), ((2671, 2706), 'reader.construct_features', 'construct_features', (['df', '"""rings"""', '(10)'], {}), "(df, 'rings', 10)\n", (2689, 2706), False, 'from reader import construct_features\n'), ((718, 738), 'numpy.logical_and', 'np.logical_and', (['x', 'y'], {}), '(x, y)\n', (732, 738), True, 'import numpy as np\n')]
|
# Logistic Regression
from matplotlib import use
use('TkAgg')
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
import pandas as pd
from ml import mapFeature, plotData, plotDecisionBoundary
from matplotlib.pyplot import show
from costFunctionReg import costFunctionReg
from gradientFunctionReg import gradientFunctionReg
from sigmoid import sigmoid
from predict import predict
def optimize(Lambda):
result = minimize(costFunctionReg, initial_theta, method='L-BFGS-B',
jac=gradientFunctionReg, args=(X, y, Lambda),
options={'gtol': 1e-4, 'disp': False, 'maxiter': 1000})
return result
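# The objective minimized above is the usual regularized logistic-regression
# cost (assuming costFunctionReg / gradientFunctionReg follow the standard
# formulation, with theta_0 left unregularized):
#   h_i      = sigmoid(x_i . theta)
#   J(theta) = (1/m) * sum_i [-y_i*log(h_i) - (1 - y_i)*log(1 - h_i)]
#              + (lambda/(2m)) * sum_{j>=1} theta_j^2
#   grad_j   = (1/m) * sum_i (h_i - y_i)*x_ij + (lambda/m)*theta_j   (j >= 1)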
# Plot Boundary
def plotBoundary(theta, X, y):
plotDecisionBoundary(theta, X, y)
plt.title(r'$\lambda$ = ' + str(Lambda))
# Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
show()
# Initialization
# Load Data
# The first two columns contains the X values and the third column
# contains the label (y).
plt.figure(figsize=(15, 10))
data = np.loadtxt('ex2data2.txt', delimiter=',')
X = data[:, 0:2]
y = data[:, 2]
plotData(X, y)
# Labels and Legend
plt.xlabel('Microchip Test 1')
plt.ylabel('Microchip Test 2')
show()
input('Program paused. Press Enter to continue...')
# =========== Part 1: Regularized Logistic Regression ============
# Add Polynomial Features
# Note that mapFeature also adds a column of ones for us, so the intercept
# term is handled
X = pd.DataFrame(X)
X = X.apply(mapFeature, axis=1)
# convert back to numpy ndarray
X = X.values
# Initialize fitting parameters
initial_theta = np.zeros(X.shape[1])
# Set regularization parameter lambda to 0 (no regularization) for the
# initial cost check; it is raised to 1 further below.
Lambda = 0.0
# Compute and display initial cost and gradient for regularized logistic
# regression
cost = costFunctionReg(initial_theta, X, y, Lambda)
print('Cost at initial theta (zeros): %f' % cost)
# ============= Part 2: Regularization and Accuracies =============
# Optimize and plot boundary
Lambda = 1.0
result = optimize(Lambda)
theta = result.x
cost = result.fun
# Print to screen
print('lambda = ' + str(Lambda))
print('Cost at theta found by scipy: %f' % cost)
print('theta:', ["%0.4f" % i for i in theta])
input('Program paused. Press Enter to continue...')
plotBoundary(theta, X, y)
# Compute accuracy on our training set
p = predict(theta, X)
acc = np.mean(np.where(p == y, 1, 0)) * 100
print('Train Accuracy: %f' % acc)
input('Program paused. Press Enter to continue...')
# ============= Part 3: Different values of lambda =============
for Lambda in np.linspace(0.0, 100.1, 8):
result = optimize(Lambda)
theta = result.x
print('lambda = ' + str(Lambda))
print('theta:', ["%0.4f" % i for i in theta])
plotBoundary(theta, X, y)
input('Program paused. Press Enter to continue...')
|
[
"ml.plotData",
"pandas.DataFrame",
"costFunctionReg.costFunctionReg",
"scipy.optimize.minimize",
"matplotlib.pyplot.show",
"ml.plotDecisionBoundary",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.use",
"predict.predict",
"numpy.loadtxt",
"numpy.linspace",
"numpy.where",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((50, 62), 'matplotlib.use', 'use', (['"""TkAgg"""'], {}), "('TkAgg')\n", (53, 62), False, 'from matplotlib import use\n'), ((1045, 1073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (1055, 1073), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1122), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2data2.txt"""'], {'delimiter': '""","""'}), "('ex2data2.txt', delimiter=',')\n", (1091, 1122), True, 'import numpy as np\n'), ((1156, 1170), 'ml.plotData', 'plotData', (['X', 'y'], {}), '(X, y)\n', (1164, 1170), False, 'from ml import mapFeature, plotData, plotDecisionBoundary\n'), ((1192, 1222), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Microchip Test 1"""'], {}), "('Microchip Test 1')\n", (1202, 1222), True, 'import matplotlib.pyplot as plt\n'), ((1223, 1253), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Microchip Test 2"""'], {}), "('Microchip Test 2')\n", (1233, 1253), True, 'import matplotlib.pyplot as plt\n'), ((1254, 1260), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (1258, 1260), False, 'from matplotlib.pyplot import show\n'), ((1507, 1522), 'pandas.DataFrame', 'pd.DataFrame', (['X'], {}), '(X)\n', (1519, 1522), True, 'import pandas as pd\n'), ((1649, 1669), 'numpy.zeros', 'np.zeros', (['X.shape[1]'], {}), '(X.shape[1])\n', (1657, 1669), True, 'import numpy as np\n'), ((1821, 1865), 'costFunctionReg.costFunctionReg', 'costFunctionReg', (['initial_theta', 'X', 'y', 'Lambda'], {}), '(initial_theta, X, y, Lambda)\n', (1836, 1865), False, 'from costFunctionReg import costFunctionReg\n'), ((2362, 2379), 'predict.predict', 'predict', (['theta', 'X'], {}), '(theta, X)\n', (2369, 2379), False, 'from predict import predict\n'), ((2593, 2619), 'numpy.linspace', 'np.linspace', (['(0.0)', '(100.1)', '(8)'], {}), '(0.0, 100.1, 8)\n', (2604, 2619), True, 'import numpy as np\n'), ((455, 627), 'scipy.optimize.minimize', 'minimize', (['costFunctionReg', 'initial_theta'], {'method': '"""L-BFGS-B"""', 'jac': 'gradientFunctionReg', 'args': '(X, y, Lambda)', 'options': "{'gtol': 0.0001, 'disp': False, 'maxiter': 1000}"}), "(costFunctionReg, initial_theta, method='L-BFGS-B', jac=\n gradientFunctionReg, args=(X, y, Lambda), options={'gtol': 0.0001,\n 'disp': False, 'maxiter': 1000})\n", (463, 627), False, 'from scipy.optimize import minimize\n'), ((733, 766), 'ml.plotDecisionBoundary', 'plotDecisionBoundary', (['theta', 'X', 'y'], {}), '(theta, X, y)\n', (753, 766), False, 'from ml import mapFeature, plotData, plotDecisionBoundary\n'), ((841, 871), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Microchip Test 1"""'], {}), "('Microchip Test 1')\n", (851, 871), True, 'import matplotlib.pyplot as plt\n'), ((876, 906), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Microchip Test 2"""'], {}), "('Microchip Test 2')\n", (886, 906), True, 'import matplotlib.pyplot as plt\n'), ((911, 917), 'matplotlib.pyplot.show', 'show', ([], {}), '()\n', (915, 917), False, 'from matplotlib.pyplot import show\n'), ((2394, 2416), 'numpy.where', 'np.where', (['(p == y)', '(1)', '(0)'], {}), '(p == y, 1, 0)\n', (2402, 2416), True, 'import numpy as np\n')]
|
import logging
logger = logging.getLogger('SBFC')
from math import floor
import numpy as np
from joblib import Parallel, delayed
from utils.parproc import split_for_parallelism
class SimHashBloomFilter:
def __init__(self, **kwargs):
self.expansion_factor = kwargs['expansion_factor']
self.bloom_filters = None
self.kwargs = kwargs
self.classes_ = None
self.n_classes_ = None
self.rnd_proj_mat = None
self.batch_size = (
kwargs['batch_size'] if 'batch_size' in kwargs else 128
)
self.kwargs['batch_size'] = self.batch_size
self.nthreads = kwargs['nthreads'] if 'nthreads' in kwargs else 1
self.kwargs['nthreads'] = self.nthreads
def set_params(self, **kwargs):
self.bloom_filters = None
for k in kwargs:
self.kwargs[k] = kwargs[k]
self.classes_ = None
self.n_classes_ = None
self.rnd_proj_mat = None
def fit(self, X, y):
# Generate simhash
nrows, ncols_in = X.shape
self.ncols_in = ncols_in
# Set up the SimHash
# Get the output dimensionality
ncols_out = max(int(ncols_in * self.expansion_factor), 2)
assert self.rnd_proj_mat is None, (
'projection matrix should be None, method might be already fit'
)
self.rnd_proj_mat = np.random.normal(0., 1., size=(ncols_in, ncols_out))
# Get number of classes
self.classes_ = np.unique(y)
self.n_classes_ = len(self.classes_)
l2i = {l: i for i, l in enumerate(self.classes_)}
# Split data for parallel processing
th_idxs = np.insert(
np.cumsum(split_for_parallelism(nrows, self.nthreads)), 0, 0
)
# process each thread batch in parallel
feval = lambda sidx, eidx: per_thread_job(
bf_nrows=self.n_classes_,
bf_ncols=ncols_out,
th_start_idx=sidx,
th_end_idx=eidx,
batch_size=self.batch_size,
X=X,
y=y,
rp_mat=self.rnd_proj_mat,
l2i=l2i
)
all_th_bfs = Parallel(n_jobs=self.nthreads)(
delayed(feval)(th_idxs[i], th_idxs[i+1])
for i in range(self.nthreads)
)
# consolidate results from all threads
        bloom_filters = np.zeros((self.n_classes_, ncols_out), dtype=bool)
for bf in all_th_bfs:
bloom_filters += bf
# invert bloom filter & transpose
self.bloom_filters = np.invert(bloom_filters)
self.tbf = np.transpose(self.bloom_filters)
return self
    def partial_fit(self, X, y):
pass
def _bf_scores(self, X):
nrows, ncols = X.shape
assert self.ncols_in == ncols, (
'NBF trained with %i columns, trying prediction with %i columns'
% (self.ncols_in, ncols)
)
assert not self.bloom_filters is None, ('Method not fit yet')
assert not self.rnd_proj_mat is None, ('Method not fit yet')
# Process points in batches
nbatches = (
floor(nrows / self.batch_size)
+ int((nrows % self.batch_size) > 0)
)
start_idx = 0
fX = []
for j in range(nbatches):
end_idx = min(start_idx + self.batch_size, nrows)
# Generate simhash
batch_shX = simhash(self.rnd_proj_mat, X[start_idx : end_idx, :])
            # For the (inverted) bloom filter of each class, count how many of
            # the point's set bits fall outside the class's union of hashes;
            # a lower count means a better match.
            batch_fX = batch_shX.astype(int) @ self.tbf
fX.extend(batch_fX.tolist())
start_idx = end_idx
assert len(fX) == nrows, ('Expected %i, obtained %i' % (nrows, len(fX)))
return np.array(fX)
def predict(self, X):
nrows, ncols = X.shape
th_idxs = np.insert(
np.cumsum(split_for_parallelism(nrows, self.nthreads)), 0, 0
)
fX = np.vstack(
Parallel(n_jobs=self.nthreads)(
delayed(self._bf_scores)(X[th_idxs[i] : th_idxs[i + 1], :])
for i in range(self.nthreads)
)
)
        # Return the class with the minimum Bloom-filter score;
        # ties between classes are broken uniformly at random
min_bf_scores = np.min(fX, axis=1)
y = []
nties = 0
for min_bf_score, fx in zip(min_bf_scores, fX):
y_set = self.classes_[fx == min_bf_score]
l = None
if len(y_set) > 1:
nties += 1
l = y_set[np.random.randint(0, len(y_set))]
else:
l = y_set[0]
y.append(l)
logging.debug('%i / %i points have ties' % (nties, nrows))
return np.array(y)
def predict_proba(self, X):
nrows, ncols = X.shape
th_idxs = np.insert(np.cumsum(split_for_parallelism(nrows, self.nthreads)), 0, 0)
fX = np.vstack(
Parallel(n_jobs=self.nthreads)(
delayed(self._bf_scores)(X[th_idxs[i] : th_idxs[i + 1], :])
for i in range(self.nthreads)
)
).astype(float)
exp_neg_fX = np.exp(-fX)
probs = exp_neg_fX / np.sum(exp_neg_fX, axis=1)[:, None]
return probs
def get_params(self, deep=False):
return self.kwargs
def simhash(rnd_proj_mat, X):
ncols_in, ncols_out = rnd_proj_mat.shape
assert ncols_in == X.shape[1]
projX = X @ rnd_proj_mat
assert ncols_out == projX.shape[1]
assert projX.shape[0] == X.shape[0]
    retX = np.zeros_like(projX, dtype=bool)
retX[projX > 0.] = True
return retX
def per_thread_job(
bf_nrows, bf_ncols, th_start_idx,
th_end_idx, batch_size, X, y, rp_mat, l2i
):
    bloom_filters = np.zeros((bf_nrows, bf_ncols), dtype=bool)
nrows = th_end_idx - th_start_idx
assert bf_ncols == rp_mat.shape[1]
# Process points in batches
nbatches = floor(nrows / batch_size) + int((nrows % batch_size) > 0)
start_idx = th_start_idx
for j in range(nbatches):
end_idx = min(start_idx + batch_size, th_end_idx)
shX = simhash(rp_mat, X[start_idx : end_idx, :])
nrows_batch, ncols_out_batch = shX.shape
assert nrows_batch == (end_idx - start_idx), (
'The number of rows batch do not match: %i vs. %i'
% (end_idx - start_idx, nrows_batch)
)
assert ncols_out_batch == bf_ncols, (
'Hashed X %i != BF %i' % (ncols_out_batch, bf_ncols)
)
# For each class, compute W = Complement(X_1 V X_2 V ....)
for features, label in zip(shX, y[ start_idx : end_idx ]) :
bloom_filters[l2i[label]] += features
# Update batch start idx
start_idx = end_idx
assert start_idx == th_end_idx
return bloom_filters
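# Added note: `bloom_filters[l2i[label]] += features` above relies on numpy's
# boolean addition, which is an element-wise OR, so each class row accumulates
# the union of the simhash bits of its training points (fit() later inverts it).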
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
from sklearn.datasets import load_iris
X, y = load_iris(return_X_y=True)
rnd_proj_mat = np.random.normal(0., 1., size=(X.shape[1], 10))
batch = 5
hashed_X = simhash(rnd_proj_mat, X[:batch, :])
print(hashed_X)
kwargs = { 'expansion_factor': 20. }
sbf = SimHashBloomFilter(**kwargs)
sbf.fit(X, y)
print('RP matrix:', sbf.rnd_proj_mat.shape)
print('BF matrix:', sbf.tbf.shape)
print('Preds: ', sbf.predict(X[:batch, :]))
print('Probs: ', sbf.predict_proba(X[:batch, :]))
|
[
"sklearn.datasets.load_iris",
"numpy.zeros_like",
"logging.debug",
"numpy.sum",
"logging.basicConfig",
"numpy.invert",
"numpy.unique",
"numpy.zeros",
"numpy.transpose",
"math.floor",
"numpy.min",
"numpy.array",
"numpy.exp",
"numpy.random.normal",
"joblib.Parallel",
"joblib.delayed",
"logging.getLogger",
"utils.parproc.split_for_parallelism"
] |
[((24, 49), 'logging.getLogger', 'logging.getLogger', (['"""SBFC"""'], {}), "('SBFC')\n", (41, 49), False, 'import logging\n'), ((5590, 5625), 'numpy.zeros_like', 'np.zeros_like', (['projX'], {'dtype': 'np.bool'}), '(projX, dtype=np.bool)\n', (5603, 5625), True, 'import numpy as np\n'), ((5807, 5852), 'numpy.zeros', 'np.zeros', (['(bf_nrows, bf_ncols)'], {'dtype': 'np.bool'}), '((bf_nrows, bf_ncols), dtype=np.bool)\n', (5815, 5852), True, 'import numpy as np\n'), ((6895, 6935), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (6914, 6935), False, 'import logging\n'), ((6990, 7016), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (6999, 7016), False, 'from sklearn.datasets import load_iris\n'), ((7036, 7085), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': '(X.shape[1], 10)'}), '(0.0, 1.0, size=(X.shape[1], 10))\n', (7052, 7085), True, 'import numpy as np\n'), ((1377, 1431), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(1.0)'], {'size': '(ncols_in, ncols_out)'}), '(0.0, 1.0, size=(ncols_in, ncols_out))\n', (1393, 1431), True, 'import numpy as np\n'), ((1486, 1498), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (1495, 1498), True, 'import numpy as np\n'), ((2359, 2412), 'numpy.zeros', 'np.zeros', (['(self.n_classes_, ncols_out)'], {'dtype': 'np.bool'}), '((self.n_classes_, ncols_out), dtype=np.bool)\n', (2367, 2412), True, 'import numpy as np\n'), ((2546, 2570), 'numpy.invert', 'np.invert', (['bloom_filters'], {}), '(bloom_filters)\n', (2555, 2570), True, 'import numpy as np\n'), ((2590, 2622), 'numpy.transpose', 'np.transpose', (['self.bloom_filters'], {}), '(self.bloom_filters)\n', (2602, 2622), True, 'import numpy as np\n'), ((3780, 3792), 'numpy.array', 'np.array', (['fX'], {}), '(fX)\n', (3788, 3792), True, 'import numpy as np\n'), ((4327, 4345), 'numpy.min', 'np.min', (['fX'], {'axis': '(1)'}), '(fX, axis=1)\n', (4333, 4345), True, 'import numpy as np\n'), ((4707, 4765), 'logging.debug', 'logging.debug', (["('%i / %i points have ties' % (nties, nrows))"], {}), "('%i / %i points have ties' % (nties, nrows))\n", (4720, 4765), False, 'import logging\n'), ((4781, 4792), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (4789, 4792), True, 'import numpy as np\n'), ((5196, 5207), 'numpy.exp', 'np.exp', (['(-fX)'], {}), '(-fX)\n', (5202, 5207), True, 'import numpy as np\n'), ((5977, 6002), 'math.floor', 'floor', (['(nrows / batch_size)'], {}), '(nrows / batch_size)\n', (5982, 6002), False, 'from math import floor\n'), ((2151, 2181), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.nthreads'}), '(n_jobs=self.nthreads)\n', (2159, 2181), False, 'from joblib import Parallel, delayed\n'), ((3118, 3148), 'math.floor', 'floor', (['(nrows / self.batch_size)'], {}), '(nrows / self.batch_size)\n', (3123, 3148), False, 'from math import floor\n'), ((1698, 1741), 'utils.parproc.split_for_parallelism', 'split_for_parallelism', (['nrows', 'self.nthreads'], {}), '(nrows, self.nthreads)\n', (1719, 1741), False, 'from utils.parproc import split_for_parallelism\n'), ((3902, 3945), 'utils.parproc.split_for_parallelism', 'split_for_parallelism', (['nrows', 'self.nthreads'], {}), '(nrows, self.nthreads)\n', (3923, 3945), False, 'from utils.parproc import split_for_parallelism\n'), ((3999, 4029), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.nthreads'}), '(n_jobs=self.nthreads)\n', (4007, 4029), False, 'from joblib import Parallel, delayed\n'), ((4895, 4938), 
'utils.parproc.split_for_parallelism', 'split_for_parallelism', (['nrows', 'self.nthreads'], {}), '(nrows, self.nthreads)\n', (4916, 4938), False, 'from utils.parproc import split_for_parallelism\n'), ((5237, 5263), 'numpy.sum', 'np.sum', (['exp_neg_fX'], {'axis': '(1)'}), '(exp_neg_fX, axis=1)\n', (5243, 5263), True, 'import numpy as np\n'), ((2195, 2209), 'joblib.delayed', 'delayed', (['feval'], {}), '(feval)\n', (2202, 2209), False, 'from joblib import Parallel, delayed\n'), ((4047, 4071), 'joblib.delayed', 'delayed', (['self._bf_scores'], {}), '(self._bf_scores)\n', (4054, 4071), False, 'from joblib import Parallel, delayed\n'), ((4983, 5013), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.nthreads'}), '(n_jobs=self.nthreads)\n', (4991, 5013), False, 'from joblib import Parallel, delayed\n'), ((5031, 5055), 'joblib.delayed', 'delayed', (['self._bf_scores'], {}), '(self._bf_scores)\n', (5038, 5055), False, 'from joblib import Parallel, delayed\n')]
|
import numpy as np
from recourse.action_set import _BoundElement as BoundElement
v = np.random.rand(1000)
a = np.sort(v)
lb = np.percentile(v, 40)
# bounds
def test_absolute_bound():
l = -1.0
u = 10.0
b = BoundElement(bound_type = 'absolute', lb = l, ub = u, variable_type=int)
assert b.lb == l
assert b.ub == u
assert b.bound_type == 'absolute'
def test_absolute_bound_with_values():
l = 1.0
u = 10.0
values = l + np.multiply(u - l, np.random.rand(1000))
b = BoundElement(bound_type = 'absolute', lb = l, ub = u, values = values)
assert b.lb == l
assert b.ub == u
assert b.bound_type == 'absolute'
def test_absolute_bound_values_only():
values = np.random.randn(1000)
b = BoundElement(bound_type = 'absolute', values = values)
assert b.lb == np.min(values)
assert b.ub == np.max(values)
assert b.bound_type == 'absolute'
def test_percentile_bound():
l = 5.0
u = 95.0
values = np.random.rand(1000)
b = BoundElement(bound_type = 'percentile', lb = l, ub = u, values = values)
assert np.isclose(b.lb, np.percentile(values, l))
assert np.isclose(b.ub, np.percentile(values, u))
assert b.bound_type == 'percentile'
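# Illustrative check (added, not part of the original tests): with lb=5 and
# ub=95 on ~U(0, 1) samples, the resulting numeric bounds sit near 0.05/0.95:
#   b = BoundElement(bound_type='percentile', lb=5.0, ub=95.0, values=np.random.rand(10**5))
#   assert 0.03 < b.lb < 0.07 and 0.93 < b.ub < 0.97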
|
[
"numpy.random.randn",
"numpy.percentile",
"numpy.sort",
"numpy.min",
"recourse.action_set._BoundElement",
"numpy.max",
"numpy.random.rand"
] |
[((89, 109), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (103, 109), True, 'import numpy as np\n'), ((115, 125), 'numpy.sort', 'np.sort', (['v'], {}), '(v)\n', (122, 125), True, 'import numpy as np\n'), ((132, 152), 'numpy.percentile', 'np.percentile', (['v', '(40)'], {}), '(v, 40)\n', (145, 152), True, 'import numpy as np\n'), ((232, 298), 'recourse.action_set._BoundElement', 'BoundElement', ([], {'bound_type': '"""absolute"""', 'lb': 'l', 'ub': 'u', 'variable_type': 'int'}), "(bound_type='absolute', lb=l, ub=u, variable_type=int)\n", (244, 298), True, 'from recourse.action_set import _BoundElement as BoundElement\n'), ((527, 589), 'recourse.action_set._BoundElement', 'BoundElement', ([], {'bound_type': '"""absolute"""', 'lb': 'l', 'ub': 'u', 'values': 'values'}), "(bound_type='absolute', lb=l, ub=u, values=values)\n", (539, 589), True, 'from recourse.action_set import _BoundElement as BoundElement\n'), ((739, 760), 'numpy.random.randn', 'np.random.randn', (['(1000)'], {}), '(1000)\n', (754, 760), True, 'import numpy as np\n'), ((770, 820), 'recourse.action_set._BoundElement', 'BoundElement', ([], {'bound_type': '"""absolute"""', 'values': 'values'}), "(bound_type='absolute', values=values)\n", (782, 820), True, 'from recourse.action_set import _BoundElement as BoundElement\n'), ((1009, 1029), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1023, 1029), True, 'import numpy as np\n'), ((1039, 1103), 'recourse.action_set._BoundElement', 'BoundElement', ([], {'bound_type': '"""percentile"""', 'lb': 'l', 'ub': 'u', 'values': 'values'}), "(bound_type='percentile', lb=l, ub=u, values=values)\n", (1051, 1103), True, 'from recourse.action_set import _BoundElement as BoundElement\n'), ((845, 859), 'numpy.min', 'np.min', (['values'], {}), '(values)\n', (851, 859), True, 'import numpy as np\n'), ((880, 894), 'numpy.max', 'np.max', (['values'], {}), '(values)\n', (886, 894), True, 'import numpy as np\n'), ((1141, 1165), 'numpy.percentile', 'np.percentile', (['values', 'l'], {}), '(values, l)\n', (1154, 1165), True, 'import numpy as np\n'), ((1196, 1220), 'numpy.percentile', 'np.percentile', (['values', 'u'], {}), '(values, u)\n', (1209, 1220), True, 'import numpy as np\n'), ((496, 516), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (510, 516), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 30 00:26:48 2015
@author: willy
"""
import os
import argparse
import numpy as np
import glob
def read_haplotypes(msmc_input):
"""
Returns a tuple (value1, value2, value3)
    value1 : the chromosome name
    value2 : a list of tuples, each containing the positions of the SNPs
        (columns 2 and 3) of the msmc input
    value3 : a list of tuples, one per diploid individual. Ex: if the msmc
        input has 4 haplotypes, returns [(hap1, hap2), (hap3, hap4)]
"""
a = open(msmc_input, 'r')
lines = a.readlines()
l1 = lines[-1].split('\t')
value_1 = l1[0]
hap = l1[-1].strip('\n')
value_2 = []
haplotypes_list = [[] for i in list(hap)]
for l in lines:
values = l.split('\t')
value_2.append((values[1], values[2]))
hap = values[3].strip('\n')
        for i in range(len(hap)):
            haplotypes_list[i].append(hap[i])
    value_3 = [(''.join(haplotypes_list[2*i]), ''.join(haplotypes_list[2*i+1]))
               for i in range(len(haplotypes_list) // 2)]
return (value_1, value_2, value_3)
def write_msmc_input(filename, value_1, value_2, value_3):
"""
Given the type of values returned by the 'read_haplotypes' method, write
to filename the values in the msmc input format
"""
f = open(filename, 'w')
for i in range(len(value_2)):
temp_line = "{}\t{}\t".format(value_1, '\t'.join(value_2[i]))
hap_line = ''.join([value_3[j][0][i]+value_3[j][1][i]
for j in range(len(value_3))])
f.write("{}{}\n".format(temp_line, hap_line))
f.close()
def add_phasing_err(chrA, chrB, snp_pos, errors_positions):
"""
Given two haplotypes with the snp positions (as in the msmc input format)
as well as the positions of some phasing errors, this method will output
two new chromosomes switched at positions where phasing errors happen.
"""
snp_pos = [int(p) for p in snp_pos]
if errors_positions == []:
return (chrA, chrB)
newA = []
newB = []
A = chrA
B = chrB
start_pos = 0
curr_pos = 0
err_index = 0
while curr_pos < len(snp_pos):
if (snp_pos[curr_pos] >= errors_positions[err_index]):
# if phasing error
newA.append(A[start_pos:curr_pos])
newB.append(B[start_pos:curr_pos])
start_pos = curr_pos
# Invert the chromosomes as many times as there are phasing errors
while (snp_pos[curr_pos] >= errors_positions[err_index]):
(A, B) = (B, A)
err_index += 1
if err_index == len(errors_positions):
curr_pos = len(snp_pos) # make sure that it will not enter
# the first while next time
break # quit current while
curr_pos +=1
# Copy the rest of the haplotypes to the new chromosomes
newA.append(A[start_pos:])
newB.append(B[start_pos:])
return(''.join(newA), ''.join(newB))
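# Worked example (added for illustration, hypothetical values): with
# chrA='0000', chrB='1111', snp_pos=['10', '20', '30', '40'] and a single
# phasing error at position 25, the haplotypes are swapped from the third
# SNP onwards:
#   add_phasing_err('0000', '1111', ['10', '20', '30', '40'], [25])
#   -> ('0011', '1100')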
def generate_phasing_err_positions(seq_len, mean):
"""
    Generates the positions at which phasing errors occur. The distances
    between consecutive errors are drawn from an exponential distribution
    with the given mean (truncated to integer positions).
"""
positions = []
curr_position = 0
while curr_position <= seq_len:
err_position = np.random.exponential(mean)
err_position = np.trunc(err_position) + 1
curr_position += err_position
positions.append(curr_position)
return [int(p) for p in positions[:-1]]
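# Added note: with the defaults used below (seq_len=2000000, mean=1e6) the
# expected number of phasing errors per sequence is roughly seq_len / mean = 2.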
def add_phasing_err_folder(input_folder, output_folder, seq_len, err_mean):
"""
Assuming input_folder contains some .msmcin files (the input of msmc)
and that these files are haplotypes of size seq_len,
for each msmcin file in the input folder we will create a new msmcin file
in the output_folder containing some phasing errors.
    We assume that phasing errors are distributed following a Poisson process
    over the analyzed genome, so the distances between consecutive errors are
    simulated with an exponential distribution.
"""
input_filenames = []
# We take all the msmcin files in the input_folder
for filename in glob.glob(os.path.join(input_folder, '*.txt')):
input_filenames.append(filename)
for f in input_filenames:
(v1, v2, v3) = read_haplotypes(f)
snp_pos = [p[0] for p in v2]
new_v3 = []
for v in v3:
err_positions = generate_phasing_err_positions(seq_len, err_mean)
new_v3.append(add_phasing_err(v[0], v[1], snp_pos, err_positions))
output_name = f.split('/')[-1]
f_output = os.path.join(output_folder, output_name)
write_msmc_input(f_output, v1, v2, new_v3)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Add phasing errors to haplotypes")
parser.add_argument("input_folder", help = "The input folder ")
parser.add_argument("output_folder", help="The output folder")
parser.add_argument("-l", "--sequence_length", help="Length of the \
sequence where pahsing errors will be added", type=int,
default = 2000000)
parser.add_argument("-e", "--error_rate", help = "The rate of the phasing\
errors", type=float, default=1e6)
args = parser.parse_args()
l = args.sequence_length
err_mean = args.error_rate
add_phasing_err_folder(args.input_folder, args.output_folder, l, err_mean)
print("Done")
|
[
"numpy.random.exponential",
"os.path.join",
"argparse.ArgumentParser",
"numpy.trunc"
] |
[((4916, 4987), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Add phasing errors to haplotypes"""'}), "(description='Add phasing errors to haplotypes')\n", (4939, 4987), False, 'import argparse\n'), ((3441, 3468), 'numpy.random.exponential', 'np.random.exponential', (['mean'], {}), '(mean)\n', (3462, 3468), True, 'import numpy as np\n'), ((4334, 4369), 'os.path.join', 'os.path.join', (['input_folder', '"""*.txt"""'], {}), "(input_folder, '*.txt')\n", (4346, 4369), False, 'import os\n'), ((4783, 4823), 'os.path.join', 'os.path.join', (['output_folder', 'output_name'], {}), '(output_folder, output_name)\n', (4795, 4823), False, 'import os\n'), ((3492, 3514), 'numpy.trunc', 'np.trunc', (['err_position'], {}), '(err_position)\n', (3500, 3514), True, 'import numpy as np\n')]
|
"""Test for .prep.excel module
"""
# =======
# PRIVATE
# =======
import numpy as np
import pandas as pd
from numpy.testing import assert_equal
from pandas.testing import assert_index_equal, assert_frame_equal
from hidrokit.prep import excel
def test__file_year():
filepath = 'tests/data/excel/2006 HUJAN DISNEY LAND.xls'
year = excel._file_year(filepath)
assert year == 2006
def test__file_single_pivot():
filepath = 'tests/data/excel/2006 HUJAN DISNEY LAND.xls'
pivot = excel._file_single_pivot(filepath)
pivot = pivot.replace('-', np.nan)
nan = np.nan
row_3 = np.array(
[66.3, nan, 11.3, nan, nan, 13.2, nan, nan, nan, nan, nan,
12.3]
)
assert_equal(
row_3, pivot.iloc[3, :].values
)
def test__dataframe_year():
data = pd.read_csv('tests/data/one_year_one_column.csv',
index_col=0, parse_dates=True)
data_test = excel._dataframe_year(2000)
assert_index_equal(
data.index, data_test.index
)
def test__dataframe_data():
filepath = 'tests/data/excel/2013 DUGAAIR.xls'
pivot = excel._file_single_pivot(filepath, template='pdderi')
pivot = pivot.replace('-', np.nan)
pivot_list = excel._dataframe_data(pivot, 2013)
assert pivot_list[0] == 1.33
assert pivot_list[-1] == 1.20
def test__dataframe_table():
filepath = 'tests/data/excel/2013 DUGAAIR.xls'
pivot = excel._file_single_pivot(filepath, template='pdderi')
dataframe = excel._dataframe_table(pivot, 2013, name='dugaair')
result = pd.read_csv('tests/data/2013 DUGAAIR.csv',
index_col=0, parse_dates=True)
assert_frame_equal(
result, dataframe
)
|
[
"hidrokit.prep.excel._file_single_pivot",
"pandas.testing.assert_frame_equal",
"hidrokit.prep.excel._dataframe_data",
"pandas.read_csv",
"hidrokit.prep.excel._dataframe_table",
"numpy.array",
"numpy.testing.assert_equal",
"hidrokit.prep.excel._file_year",
"pandas.testing.assert_index_equal",
"hidrokit.prep.excel._dataframe_year"
] |
[((340, 366), 'hidrokit.prep.excel._file_year', 'excel._file_year', (['filepath'], {}), '(filepath)\n', (356, 366), False, 'from hidrokit.prep import excel\n'), ((498, 532), 'hidrokit.prep.excel._file_single_pivot', 'excel._file_single_pivot', (['filepath'], {}), '(filepath)\n', (522, 532), False, 'from hidrokit.prep import excel\n'), ((602, 676), 'numpy.array', 'np.array', (['[66.3, nan, 11.3, nan, nan, 13.2, nan, nan, nan, nan, nan, 12.3]'], {}), '([66.3, nan, 11.3, nan, nan, 13.2, nan, nan, nan, nan, nan, 12.3])\n', (610, 676), True, 'import numpy as np\n'), ((705, 749), 'numpy.testing.assert_equal', 'assert_equal', (['row_3', 'pivot.iloc[3, :].values'], {}), '(row_3, pivot.iloc[3, :].values)\n', (717, 749), False, 'from numpy.testing import assert_equal\n'), ((805, 890), 'pandas.read_csv', 'pd.read_csv', (['"""tests/data/one_year_one_column.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('tests/data/one_year_one_column.csv', index_col=0, parse_dates=True\n )\n", (816, 890), True, 'import pandas as pd\n'), ((926, 953), 'hidrokit.prep.excel._dataframe_year', 'excel._dataframe_year', (['(2000)'], {}), '(2000)\n', (947, 953), False, 'from hidrokit.prep import excel\n'), ((959, 1006), 'pandas.testing.assert_index_equal', 'assert_index_equal', (['data.index', 'data_test.index'], {}), '(data.index, data_test.index)\n', (977, 1006), False, 'from pandas.testing import assert_index_equal, assert_frame_equal\n'), ((1114, 1167), 'hidrokit.prep.excel._file_single_pivot', 'excel._file_single_pivot', (['filepath'], {'template': '"""pdderi"""'}), "(filepath, template='pdderi')\n", (1138, 1167), False, 'from hidrokit.prep import excel\n'), ((1225, 1259), 'hidrokit.prep.excel._dataframe_data', 'excel._dataframe_data', (['pivot', '(2013)'], {}), '(pivot, 2013)\n', (1246, 1259), False, 'from hidrokit.prep import excel\n'), ((1421, 1474), 'hidrokit.prep.excel._file_single_pivot', 'excel._file_single_pivot', (['filepath'], {'template': '"""pdderi"""'}), "(filepath, template='pdderi')\n", (1445, 1474), False, 'from hidrokit.prep import excel\n'), ((1491, 1542), 'hidrokit.prep.excel._dataframe_table', 'excel._dataframe_table', (['pivot', '(2013)'], {'name': '"""dugaair"""'}), "(pivot, 2013, name='dugaair')\n", (1513, 1542), False, 'from hidrokit.prep import excel\n'), ((1557, 1630), 'pandas.read_csv', 'pd.read_csv', (['"""tests/data/2013 DUGAAIR.csv"""'], {'index_col': '(0)', 'parse_dates': '(True)'}), "('tests/data/2013 DUGAAIR.csv', index_col=0, parse_dates=True)\n", (1568, 1630), True, 'import pandas as pd\n'), ((1660, 1697), 'pandas.testing.assert_frame_equal', 'assert_frame_equal', (['result', 'dataframe'], {}), '(result, dataframe)\n', (1678, 1697), False, 'from pandas.testing import assert_index_equal, assert_frame_equal\n')]
|
import pandas as pd
import numpy as np
input_file_path = "datasets/Accelerometer-2011-06-02-17-21-57-liedown_bed-m1.txt"
fields = ['15', '16', '17']
if __name__ == '__main__':
# data = pd.read_csv(input_file_path, skipinitialspace=True, usecols=fields, delim_whitespace=True)
# data.to_csv('uic_dataset.csv', index=False)
# CONVERT THE ACCELEROMETER DATA INTO REAL ACCELERATION VALUES
    # mapping from [0..63] to [-14.709..+14.709] m/s^2:
    # real_val = -1.5 g + (coded_val / 63) * 3 g, with g = 9.806 m/s^2
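    # Added sanity check of the mapping at the endpoints and midpoint:
    #   coded 0    -> -14.709 m/s^2
    #   coded 31.5 ->   0.000 m/s^2
    #   coded 63   -> +14.709 m/s^2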
data = np.loadtxt("datasets/Accelerometer-2011-05-30-10-38-41-liedown_bed-m1.txt")
real_data = list(map(lambda v: -14.709 + (v / 63) * (2 * 14.709), data))
np.savetxt("datasets/hmp_dataset1.csv", real_data, delimiter=",")
|
[
"numpy.savetxt",
"numpy.loadtxt"
] |
[((497, 572), 'numpy.loadtxt', 'np.loadtxt', (['"""datasets/Accelerometer-2011-05-30-10-38-41-liedown_bed-m1.txt"""'], {}), "('datasets/Accelerometer-2011-05-30-10-38-41-liedown_bed-m1.txt')\n", (507, 572), True, 'import numpy as np\n'), ((650, 715), 'numpy.savetxt', 'np.savetxt', (['"""datasets/hmp_dataset1.csv"""', 'real_data'], {'delimiter': '""","""'}), "('datasets/hmp_dataset1.csv', real_data, delimiter=',')\n", (660, 715), True, 'import numpy as np\n')]
|
# coding: utf-8
import numpy as np
import pysptk
from nose.tools import raises
from warnings import warn
from scipy.io import wavfile
from os.path import join, dirname
def test_swipe():
def __test(x, fs, hopsize, otype):
f0 = pysptk.swipe(x, fs, hopsize, otype=otype)
assert np.all(np.isfinite(f0))
if otype == 1:
assert np.all(f0 >= 0)
np.random.seed(98765)
fs = 16000
x = np.random.rand(16000)
for hopsize in [40, 80, 160, 320]:
for otype in [0, 1, 2]:
yield __test, x, fs, hopsize, otype
for otype in ["pitch", "f0", "logf0"]:
yield __test, x, fs, 80, otype
# unsupported otype
yield raises(ValueError)(__test), x, fs, 80, -1
yield raises(ValueError)(__test), x, fs, 80, 3
yield raises(ValueError)(__test), x, fs, 80, "ff0"
def test_rapt():
def __test(x, fs, hopsize, min, max, otype):
f0 = pysptk.rapt(x, fs, hopsize, min=min, max=max, otype=otype)
assert np.all(np.isfinite(f0))
if otype == 1:
assert np.all(f0 >= 0)
np.random.seed(98765)
fs = 16000
x = np.random.rand(16000).astype(np.float32)
for otype in [0, 1, 2]:
for hopsize in [40, 80, 160, 320]:
yield __test, x, fs, hopsize, 60, 240, otype
for otype in ["pitch", "f0", "logf0"]:
yield __test, x, fs, 80, 60, 240, otype
# unsupported otype
yield raises(ValueError)(__test), x, fs, 80, 60, 240, -1
yield raises(ValueError)(__test), x, fs, 80, 60, 240, 3
yield raises(ValueError)(__test), x, fs, 80, 60, 240, "f00"
# valid min freq
yield __test, x, fs, 80, 10, 240, 0
warn("TODO: fix RAPT bug to pass this minfreq lower bound test")
# yield __test, x, fs, 80, fs / 10000. + 1, 240, 0
# valid max freq
yield __test, x, fs, 80, 60, fs // 2 - 1, 0
# invalid min/max freq
yield raises(ValueError)(__test), x, fs, 80, 60, 60, 0
yield raises(ValueError)(__test), x, fs, 80, 60, fs // 2, 0
yield raises(ValueError)(__test), x, fs, 80, fs / 10000., 240, 0
# valid frame_period (corner case)
yield __test, x, fs, 1600, 60, 240, 0
yield __test, x, fs, 2, 60, 240, 0
warn("TODO: pass this corner case test")
# yield __test, x, fs, 1, 60, 240, 0
# invalid frame_period
yield raises(ValueError)(__test), x, fs, 1601, 60, 240, 0
# valid input length
yield __test, x[:1000], fs, 80, 60, 240, 0
# invalid input length (too small)
yield raises(ValueError)(__test), x[:100], fs, 80, 60, 240, 0
def test_rapt_regression():
# Grund truth data is generated by:
#
# $ wav2raw pysptk/example_audio_data/arctic_a0007.wav
#
# $ x2x +sf ./pysptk/example_audio_data/arctic_a0007.raw | \
# pitch -a 0 -s 16 -p 80 -L 60 -H 240 -o 0 > \
# arctic_a007_p16_L60_H240_o0_rapt.pitch
#
# $ dmp +f arctic_a007_p16_L60_H240_o0_rapt.pitch | awk '{print $2}' >\
# arctic_a007_p16_L60_H240_o0_rapt.txt
#
# $ pitch -h
# ...
#
# SPTK: version 3.8
# CVS Info: $Id: pitch.c,v 1.46 2014/12/11 08:30:43 uratec Exp $
ground_truth_path = join(dirname(__file__), "data",
"arctic_a007_p16_L60_H240_o0_rapt.txt")
with open(ground_truth_path) as f:
        ground_truth = np.asarray([float(s) for s in f.readlines()])
ground_truth = ground_truth.astype(np.float32)
fs, x = wavfile.read(pysptk.util.example_audio_file())
assert fs == 16000
# Since SPTK might have memory corruption bug and the result might be
# non-deterministic, test it with multiple time...
for i in range(5):
f0 = pysptk.rapt(x.astype(np.float32), fs=fs, hopsize=80,
min=60, max=240, voice_bias=0.0, otype=0)
assert np.allclose(ground_truth, f0)
|
[
"numpy.random.seed",
"nose.tools.raises",
"os.path.dirname",
"numpy.allclose",
"numpy.isfinite",
"pysptk.rapt",
"numpy.all",
"numpy.random.rand",
"warnings.warn",
"pysptk.swipe",
"pysptk.util.example_audio_file"
] |
[((387, 408), 'numpy.random.seed', 'np.random.seed', (['(98765)'], {}), '(98765)\n', (401, 408), True, 'import numpy as np\n'), ((432, 453), 'numpy.random.rand', 'np.random.rand', (['(16000)'], {}), '(16000)\n', (446, 453), True, 'import numpy as np\n'), ((1083, 1104), 'numpy.random.seed', 'np.random.seed', (['(98765)'], {}), '(98765)\n', (1097, 1104), True, 'import numpy as np\n'), ((1666, 1730), 'warnings.warn', 'warn', (['"""TODO: fix RAPT bug to pass this minfreq lower bound test"""'], {}), "('TODO: fix RAPT bug to pass this minfreq lower bound test')\n", (1670, 1730), False, 'from warnings import warn\n'), ((2202, 2242), 'warnings.warn', 'warn', (['"""TODO: pass this corner case test"""'], {}), "('TODO: pass this corner case test')\n", (2206, 2242), False, 'from warnings import warn\n'), ((243, 284), 'pysptk.swipe', 'pysptk.swipe', (['x', 'fs', 'hopsize'], {'otype': 'otype'}), '(x, fs, hopsize, otype=otype)\n', (255, 284), False, 'import pysptk\n'), ((922, 980), 'pysptk.rapt', 'pysptk.rapt', (['x', 'fs', 'hopsize'], {'min': 'min', 'max': 'max', 'otype': 'otype'}), '(x, fs, hopsize, min=min, max=max, otype=otype)\n', (933, 980), False, 'import pysptk\n'), ((3154, 3171), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (3161, 3171), False, 'from os.path import join, dirname\n'), ((3483, 3515), 'pysptk.util.example_audio_file', 'pysptk.util.example_audio_file', ([], {}), '()\n', (3513, 3515), False, 'import pysptk\n'), ((3841, 3870), 'numpy.allclose', 'np.allclose', (['ground_truth', 'f0'], {}), '(ground_truth, f0)\n', (3852, 3870), True, 'import numpy as np\n'), ((307, 322), 'numpy.isfinite', 'np.isfinite', (['f0'], {}), '(f0)\n', (318, 322), True, 'import numpy as np\n'), ((366, 381), 'numpy.all', 'np.all', (['(f0 >= 0)'], {}), '(f0 >= 0)\n', (372, 381), True, 'import numpy as np\n'), ((1003, 1018), 'numpy.isfinite', 'np.isfinite', (['f0'], {}), '(f0)\n', (1014, 1018), True, 'import numpy as np\n'), ((1062, 1077), 'numpy.all', 'np.all', (['(f0 >= 0)'], {}), '(f0 >= 0)\n', (1068, 1077), True, 'import numpy as np\n'), ((1128, 1149), 'numpy.random.rand', 'np.random.rand', (['(16000)'], {}), '(16000)\n', (1142, 1149), True, 'import numpy as np\n'), ((692, 710), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (698, 710), False, 'from nose.tools import raises\n'), ((744, 762), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (750, 762), False, 'from nose.tools import raises\n'), ((795, 813), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (801, 813), False, 'from nose.tools import raises\n'), ((1425, 1443), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1431, 1443), False, 'from nose.tools import raises\n'), ((1486, 1504), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1492, 1504), False, 'from nose.tools import raises\n'), ((1546, 1564), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1552, 1564), False, 'from nose.tools import raises\n'), ((1894, 1912), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1900, 1912), False, 'from nose.tools import raises\n'), ((1953, 1971), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (1959, 1971), False, 'from nose.tools import raises\n'), ((2017, 2035), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2023, 2035), False, 'from nose.tools import raises\n'), ((2322, 2340), 'nose.tools.raises', 'raises', (['ValueError'], {}), 
'(ValueError)\n', (2328, 2340), False, 'from nose.tools import raises\n'), ((2497, 2515), 'nose.tools.raises', 'raises', (['ValueError'], {}), '(ValueError)\n', (2503, 2515), False, 'from nose.tools import raises\n')]
|
import sys, os, re, time, platform
import json, csv
import numpy as np
import matplotlib.pyplot as plt
from scipy import linalg as LA
import matplotlib
import matplotlib.cm
from mpl_toolkits.mplot3d import Axes3D
DATA_DIR = os.path.join('output', 'cheetah-multi-task', '2021_04_30_parametrized', '2021_04_30_cheetah_8_task_true_gmm', 'tensorboard')
FIG_DIR = os.path.join('log', 'latent')
PLOT_LIST = []
MARKERS = ['.', '^', 's', 'p', '*', 'X', 'h', 'd', '+', 'P']
COLORS = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']
def main(run_name=None, save=True, show_='last'):
global DATA_DIR
if run_name is not None:
head, tail = os.path.split(run_name)
if len(head) > 0:
DATA_DIR = os.path.join(run_name, 'tensorboard')
else:
DATA_DIR = os.path.join('output', 'cheetah-multi-task', run_name, 'tensorboard')
fig_folder = os.path.join(FIG_DIR, f'{time.strftime("%Y-%m-%d-%H_%M_%S")}_{os.path.split(DATA_DIR)[-1]}')
if not os.path.isdir(fig_folder) and save:
os.mkdir(fig_folder)
epoch_dict = {}
folders_ = [d for d in os.listdir(DATA_DIR) if os.path.isdir(os.path.join(DATA_DIR, d))]
maxlen = max([len(f) for f in folders_])
folders_ = sorted([f.zfill(maxlen) for f in folders_])
for folder in (folders_ if show_ == 'all' else [folders_[-1]]):
if re.match('[0-9]+', folder):
step_ = re.findall('([0-9]+)', folder)[0]
with open(os.path.join(DATA_DIR, folder, 'default', 'metadata.tsv'), newline='') as f:
r = csv.reader(f, delimiter='\t')
metadata = [row_ for row_ in r]
with open(os.path.join(DATA_DIR, folder, 'default', 'tensors.tsv'), newline='') as f:
r = csv.reader(f, delimiter='\t')
data = [row_ for row_ in r]
# Convert to np
metadata = np.array(metadata)
true_tasks = np.array([s[0].split('[')[0].strip() for s in metadata])
unique_tasks = np.sort(np.unique(true_tasks))
specs = np.array([float(re.findall('[-]*[0-9]+[.]*[0-9]*', s[0])[0]) for s in metadata])
            data = np.array(data, dtype=float)
d_list, t_list, s_list = [], [], []
for true_task in unique_tasks:
# Bring data to 3D
pcad = ''
if data.shape[1] < 3:
d = data[true_tasks == true_task]
temp = np.zeros([d.shape[0], 3])
temp[:, 0:d.shape[1]] = d
d = temp
elif data.shape[1] > 3:
d = data[true_tasks == true_task]
print(f'Performing PCA from {d.shape[1]} DIM to 3')
pcad = f' (Using PCA From {d.shape[1]} To 3 Dimensions)'
d = perform_pca(d)
d_list.append(d)
t_list.append(true_task)
s_list.append(specs[true_tasks == true_task])
epoch_dict[step_] = [metadata, data, pcad, d_list, t_list, s_list]
m_l = max([len(k) for k in epoch_dict.keys()])
# Plotting
# Use Latex text
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
plt.style.use('seaborn')
    size_ = 12  # 20 for veldir, 36 for 8 task
plt.rc('font', size=size_) # controls default text sizes
plt.rc('axes', labelsize=size_) # fontsize of the x and y labels
plt.rc('axes', titlesize=size_) # fontsize of the axes title
plt.rc('xtick', labelsize=size_) # fontsize of the tick labels
plt.rc('ytick', labelsize=size_) # fontsize of the tick labels
plt.rc('legend', fontsize=size_) # legend fontsize
plt.rc('figure', titlesize=size_) # fontsize of the figure title
CMAP_NAME = 'viridis'
for step_ in sorted(epoch_dict.keys()):
metadata, values, pcad, d_list, t_list, s_list = epoch_dict[step_]
fig = plt.figure()
for i, d in enumerate(d_list):
axs = fig.add_subplot(int(np.ceil(len(d_list) / 4)), min([len(d_list), 4]), i + 1, projection='3d')
axs.set_aspect('auto')
axs.set_title(f'{" ".join([el.capitalize() for el in t_list[i].split("_")])}', y=1.08)
el = axs.scatter(d[:, 0], d[:, 1], d[:, 2],
c=(s_list[i] - s_list[i].min()) / (s_list[i].max() - s_list[i].min()),
cmap=plt.get_cmap(CMAP_NAME))
axs.tick_params(labelsize=0)
# axs.set_xlabel('Latent Dim 1')
# axs.set_ylabel('Latent Dim 2')
# axs.set_zlabel('Latent Dim 3')
cbar = fig.colorbar(matplotlib.cm.ScalarMappable(cmap=plt.get_cmap(CMAP_NAME)), ax=fig.axes,
orientation='vertical',
anchor=(1., 0.5),
shrink=0.5,
ticks=[0, 0.5, 1])
cbar.ax.set_yticklabels(['Low', 'Medium', 'High'])
fig.set_size_inches(3. * min([len(d_list), 4]), 2.8 * int(np.ceil(len(d_list) / 4)))
if save:
plt.savefig(os.path.join(fig_folder, f'encodings_step_{str(step_).zfill(m_l)}.png'), format='png', dpi=100, bbox_inches='tight')
else:
plt.show()
plt.close()
print(f'Created plot for {DATA_DIR}')
def perform_pca(values, dims=3):
# sample points equally for all gaussians
x = np.copy(values)
# centering the data
x -= np.mean(x, axis=0)
cov = np.cov(x, rowvar=False)
evals, evecs = LA.eigh(cov)
idx = np.argsort(evals)[::-1]
evecs = evecs[:, idx[:dims]]
return np.dot(x, evecs)
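# Minimal usage sketch (added, with made-up data): reduce 100 random 8-D
# encodings to the 3-D space used for the scatter plots above:
#   z = perform_pca(np.random.randn(100, 8))   # -> shape (100, 3)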
if __name__ == '__main__':
main(*sys.argv[1:])
|
[
"os.mkdir",
"csv.reader",
"time.strftime",
"numpy.argsort",
"matplotlib.pyplot.style.use",
"numpy.mean",
"matplotlib.pyplot.figure",
"os.path.join",
"numpy.unique",
"numpy.copy",
"matplotlib.pyplot.close",
"re.findall",
"matplotlib.pyplot.rc",
"scipy.linalg.eigh",
"numpy.cov",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"re.match",
"numpy.dot",
"os.listdir",
"os.path.isdir",
"numpy.zeros",
"numpy.array",
"os.path.split"
] |
[((236, 364), 'os.path.join', 'os.path.join', (['"""output"""', '"""cheetah-multi-task"""', '"""2021_04_30_parametrized"""', '"""2021_04_30_cheetah_8_task_true_gmm"""', '"""tensorboard"""'], {}), "('output', 'cheetah-multi-task', '2021_04_30_parametrized',\n '2021_04_30_cheetah_8_task_true_gmm', 'tensorboard')\n", (248, 364), False, 'import sys, os, re, time, platform\n'), ((372, 401), 'os.path.join', 'os.path.join', (['"""log"""', '"""latent"""'], {}), "('log', 'latent')\n", (384, 401), False, 'import sys, os, re, time, platform\n'), ((2974, 2998), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (2987, 2998), True, 'import matplotlib.pyplot as plt\n'), ((3038, 3064), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'size_'}), "('font', size=size_)\n", (3044, 3064), True, 'import matplotlib.pyplot as plt\n'), ((3098, 3129), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'size_'}), "('axes', labelsize=size_)\n", (3104, 3129), True, 'import matplotlib.pyplot as plt\n'), ((3166, 3197), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'size_'}), "('axes', titlesize=size_)\n", (3172, 3197), True, 'import matplotlib.pyplot as plt\n'), ((3230, 3262), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'size_'}), "('xtick', labelsize=size_)\n", (3236, 3262), True, 'import matplotlib.pyplot as plt\n'), ((3296, 3328), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'size_'}), "('ytick', labelsize=size_)\n", (3302, 3328), True, 'import matplotlib.pyplot as plt\n'), ((3362, 3394), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'size_'}), "('legend', fontsize=size_)\n", (3368, 3394), True, 'import matplotlib.pyplot as plt\n'), ((3416, 3449), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'size_'}), "('figure', titlesize=size_)\n", (3422, 3449), True, 'import matplotlib.pyplot as plt\n'), ((4887, 4902), 'numpy.copy', 'np.copy', (['values'], {}), '(values)\n', (4894, 4902), True, 'import numpy as np\n'), ((4935, 4953), 'numpy.mean', 'np.mean', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4942, 4953), True, 'import numpy as np\n'), ((4964, 4987), 'numpy.cov', 'np.cov', (['x'], {'rowvar': '(False)'}), '(x, rowvar=False)\n', (4970, 4987), True, 'import numpy as np\n'), ((5007, 5019), 'scipy.linalg.eigh', 'LA.eigh', (['cov'], {}), '(cov)\n', (5014, 5019), True, 'from scipy import linalg as LA\n'), ((5096, 5112), 'numpy.dot', 'np.dot', (['x', 'evecs'], {}), '(x, evecs)\n', (5102, 5112), True, 'import numpy as np\n'), ((735, 758), 'os.path.split', 'os.path.split', (['run_name'], {}), '(run_name)\n', (748, 758), False, 'import sys, os, re, time, platform\n'), ((1085, 1105), 'os.mkdir', 'os.mkdir', (['fig_folder'], {}), '(fig_folder)\n', (1093, 1105), False, 'import sys, os, re, time, platform\n'), ((1391, 1417), 're.match', 're.match', (['"""[0-9]+"""', 'folder'], {}), "('[0-9]+', folder)\n", (1399, 1417), False, 'import sys, os, re, time, platform\n'), ((3635, 3647), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3645, 3647), True, 'import matplotlib.pyplot as plt\n'), ((4742, 4753), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4751, 4753), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5047), 'numpy.argsort', 'np.argsort', (['evals'], {}), '(evals)\n', (5040, 5047), True, 'import numpy as np\n'), ((795, 832), 'os.path.join', 'os.path.join', (['run_name', '"""tensorboard"""'], {}), "(run_name, 'tensorboard')\n", (807, 832), 
False, 'import sys, os, re, time, platform\n'), ((857, 926), 'os.path.join', 'os.path.join', (['"""output"""', '"""cheetah-multi-task"""', 'run_name', '"""tensorboard"""'], {}), "('output', 'cheetah-multi-task', run_name, 'tensorboard')\n", (869, 926), False, 'import sys, os, re, time, platform\n'), ((1046, 1071), 'os.path.isdir', 'os.path.isdir', (['fig_folder'], {}), '(fig_folder)\n', (1059, 1071), False, 'import sys, os, re, time, platform\n'), ((1153, 1173), 'os.listdir', 'os.listdir', (['DATA_DIR'], {}), '(DATA_DIR)\n', (1163, 1173), False, 'import sys, os, re, time, platform\n'), ((1835, 1853), 'numpy.array', 'np.array', (['metadata'], {}), '(metadata)\n', (1843, 1853), True, 'import numpy as np\n'), ((2086, 2116), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.float'}), '(data, dtype=np.float)\n', (2094, 2116), True, 'import numpy as np\n'), ((4726, 4736), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4734, 4736), True, 'import matplotlib.pyplot as plt\n'), ((969, 1003), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H_%M_%S"""'], {}), "('%Y-%m-%d-%H_%M_%S')\n", (982, 1003), False, 'import sys, os, re, time, platform\n'), ((1191, 1216), 'os.path.join', 'os.path.join', (['DATA_DIR', 'd'], {}), '(DATA_DIR, d)\n', (1203, 1216), False, 'import sys, os, re, time, platform\n'), ((1433, 1463), 're.findall', 're.findall', (['"""([0-9]+)"""', 'folder'], {}), "('([0-9]+)', folder)\n", (1443, 1463), False, 'import sys, os, re, time, platform\n'), ((1569, 1598), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (1579, 1598), False, 'import json, csv\n'), ((1735, 1764), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '"""\t"""'}), "(f, delimiter='\\t')\n", (1745, 1764), False, 'import json, csv\n'), ((1957, 1978), 'numpy.unique', 'np.unique', (['true_tasks'], {}), '(true_tasks)\n', (1966, 1978), True, 'import numpy as np\n'), ((1006, 1029), 'os.path.split', 'os.path.split', (['DATA_DIR'], {}), '(DATA_DIR)\n', (1019, 1029), False, 'import sys, os, re, time, platform\n'), ((1483, 1540), 'os.path.join', 'os.path.join', (['DATA_DIR', 'folder', '"""default"""', '"""metadata.tsv"""'], {}), "(DATA_DIR, folder, 'default', 'metadata.tsv')\n", (1495, 1540), False, 'import sys, os, re, time, platform\n'), ((1650, 1706), 'os.path.join', 'os.path.join', (['DATA_DIR', 'folder', '"""default"""', '"""tensors.tsv"""'], {}), "(DATA_DIR, folder, 'default', 'tensors.tsv')\n", (1662, 1706), False, 'import sys, os, re, time, platform\n'), ((2315, 2340), 'numpy.zeros', 'np.zeros', (['[d.shape[0], 3]'], {}), '([d.shape[0], 3])\n', (2323, 2340), True, 'import numpy as np\n'), ((4052, 4075), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['CMAP_NAME'], {}), '(CMAP_NAME)\n', (4064, 4075), True, 'import matplotlib.pyplot as plt\n'), ((4282, 4305), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['CMAP_NAME'], {}), '(CMAP_NAME)\n', (4294, 4305), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2048), 're.findall', 're.findall', (['"""[-]*[0-9]+[.]*[0-9]*"""', 's[0]'], {}), "('[-]*[0-9]+[.]*[0-9]*', s[0])\n", (2018, 2048), False, 'import sys, os, re, time, platform\n')]
|
 |
import numpy as np
def get_estimator(scorer_type, save_folder=None):
if scorer_type == 'esim':
# submitted model, glove + fasttext, with attention
from os import path
from athene.rte.deep_models.ESIM_for_ensemble import ESIM
from athene.utils.config import Config
pos_weight = np.asarray(Config.esim_hyper_param['pos_weight'], np.float32)
clf = ESIM(random_state=Config.seed, tensorboard_logdir="logdir/", learning_rate=Config.esim_hyper_param['lr'],
max_check_without_progress=Config.esim_hyper_param['max_checks_no_progress'],
activation=Config.esim_hyper_param['activation'],
initializer=Config.esim_hyper_param['initializer'],
lstm_layers=Config.esim_hyper_param['lstm_layers'],
optimizer=Config.esim_hyper_param['optimizer'],
trainable=Config.esim_hyper_param['trainable'],
batch_size=Config.esim_hyper_param['batch_size'],
dropout_rate=Config.esim_hyper_param['dropout'],
num_neurons=Config.esim_hyper_param['num_neurons'], pos_weight=pos_weight,
ckpt_path=path.join(save_folder, Config.name + '.ckpt'), name=Config.name)
    elif scorer_type == 'esim_glove_no_attention':
# glove, no attention
from os import path
from athene.rte.deep_models.ESIM_for_ensemble_glove_only_no_attention import ESIM
from athene.utils.config import Config
pos_weight = np.asarray(Config.esim_hyper_param['pos_weight'], np.float32)
clf = ESIM(random_state=Config.seed, tensorboard_logdir="logdir/", learning_rate=Config.esim_hyper_param['lr'],
max_check_without_progress=Config.esim_hyper_param['max_checks_no_progress'],
activation=Config.esim_hyper_param['activation'],
initializer=Config.esim_hyper_param['initializer'],
lstm_layers=Config.esim_hyper_param['lstm_layers'],
optimizer=Config.esim_hyper_param['optimizer'],
trainable=Config.esim_hyper_param['trainable'],
batch_size=Config.esim_hyper_param['batch_size'],
dropout_rate=Config.esim_hyper_param['dropout'],
num_neurons=Config.esim_hyper_param['num_neurons'], pos_weight=pos_weight,
ckpt_path=path.join(save_folder, Config.name + '.ckpt'), name=Config.name)
    else:
        raise ValueError("Unknown scorer_type: %s" % scorer_type)
    return clf
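# Hypothetical usage (paths are placeholders, not from the original code):
#   clf = get_estimator('esim', save_folder='models/esim_checkpoints')
# builds the submitted glove+fasttext ESIM model configured from
# Config.esim_hyper_param, with its checkpoint written under that folder.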
|
[
"numpy.asarray",
"os.path.join"
] |
[((323, 384), 'numpy.asarray', 'np.asarray', (["Config.esim_hyper_param['pos_weight']", 'np.float32'], {}), "(Config.esim_hyper_param['pos_weight'], np.float32)\n", (333, 384), True, 'import numpy as np\n'), ((1538, 1599), 'numpy.asarray', 'np.asarray', (["Config.esim_hyper_param['pos_weight']", 'np.float32'], {}), "(Config.esim_hyper_param['pos_weight'], np.float32)\n", (1548, 1599), True, 'import numpy as np\n'), ((1207, 1252), 'os.path.join', 'path.join', (['save_folder', "(Config.name + '.ckpt')"], {}), "(save_folder, Config.name + '.ckpt')\n", (1216, 1252), False, 'from os import path\n'), ((2422, 2467), 'os.path.join', 'path.join', (['save_folder', "(Config.name + '.ckpt')"], {}), "(save_folder, Config.name + '.ckpt')\n", (2431, 2467), False, 'from os import path\n')]
|
# -*- encoding: utf-8 -*-
import codecs
import json
import random
import shutil
from onmt.translate.translator import build_translator
from onmt.utils.parse import ArgumentParser
import os
import datetime
import time
import numpy as np
import kp_evaluate
from onmt.utils import split_corpus
from onmt.utils.logging import init_logger
import onmt.opts as opts
def scan_new_checkpoints(ckpt_dir):
ckpts = {}
for subdir, dirs, files in os.walk(ckpt_dir):
for file in files:
if file.endswith('.pt'):
ckpt_name = file[: file.find('.pt')]
ckpts[ckpt_name] = os.path.join(subdir, file)
return ckpts
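# Example (hypothetical file names): a ckpt_dir containing model_step_5000.pt
# and model_step_10000.pt yields
#   {'model_step_5000': '<ckpt_dir>/model_step_5000.pt',
#    'model_step_10000': '<ckpt_dir>/model_step_10000.pt'}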
def _get_parser():
parser = ArgumentParser(description='run_kp_eval.py')
opts.config_opts(parser)
opts.translate_opts(parser)
return parser
if __name__ == "__main__":
parser = _get_parser()
parser.add_argument('--tasks', '-tasks', nargs='+', type=str,
required=True,
choices=['pred', 'eval', 'report'],
help='Specify process to run, generation or evaluation')
parser.add_argument('-ckpt_dir', type=str, required=True, help='Directory to all checkpoints')
parser.add_argument('--step_base', '-step_base', type=int, default=1,
help='the base of step to be evaluated, only if ckpt_step % step_base==0 we evaluate it, '
'1 means evaluate everything.')
parser.add_argument('-output_dir', type=str, required=True, help='Directory to output results')
parser.add_argument('-data_dir', type=str, required=True, help='Directory to datasets (ground-truth)')
    parser.add_argument('-test_interval', type=int, default=600, help='Minimum time interval (in seconds) to wait if a .pred file has not been updated by another job (implying that the other job failed).')
parser.add_argument('-testsets', nargs='+', type=str, default=["nus", "semeval"], help='Specify datasets to test on')
# parser.add_argument('-testsets', nargs='+', type=str, default=["kp20k", "duc", "inspec", "krapivin", "nus", "semeval"], help='Specify datasets to test on')
parser.add_argument('--onepass', '-onepass', action='store_true', help='If true, it only scans and generates once, otherwise an infinite loop scanning new available ckpts.')
parser.add_argument('--wait_patience', '-wait_patience', type=int, default=1, help='Terminates evaluation after scan this number of times.')
parser.add_argument('--wait_time', '-wait_time', type=int, default=120, help='.')
parser.add_argument('--sleep_time', '-sleep_time', type=int, default=600, help='.')
parser.add_argument('--ignore_existing', '-ignore_existing', action='store_true', help='If true, it ignores previous generated results.')
parser.add_argument('--eval_topbeam', '-eval_topbeam',action="store_true", help='Evaluate with top beam only (self-terminating) or all beams (full search)')
parser.add_argument('--kp_concat_type', '-kp_concat_type', default='one2seq', help='one2one or one2seq')
opt = parser.parse_args()
# np.random.seed()
wait_time = np.random.randint(opt.wait_time)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") # "%Y-%m-%d_%H:%M:%S"
logger = init_logger(opt.output_dir + '/autoeval_%s_%s.log'
% ('-'.join(opt.testsets), current_time))
if not opt.onepass:
logger.info('Sleep for %d sec to avoid conflicting with other threads' % wait_time)
time.sleep(wait_time)
if not os.path.exists(opt.output_dir):
os.makedirs(opt.output_dir)
if not os.path.exists(os.path.join(opt.output_dir, 'eval')):
os.makedirs(os.path.join(opt.output_dir, 'eval'))
if not os.path.exists(os.path.join(opt.output_dir, 'pred')):
os.makedirs(os.path.join(opt.output_dir, 'pred'))
# shutil.copy2(opt.config, opt.output_dir)
logger.info(opt)
testset_path_dict = {}
for testset in opt.testsets:
src_shard = split_corpus(opt.data_dir + '/%s/%s_test.src' % (testset, testset), shard_size=-1)
tgt_shard = split_corpus(opt.data_dir + '/%s/%s_test.tgt' % (testset, testset), shard_size=-1)
src_shard, tgt_shard = list(zip(src_shard, tgt_shard))[0]
logger.info("Loaded data from %s: #src=%d, #tgt=%d" % (testset, len(src_shard), len(tgt_shard)))
testset_path_dict[testset] = (opt.data_dir + '/%s/%s_test.src' % (testset, testset),
opt.data_dir + '/%s/%s_test.tgt' % (testset, testset),
src_shard, tgt_shard)
current_patience = opt.wait_patience
pred_linecount_dict = {}
eval_linecount_dict = {}
while True:
new_ckpts = scan_new_checkpoints(opt.ckpt_dir)
new_ckpts_items = sorted(new_ckpts.items(), key=lambda x:int(x[0][x[0].rfind('step_')+5:]))
random.shuffle(new_ckpts_items)
logger.info('Found %d checkpoints from %s!' % (len(new_ckpts), opt.ckpt_dir))
if opt.step_base is not None and opt.step_base > 1:
logger.warn('-step_base is set, filtering some ckpts')
new_ckpts_items = [(ckpt_name, ckpt_path) for ckpt_name, ckpt_path in new_ckpts_items if int(ckpt_name[ckpt_name.rfind('step_')+5:]) % opt.step_base == 0 and int(ckpt_name[ckpt_name.rfind('step_')+5:]) // opt.step_base > 0]
logger.info('After filtering non opt.step_base ckpts, found %d checkpoints!' % (len(new_ckpts_items)))
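            # e.g. with -step_base 5000, only checkpoints whose trailing step number
            # is a positive multiple of 5000 (step_5000, step_10000, ...) are kept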
job_done = False # a flag indicating if any real pred/eval job is done
for ckpt_id, (ckpt_name, ckpt_path) in enumerate(new_ckpts_items):
logger.info("[%d/%d] Checking checkpoint: %s" % (ckpt_id, len(new_ckpts_items), ckpt_path))
setattr(opt, 'models', [ckpt_path])
translator = None
score_dicts = {}
for dataname, dataset in testset_path_dict.items():
src_path, tgt_path, src_shard, tgt_shard = dataset
pred_path = os.path.join(opt.output_dir, 'pred', ckpt_name, '%s.pred' % dataname)
printout_path = os.path.join(opt.output_dir, 'pred', ckpt_name, '%s.report.txt' % dataname)
eval_dir = os.path.join(opt.output_dir, 'eval')
eval_path = os.path.join(eval_dir, ckpt_name + '-%s-%s.json'
% (dataname, 'selfterminating' if opt.eval_topbeam else 'exhaustive'))
report_path = os.path.join(eval_dir, '%s_summary_%s.csv' % (current_time, '%s'))
# create dirs
if not os.path.exists(os.path.join(opt.output_dir, 'pred', ckpt_name)):
os.makedirs(os.path.join(opt.output_dir, 'pred', ckpt_name))
if not os.path.exists(eval_dir):
os.makedirs(eval_dir)
# do translation
# skip translation for this dataset if previous pred exists
do_trans_flag = True
if os.path.exists(pred_path):
elapsed_time = time.time() - os.stat(pred_path).st_mtime
if pred_path in pred_linecount_dict and pred_linecount_dict[pred_path] == len(src_shard):
# if it's already in pred_linecount_dict, means it's done and counted
do_trans_flag = False
elif elapsed_time < opt.test_interval:
do_trans_flag = False
logger.info("Skip translating because previous PRED file was generated only %d sec ago (<%d sec)." % (elapsed_time, opt.test_interval))
else:
# count line numbers of long-done files to check if this is a done pred
try:
pred_linecount_dict[pred_path] = len([1 for i in open(pred_path, 'r').readlines()])
# count is same means it's done
if pred_linecount_dict[pred_path] == len(src_shard):
do_trans_flag = False
logger.info("Skip translating because previous PRED is complete.")
else:
# if file is modified less than opt.test_interval min, it might be being processed by another job. Otherwise it's a bad result and delete it
if elapsed_time < opt.test_interval:
do_trans_flag = False
else:
os.remove(pred_path)
logger.info('Removed a bad PRED file, #(line)=%d, #(elapsed_time)=%ds: %s'
% (pred_linecount_dict[pred_path], int(elapsed_time), pred_path))
except Exception as e:
logger.exception('Error while validating or deleting PRED file: %s' % pred_path)
if 'pred' in opt.tasks:
try:
if do_trans_flag or opt.ignore_existing:
if translator is None:
translator = build_translator(opt, report_score=opt.verbose, logger=logger)
# create an empty file to indicate that the translator is working on it
codecs.open(pred_path, 'w+', 'utf-8').close()
# set output_file for each dataset (instead of outputting to opt.output)
translator.out_file = codecs.open(pred_path, 'w+', 'utf-8')
logger.info("Start translating [%s] for %s." % (dataname, ckpt_name))
logger.info("\t exporting PRED result to %s." % (pred_path))
_, _ = translator.translate(
src=src_shard,
tgt=tgt_shard,
# src_dir=opt.src_dir,
batch_size=opt.batch_size,
attn_debug=opt.attn_debug,
opt=opt
)
job_done = True
else:
logger.info("Skip translating [%s] for %s." % (dataname, ckpt_name))
except Exception as e:
logger.exception('Error while translating')
# do evaluation
do_eval_flag = True
if not os.path.exists(pred_path):
do_eval_flag = False
logger.info("Skip evaluating because no available pred file.")
else:
try:
if not pred_path in pred_linecount_dict:
pred_linecount_dict[pred_path] = len([1 for i in open(pred_path, 'r').readlines()])
num_pred = pred_linecount_dict[pred_path]
if num_pred != len(src_shard):
do_eval_flag = False
logger.info("Skip evaluating because current PRED file is not complete, #(line)=%d." % (num_pred))
elapsed_time = time.time() - os.stat(pred_path).st_mtime
if elapsed_time > opt.test_interval:
os.remove(pred_path)
logger.warn('Removed a bad PRED file, #(line)=%d, #(elapsed_time)=%ds: %s' % (num_pred, int(elapsed_time), pred_path))
else:
# if pred is good, check if re-eval is necessary
if os.path.exists(eval_path):
elapsed_time = time.time() - os.stat(eval_path).st_mtime
if eval_path in eval_linecount_dict and eval_linecount_dict[eval_path] == len(src_shard):
do_eval_flag = False
logger.info("Skip evaluating because eval_path in eval_linecount_dict and count matches src_shard")
elif elapsed_time < opt.test_interval:
# if file is modified less than opt.test_interval min, it might be being processed by another job.
do_eval_flag = False
logger.info("Skip evaluating because previous EVAL file was generated only %d sec ago (<%d sec)." % (elapsed_time, opt.test_interval))
else:
score_dict = json.load(open(eval_path, 'r'))
if 'present_exact_correct@5' in score_dict:
num_eval = len(score_dict['present_exact_correct@5'])
else:
num_eval = 0
eval_linecount_dict[eval_path] = num_eval
if num_eval == len(src_shard):
do_eval_flag = False
logger.info("Skip evaluating because existing eval file is complete.")
else:
# it's a bad result and delete it
os.remove(eval_path)
logger.info('Removed a bad eval file, #(pred)=%d, #(eval)=%d, #(elapsed_time)=%ds: %s' % (num_pred, num_eval, int(elapsed_time), eval_path))
except Exception as e:
logger.exception('Error while validating or deleting EVAL file: %s' % eval_path)
if 'eval' in opt.tasks:
try:
if do_eval_flag or opt.ignore_existing:
logger.info("Start evaluating [%s] for %s" % (dataname, ckpt_name))
logger.info("\t will export eval result to %s." % (eval_path))
score_dict = kp_evaluate.keyphrase_eval(opt.data_dir, src_path, tgt_path,
pred_path=pred_path, logger=logger,
verbose=opt.verbose,
report_path=printout_path
)
if score_dict is not None:
score_dicts[dataname] = score_dict
with open(eval_path, 'w') as output_json:
output_json.write(json.dumps(score_dict)+'\n')
job_done = True
else:
logger.info("Skip evaluating [%s] for %s." % (dataname, ckpt_name))
except Exception as e:
logger.exception('Error while evaluating')
# do generate summarized report
if 'report' in opt.tasks:
kp_evaluate.gather_eval_results(eval_root_dir=eval_dir, report_csv_dir=report_path)
if job_done: # reset current_patience if no real job is done in the current iteration
current_patience = opt.wait_patience
else:
current_patience -= 1
if opt.onepass:
break
if current_patience <= 0:
break
else:
# scan again for every 10min
sleep_time = opt.sleep_time
logger.info('Sleep for %d sec, current_patience=%d' % (sleep_time, current_patience))
logger.info('*' * 50)
time.sleep(sleep_time)
|
[
"onmt.opts.translate_opts",
"os.remove",
"onmt.utils.split_corpus",
"random.shuffle",
"os.walk",
"json.dumps",
"kp_evaluate.gather_eval_results",
"numpy.random.randint",
"os.path.join",
"codecs.open",
"onmt.opts.config_opts",
"os.path.exists",
"datetime.datetime.now",
"os.stat",
"time.sleep",
"kp_evaluate.keyphrase_eval",
"onmt.translate.translator.build_translator",
"onmt.utils.parse.ArgumentParser",
"os.makedirs",
"time.time"
] |
[((447, 464), 'os.walk', 'os.walk', (['ckpt_dir'], {}), '(ckpt_dir)\n', (454, 464), False, 'import os\n'), ((697, 741), 'onmt.utils.parse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""run_kp_eval.py"""'}), "(description='run_kp_eval.py')\n", (711, 741), False, 'from onmt.utils.parse import ArgumentParser\n'), ((747, 771), 'onmt.opts.config_opts', 'opts.config_opts', (['parser'], {}), '(parser)\n', (763, 771), True, 'import onmt.opts as opts\n'), ((776, 803), 'onmt.opts.translate_opts', 'opts.translate_opts', (['parser'], {}), '(parser)\n', (795, 803), True, 'import onmt.opts as opts\n'), ((3136, 3168), 'numpy.random.randint', 'np.random.randint', (['opt.wait_time'], {}), '(opt.wait_time)\n', (3153, 3168), True, 'import numpy as np\n'), ((3515, 3536), 'time.sleep', 'time.sleep', (['wait_time'], {}), '(wait_time)\n', (3525, 3536), False, 'import time\n'), ((3549, 3579), 'os.path.exists', 'os.path.exists', (['opt.output_dir'], {}), '(opt.output_dir)\n', (3563, 3579), False, 'import os\n'), ((3589, 3616), 'os.makedirs', 'os.makedirs', (['opt.output_dir'], {}), '(opt.output_dir)\n', (3600, 3616), False, 'import os\n'), ((4013, 4099), 'onmt.utils.split_corpus', 'split_corpus', (["(opt.data_dir + '/%s/%s_test.src' % (testset, testset))"], {'shard_size': '(-1)'}), "(opt.data_dir + '/%s/%s_test.src' % (testset, testset),\n shard_size=-1)\n", (4025, 4099), False, 'from onmt.utils import split_corpus\n'), ((4116, 4202), 'onmt.utils.split_corpus', 'split_corpus', (["(opt.data_dir + '/%s/%s_test.tgt' % (testset, testset))"], {'shard_size': '(-1)'}), "(opt.data_dir + '/%s/%s_test.tgt' % (testset, testset),\n shard_size=-1)\n", (4128, 4202), False, 'from onmt.utils import split_corpus\n'), ((4895, 4926), 'random.shuffle', 'random.shuffle', (['new_ckpts_items'], {}), '(new_ckpts_items)\n', (4909, 4926), False, 'import random\n'), ((3188, 3211), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3209, 3211), False, 'import datetime\n'), ((3643, 3679), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""eval"""'], {}), "(opt.output_dir, 'eval')\n", (3655, 3679), False, 'import os\n'), ((3702, 3738), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""eval"""'], {}), "(opt.output_dir, 'eval')\n", (3714, 3738), False, 'import os\n'), ((3766, 3802), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""pred"""'], {}), "(opt.output_dir, 'pred')\n", (3778, 3802), False, 'import os\n'), ((3825, 3861), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""pred"""'], {}), "(opt.output_dir, 'pred')\n", (3837, 3861), False, 'import os\n'), ((15897, 15919), 'time.sleep', 'time.sleep', (['sleep_time'], {}), '(sleep_time)\n', (15907, 15919), False, 'import time\n'), ((618, 644), 'os.path.join', 'os.path.join', (['subdir', 'file'], {}), '(subdir, file)\n', (630, 644), False, 'import os\n'), ((6021, 6090), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""pred"""', 'ckpt_name', "('%s.pred' % dataname)"], {}), "(opt.output_dir, 'pred', ckpt_name, '%s.pred' % dataname)\n", (6033, 6090), False, 'import os\n'), ((6123, 6198), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""pred"""', 'ckpt_name', "('%s.report.txt' % dataname)"], {}), "(opt.output_dir, 'pred', ckpt_name, '%s.report.txt' % dataname)\n", (6135, 6198), False, 'import os\n'), ((6226, 6262), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""eval"""'], {}), "(opt.output_dir, 'eval')\n", (6238, 6262), False, 'import os\n'), ((6291, 6415), 'os.path.join', 'os.path.join', (['eval_dir', "(ckpt_name + 
'-%s-%s.json' % (dataname, 'selfterminating' if opt.\n eval_topbeam else 'exhaustive'))"], {}), "(eval_dir, ckpt_name + '-%s-%s.json' % (dataname, \n 'selfterminating' if opt.eval_topbeam else 'exhaustive'))\n", (6303, 6415), False, 'import os\n'), ((6482, 6548), 'os.path.join', 'os.path.join', (['eval_dir', "('%s_summary_%s.csv' % (current_time, '%s'))"], {}), "(eval_dir, '%s_summary_%s.csv' % (current_time, '%s'))\n", (6494, 6548), False, 'import os\n'), ((7006, 7031), 'os.path.exists', 'os.path.exists', (['pred_path'], {}), '(pred_path)\n', (7020, 7031), False, 'import os\n'), ((6772, 6796), 'os.path.exists', 'os.path.exists', (['eval_dir'], {}), '(eval_dir)\n', (6786, 6796), False, 'import os\n'), ((6818, 6839), 'os.makedirs', 'os.makedirs', (['eval_dir'], {}), '(eval_dir)\n', (6829, 6839), False, 'import os\n'), ((10600, 10625), 'os.path.exists', 'os.path.exists', (['pred_path'], {}), '(pred_path)\n', (10614, 10625), False, 'import os\n'), ((15286, 15374), 'kp_evaluate.gather_eval_results', 'kp_evaluate.gather_eval_results', ([], {'eval_root_dir': 'eval_dir', 'report_csv_dir': 'report_path'}), '(eval_root_dir=eval_dir, report_csv_dir=\n report_path)\n', (15317, 15374), False, 'import kp_evaluate\n'), ((6618, 6665), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""pred"""', 'ckpt_name'], {}), "(opt.output_dir, 'pred', ckpt_name)\n", (6630, 6665), False, 'import os\n'), ((6700, 6747), 'os.path.join', 'os.path.join', (['opt.output_dir', '"""pred"""', 'ckpt_name'], {}), "(opt.output_dir, 'pred', ckpt_name)\n", (6712, 6747), False, 'import os\n'), ((7068, 7079), 'time.time', 'time.time', ([], {}), '()\n', (7077, 7079), False, 'import time\n'), ((7082, 7100), 'os.stat', 'os.stat', (['pred_path'], {}), '(pred_path)\n', (7089, 7100), False, 'import os\n'), ((9607, 9644), 'codecs.open', 'codecs.open', (['pred_path', '"""w+"""', '"""utf-8"""'], {}), "(pred_path, 'w+', 'utf-8')\n", (9618, 9644), False, 'import codecs\n'), ((11764, 11789), 'os.path.exists', 'os.path.exists', (['eval_path'], {}), '(eval_path)\n', (11778, 11789), False, 'import os\n'), ((14198, 14347), 'kp_evaluate.keyphrase_eval', 'kp_evaluate.keyphrase_eval', (['opt.data_dir', 'src_path', 'tgt_path'], {'pred_path': 'pred_path', 'logger': 'logger', 'verbose': 'opt.verbose', 'report_path': 'printout_path'}), '(opt.data_dir, src_path, tgt_path, pred_path=\n pred_path, logger=logger, verbose=opt.verbose, report_path=printout_path)\n', (14224, 14347), False, 'import kp_evaluate\n'), ((9219, 9281), 'onmt.translate.translator.build_translator', 'build_translator', (['opt'], {'report_score': 'opt.verbose', 'logger': 'logger'}), '(opt, report_score=opt.verbose, logger=logger)\n', (9235, 9281), False, 'from onmt.translate.translator import build_translator\n'), ((11315, 11326), 'time.time', 'time.time', ([], {}), '()\n', (11324, 11326), False, 'import time\n'), ((11454, 11474), 'os.remove', 'os.remove', (['pred_path'], {}), '(pred_path)\n', (11463, 11474), False, 'import os\n'), ((9410, 9447), 'codecs.open', 'codecs.open', (['pred_path', '"""w+"""', '"""utf-8"""'], {}), "(pred_path, 'w+', 'utf-8')\n", (9421, 9447), False, 'import codecs\n'), ((11329, 11347), 'os.stat', 'os.stat', (['pred_path'], {}), '(pred_path)\n', (11336, 11347), False, 'import os\n'), ((11838, 11849), 'time.time', 'time.time', ([], {}), '()\n', (11847, 11849), False, 'import time\n'), ((8590, 8610), 'os.remove', 'os.remove', (['pred_path'], {}), '(pred_path)\n', (8599, 8610), False, 'import os\n'), ((11852, 11870), 'os.stat', 'os.stat', (['eval_path'], {}), 
'(eval_path)\n', (11859, 11870), False, 'import os\n'), ((13490, 13510), 'os.remove', 'os.remove', (['eval_path'], {}), '(eval_path)\n', (13499, 13510), False, 'import os\n'), ((14866, 14888), 'json.dumps', 'json.dumps', (['score_dict'], {}), '(score_dict)\n', (14876, 14888), False, 'import json\n')]
|
import os
import cv2
import json
import time
import numpy as np
from h5_logger import H5Logger
from .config import Config
from .camera import Camera
from .utility import get_user_monitor
from .utility import get_angle_and_body_vector
from .utility import get_max_area_blob
from .blob_finder import BlobFinder
from .homography import Homography
from .calibration import Calibration
from .display import DisplayMode
from .display import DisplayController
class Trials:
POS_NOT_FOUND = (-1.0, -1.0)
def __init__(self, param_file, data_file):
self.param = None
self.logger = None
self.bg_image = None
self.t_start = 0.0
self.files = {'param': param_file, 'data': data_file}
self.load_param_file()
self.config = Config()
self.camera = Camera(self.config, 'fly')
self.calibration = Calibration(self.config)
self.user_monitor = get_user_monitor(self.config)
self.display = DisplayController(self.config, images=self.param['images'])
self.create_camera_window()
self.blob_finder = BlobFinder(**self.config['fly']['blob_finder'])
self.blob_finder.mask = self.arena_mask()
self.zero_bg_image()
self.show_threshold_image = False
def load_param_file(self):
if not os.path.exists(self.files['param']):
raise FileNotFoundError(f"param file not found: {self.files['param']}")
with open(self.files['param'], 'r') as f:
self.param = json.load(f)
def check_data_file(self):
if os.path.exists(self.files['data']):
print()
            print(' data file already exists, overwrite? (y/n): ', end='')
ans = input()
print()
if not ans == 'y':
print(' ok ... aborting run')
print()
exit(0)
def create_camera_window(self):
self.window_name = "studio50 file trials"
cv2.namedWindow(self.window_name)
cv2.resizeWindow(
self.window_name,
self.config['camera']['width'],
self.config['camera']['height']
)
window_pos_x = self.user_monitor.width - self.config['camera']['width']
window_pos_y = 0
cv2.moveWindow(self.window_name, window_pos_x, window_pos_y)
def create_threshold_image(self):
cv2.namedWindow('threshold')
cv2.resizeWindow(
'threshold',
self.config['camera']['width'],
self.config['camera']['height']
)
window_pos_x = self.user_monitor.width - self.config['camera']['width']
window_pos_y = 0
cv2.moveWindow('threshold', window_pos_x, window_pos_y)
def run_attributes(self):
attributes = {
'param' : self.param,
'config' : self.config.data,
'cal_data' : self.calibration.data(jsonable=True)
}
return attributes
def run(self):
print()
print(f" running studio50 fly")
print(f" ====================")
print()
print(f" param: {self.files['param']}")
print(f" output: {self.files['data']}")
print()
self.check_data_file()
self.logger = H5Logger(self.files['data'],jsonparam=self.run_attributes())
state = {'mode': DisplayMode.BLACK, 'kwargs': {}}
self.display.update_image(state)
cv2.waitKey(self.config['projector']['start_dt_ms'])
self.find_bg_image()
if self.show_threshold_image:
self.create_threshold_image()
self.run_trial_schedule()
def run_trial_schedule(self):
print(f' running trials (press q to quit)')
print()
self.t_start = time.time()
for cycle_num in range(self.param['cycles']):
print(f" cycle: {cycle_num+1}/{self.param['cycles']}")
print()
for trial_num, trial_name in enumerate(self.param['schedule']):
self.run_trial(trial_num, trial_name)
print()
def run_trial(self, trial_num, trial_name):
t_trial = time.time()
trial_param = self.param['trials'][trial_name]
len_schedule = len(self.param['schedule'])
print(f' trial {trial_num+1}/{len_schedule}: {trial_name}')
t_now = t_trial
pos = self.POS_NOT_FOUND
body_angle = 0.0
body_vector = np.array([0.0, 0.0])
exit_ok_flag = False
while (t_now - t_trial < trial_param['duration']) or not exit_ok_flag:
t_now = time.time()
t_elapsed_trial = t_now - t_trial
t_elapsed_total = t_now - self.t_start
found = False
ok, image = self.camera.read()
if ok:
gray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
diff_image = cv2.absdiff(gray_image, self.bg_image)
blob_list, blob_image, thresh_image = self.blob_finder.find(diff_image)
if self.show_threshold_image:
cv2.imshow('threshold', thresh_image)
if blob_list:
found = True
fly = get_max_area_blob(blob_list)
pos = (fly['centroid_x'], fly['centroid_y'])
body_angle, body_vector = get_angle_and_body_vector(fly['moments'])
self.draw_indicators_on_image(image, (fly['centroid_x'],fly['centroid_y']), body_vector)
cv2.imshow(self.window_name, image)
log_image = self.get_log_image(pos, gray_image)
display_mode, exit_ok_flag = self.update_display(t_elapsed_trial, pos, trial_param)
data = {
'found' : found,
't_total' : t_elapsed_total,
't_trial' : t_elapsed_trial,
'position' : pos,
'body_angle' : body_angle,
'body_vector' : body_vector,
'display_mode' : display_mode,
'trial_num' : trial_num,
'image' : log_image,
}
self.logger.add(data)
def update_display(self, t, pos, trial_param):
display_mode = DisplayMode[trial_param['display_mode'].upper()]
cx_arena = self.calibration.arena['centroid_x']
cy_arena = self.calibration.arena['centroid_y']
fly_radial_pos = np.sqrt((pos[0] - cx_arena)**2 + (pos[1] - cy_arena)**2)
if display_mode == DisplayMode.BLACK:
kwargs = {}
elif display_mode == DisplayMode.SOLID:
kwargs = {'color': trial_param['color']}
elif display_mode == DisplayMode.STATIC_IMAGE:
kwargs = {'name': trial_param['name']}
elif display_mode == DisplayMode.ROTATING_RAYS:
if (trial_param['center'] == 'arena') or (pos == self.POS_NOT_FOUND):
pos_tmp = (cx_arena, cy_arena)
else:
pos_tmp = pos
pos_proj = self.calibration.homography.camera_to_projector(pos_tmp)
kwargs = {
't' : t,
'pos' : tuple(pos_proj),
'rate' : trial_param['rate'],
'num_rays' : trial_param['num_rays'],
'color' : trial_param['color'],
}
elif display_mode == DisplayMode.FILLED_CIRCLE:
if (trial_param['center'] == 'arena') or (pos == self.POS_NOT_FOUND):
pos_tmp = (cx_arena, cy_arena)
else:
pos_tmp = pos
pos_proj = self.calibration.homography.camera_to_projector(pos_tmp)
            # Get the circle radius. If it is > 1 it is a size in pixels; if it is
            # <= 1 it is a fraction of the arena radius.
radius = trial_param['radius']
if radius <= 1:
radius = radius*self.get_arena_radius()
kwargs = {
'pos' : tuple(pos_proj),
'radius' : int(radius),
'color' : trial_param['color'],
}
elif display_mode == DisplayMode.SOLID_BLINKING:
kwargs = {
't' : t,
'period' : trial_param['period'],
'duty_cycle' : trial_param['duty_cycle'],
'on_color' : trial_param['on_color'],
'off_color' : trial_param['off_color'],
}
elif display_mode == DisplayMode.GRAYSCALE_GRADIENT:
if (trial_param['center'] == 'arena') or (pos == self.POS_NOT_FOUND):
pos_tmp = (cx_arena, cy_arena)
else:
pos_tmp = pos
pos_proj = tuple(self.calibration.homography.camera_to_projector(pos_tmp))
radius = trial_param['radius']
if radius <= 1:
radius = radius*self.get_arena_radius()
kwargs = {'pos': tuple(pos_proj), 'radius' : radius}
else:
raise ValueError(f"unknown display mode {trial_param['display_mode']}")
# Check for inhibition condition
try:
inhibit_cond = trial_param['inhibit_cond']
except KeyError:
inhibit_cond = {}
inhibit_flag = False
if inhibit_cond:
            # We have an inhibition condition: check it and set inhibit_flag accordingly.
if 'inside_radius' in inhibit_cond:
radius = inhibit_cond['inside_radius']
if radius <= 1:
radius = radius*self.get_arena_radius()
                inhibit_flag = fly_radial_pos <= radius
if 'outside_radius' in inhibit_cond:
radius = inhibit_cond['outside_radius']
if radius <= 1:
radius = radius*self.get_arena_radius()
                inhibit_flag = fly_radial_pos >= radius
kwargs['inhibit'] = inhibit_flag
# Update the projector display
self.display.update_image({'mode': display_mode, 'kwargs': kwargs})
key = cv2.waitKey(1) & 0xff
if key == ord('q'):
print()
print(' run aborted!')
print()
exit(0)
# Check for exit condition
try:
exit_cond = trial_param['exit_cond']
except KeyError:
exit_cond = {}
exit_ok_flag = True
if exit_cond:
            # We have an exit condition, check it and set the exit_ok_flag accordingly
if 'inside_radius' in exit_cond:
radius = exit_cond['inside_radius']
if radius <= 1:
radius = radius*self.get_arena_radius()
                exit_ok_flag = fly_radial_pos <= radius
if 'outside_radius' in exit_cond:
radius = exit_cond['outside_radius']
if radius <= 1:
radius = radius*self.get_arena_radius()
                exit_ok_flag = fly_radial_pos >= radius
return display_mode, exit_ok_flag
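    # --- Editor's note (not part of the original module) ---
    # Example of a single entry in param['trials'] consistent with the keys
    # read by run_trial() and update_display() above; all values here are
    # illustrative assumptions, not values from a real parameter file.
    #
    # 'rays_closed_loop': {
    #     'display_mode': 'rotating_rays',
    #     'duration': 30.0,
    #     'center': 'fly',
    #     'rate': 1.0,
    #     'num_rays': 6,
    #     'color': (255, 255, 255),
    #     'exit_cond': {'inside_radius': 0.5},
    # }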
def get_log_image(self, pos, image):
log_image_type = self.config['fly']['log']['image']['type']
if log_image_type == 'full':
log_image = image
elif log_image_type == 'arena':
bbx, bby, bbw, bbh = self.calibration.arena['bounding_box']
log_image = image[bby:bby+bbh,bbx:bbx+bbw]
elif log_image_type == 'fly':
image_h, image_w = image.shape
log_image_shape = self.config['fly']['log']['image']['fly_image_shape']
log_image_h, log_image_w = log_image_shape
fly_x, fly_y = int(pos[0]), int(pos[1])
# Get lower left (x0,y0) and upper right (x1,y1) corners for sub_image
x0 = fly_x - log_image_w//2
y0 = fly_y - log_image_h//2
x1 = x0 + log_image_w
y1 = y0 + log_image_h
            # Get adjustments for when part of sub_image is outside of the image
m0 = 0 if x0 > -1 else -x0
n0 = 0 if y0 > -1 else -y0
m1 = log_image_w if x1 < (image_w + 1) else -(x1 - image_w)
n1 = log_image_h if y1 < (image_h + 1) else -(y1 - image_h)
# Make sure actual x0 and y0 used are >= 0
x0 = 0 if x0 < 0 else x0
y0 = 0 if y0 < 0 else y0
# Create log image and assign subregion
log_image = np.zeros(log_image_shape, dtype=np.uint8)
log_image[n0:n1,m0:m1] = image[y0:y1, x0:x1]
else:
raise ValueError(f'unknown log image type {log_image_type}')
return log_image
def draw_indicators_on_image(self, image, pos, vector):
# Body orientation line
s0 = self.config['fly']['circle']['radius']
s1 = s0 + self.config['fly']['line']['length']
cx, cy = int(pos[0]), int(pos[1])
for sign in (1,-1):
pt0 = int(cx + sign*s0*vector[0]), int(cy + sign*s0*vector[1])
pt1 = int(cx + sign*s1*vector[0]), int(cy + sign*s1*vector[1])
cv2.line(
image,
pt0,
pt1,
self.config['fly']['line']['color'],
self.config['fly']['line']['thickness']
)
# Circle around fly position
cv2.circle(
image,
(cx, cy),
self.config['fly']['circle']['radius'],
self.config['fly']['circle']['color'],
self.config['fly']['circle']['thickness']
)
def find_bg_image(self):
print(f' finding background image (press q when done)')
self.zero_bg_image()
cv2.imshow(self.window_name, self.bg_image)
cv2.waitKey(1)
cnt = 0
done = False
while not done:
ok, image = self.camera.read()
if ok:
if cnt > self.config['fly']['background']['min_count']:
gray_image = cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
self.bg_image = np.maximum(self.bg_image, gray_image)
cv2.imshow(self.window_name, self.bg_image)
key = cv2.waitKey(1) & 0xff
if key == ord('q'):
done = True
cnt += 1
print()
def zero_bg_image(self):
shape = self.config['camera']['height'], self.config['camera']['width']
self.bg_image = np.zeros(shape, dtype=np.uint8)
def arena_mask(self):
shape = self.config['camera']['height'], self.config['camera']['width']
arena_mask = np.zeros(shape, dtype=np.uint8)
cv2.fillPoly(arena_mask, pts=[self.calibration.arena['contour']], color=(255,))
kernel_size = self.config['fly']['arena_mask']['kernel_size']
kernel = np.ones((kernel_size, kernel_size))
arena_mask = cv2.erode(arena_mask,kernel,iterations=1)
return arena_mask
def get_arena_radius(self):
bb_x,bb_y, bb_w,bb_h = self.calibration.arena['bounding_box']
p0 = (bb_x, bb_y)
p1 = (bb_x + bb_w, bb_y + bb_h)
p0_proj = tuple(self.calibration.homography.camera_to_projector(p0))
p1_proj = tuple(self.calibration.homography.camera_to_projector(p1))
radius = 0.5*max([abs(p1_proj[0] - p0_proj[0]), abs(p1_proj[1] - p0_proj[1])])
return radius
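# --- Editor's usage sketch (not part of the original module) ---
# Minimal driver, assuming a JSON parameter file with the keys read above
# ('images', 'cycles', 'schedule', 'trials') and an HDF5 output path; both
# file names below are hypothetical placeholders.
def _example_run(param_file='fly_trials.json', data_file='fly_trials.h5'):
    trials = Trials(param_file, data_file)
    trials.run()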
|
[
"numpy.maximum",
"numpy.ones",
"cv2.fillPoly",
"cv2.absdiff",
"cv2.erode",
"cv2.imshow",
"cv2.line",
"cv2.cvtColor",
"os.path.exists",
"cv2.circle",
"cv2.waitKey",
"cv2.resizeWindow",
"json.load",
"numpy.zeros",
"time.time",
"numpy.array",
"cv2.moveWindow",
"cv2.namedWindow",
"numpy.sqrt"
] |
[((1558, 1592), 'os.path.exists', 'os.path.exists', (["self.files['data']"], {}), "(self.files['data'])\n", (1572, 1592), False, 'import os\n'), ((1954, 1987), 'cv2.namedWindow', 'cv2.namedWindow', (['self.window_name'], {}), '(self.window_name)\n', (1969, 1987), False, 'import cv2\n'), ((1996, 2100), 'cv2.resizeWindow', 'cv2.resizeWindow', (['self.window_name', "self.config['camera']['width']", "self.config['camera']['height']"], {}), "(self.window_name, self.config['camera']['width'], self.\n config['camera']['height'])\n", (2012, 2100), False, 'import cv2\n'), ((2277, 2337), 'cv2.moveWindow', 'cv2.moveWindow', (['self.window_name', 'window_pos_x', 'window_pos_y'], {}), '(self.window_name, window_pos_x, window_pos_y)\n', (2291, 2337), False, 'import cv2\n'), ((2386, 2414), 'cv2.namedWindow', 'cv2.namedWindow', (['"""threshold"""'], {}), "('threshold')\n", (2401, 2414), False, 'import cv2\n'), ((2423, 2522), 'cv2.resizeWindow', 'cv2.resizeWindow', (['"""threshold"""', "self.config['camera']['width']", "self.config['camera']['height']"], {}), "('threshold', self.config['camera']['width'], self.config[\n 'camera']['height'])\n", (2439, 2522), False, 'import cv2\n'), ((2699, 2754), 'cv2.moveWindow', 'cv2.moveWindow', (['"""threshold"""', 'window_pos_x', 'window_pos_y'], {}), "('threshold', window_pos_x, window_pos_y)\n", (2713, 2754), False, 'import cv2\n'), ((3486, 3538), 'cv2.waitKey', 'cv2.waitKey', (["self.config['projector']['start_dt_ms']"], {}), "(self.config['projector']['start_dt_ms'])\n", (3497, 3538), False, 'import cv2\n'), ((3811, 3822), 'time.time', 'time.time', ([], {}), '()\n', (3820, 3822), False, 'import time\n'), ((4183, 4194), 'time.time', 'time.time', ([], {}), '()\n', (4192, 4194), False, 'import time\n'), ((4478, 4498), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (4486, 4498), True, 'import numpy as np\n'), ((6587, 6647), 'numpy.sqrt', 'np.sqrt', (['((pos[0] - cx_arena) ** 2 + (pos[1] - cy_arena) ** 2)'], {}), '((pos[0] - cx_arena) ** 2 + (pos[1] - cy_arena) ** 2)\n', (6594, 6647), True, 'import numpy as np\n'), ((13566, 13725), 'cv2.circle', 'cv2.circle', (['image', '(cx, cy)', "self.config['fly']['circle']['radius']", "self.config['fly']['circle']['color']", "self.config['fly']['circle']['thickness']"], {}), "(image, (cx, cy), self.config['fly']['circle']['radius'], self.\n config['fly']['circle']['color'], self.config['fly']['circle']['thickness']\n )\n", (13576, 13725), False, 'import cv2\n'), ((13945, 13988), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'self.bg_image'], {}), '(self.window_name, self.bg_image)\n', (13955, 13988), False, 'import cv2\n'), ((13997, 14011), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14008, 14011), False, 'import cv2\n'), ((14717, 14748), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.uint8'}), '(shape, dtype=np.uint8)\n', (14725, 14748), True, 'import numpy as np\n'), ((14877, 14908), 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.uint8'}), '(shape, dtype=np.uint8)\n', (14885, 14908), True, 'import numpy as np\n'), ((14917, 14996), 'cv2.fillPoly', 'cv2.fillPoly', (['arena_mask'], {'pts': "[self.calibration.arena['contour']]", 'color': '(255,)'}), "(arena_mask, pts=[self.calibration.arena['contour']], color=(255,))\n", (14929, 14996), False, 'import cv2\n'), ((15084, 15119), 'numpy.ones', 'np.ones', (['(kernel_size, kernel_size)'], {}), '((kernel_size, kernel_size))\n', (15091, 15119), True, 'import numpy as np\n'), ((15141, 15184), 'cv2.erode', 'cv2.erode', (['arena_mask', 'kernel'], 
{'iterations': '(1)'}), '(arena_mask, kernel, iterations=1)\n', (15150, 15184), False, 'import cv2\n'), ((1306, 1341), 'os.path.exists', 'os.path.exists', (["self.files['param']"], {}), "(self.files['param'])\n", (1320, 1341), False, 'import os\n'), ((1502, 1514), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1511, 1514), False, 'import json\n'), ((4630, 4641), 'time.time', 'time.time', ([], {}), '()\n', (4639, 4641), False, 'import time\n'), ((10280, 10294), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (10291, 10294), False, 'import cv2\n'), ((13290, 13398), 'cv2.line', 'cv2.line', (['image', 'pt0', 'pt1', "self.config['fly']['line']['color']", "self.config['fly']['line']['thickness']"], {}), "(image, pt0, pt1, self.config['fly']['line']['color'], self.config[\n 'fly']['line']['thickness'])\n", (13298, 13398), False, 'import cv2\n'), ((4858, 4897), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (4870, 4897), False, 'import cv2\n'), ((4926, 4964), 'cv2.absdiff', 'cv2.absdiff', (['gray_image', 'self.bg_image'], {}), '(gray_image, self.bg_image)\n', (4937, 4964), False, 'import cv2\n'), ((5558, 5593), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'image'], {}), '(self.window_name, image)\n', (5568, 5593), False, 'import cv2\n'), ((5121, 5158), 'cv2.imshow', 'cv2.imshow', (['"""threshold"""', 'thresh_image'], {}), "('threshold', thresh_image)\n", (5131, 5158), False, 'import cv2\n'), ((12646, 12687), 'numpy.zeros', 'np.zeros', (['log_image_shape'], {'dtype': 'np.uint8'}), '(log_image_shape, dtype=np.uint8)\n', (12654, 12687), True, 'import numpy as np\n'), ((14241, 14280), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (14253, 14280), False, 'import cv2\n'), ((14316, 14353), 'numpy.maximum', 'np.maximum', (['self.bg_image', 'gray_image'], {}), '(self.bg_image, gray_image)\n', (14326, 14353), True, 'import numpy as np\n'), ((14374, 14417), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'self.bg_image'], {}), '(self.window_name, self.bg_image)\n', (14384, 14417), False, 'import cv2\n'), ((14444, 14458), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (14455, 14458), False, 'import cv2\n')]
|
import json
import numpy as np
class SegmentStandardScaler:
def __init__(self, segments=None):
self.feat_mean = np.zeros((1, 1))
self.feat_std = np.ones((1, 1))
self.segments = segments
self._encountered_y_shape = None
def fit(self, y=None, segments=None):
if segments is not None:
self.segments = segments
if self.segments is None:
raise ValueError("Please define segments to scale features for shape", self.feat_mean.shape)
feat_std = []
feat_mean = []
splits = np.concatenate([np.array([0]), np.cumsum(self.segments)])
print(splits)
for i in range(len(self.segments)):
sub_array = y[:, splits[i]:splits[i + 1]]
feat_std.append(np.std(sub_array))
feat_mean.append(np.mean(sub_array))
feat_mean = np.repeat(np.array(feat_mean), np.array(self.segments))
feat_std = np.repeat(np.array(feat_std), np.array(self.segments))
self.feat_std = np.expand_dims(feat_std, axis=0)
self.feat_mean = np.expand_dims(feat_mean, axis=0)
self._encountered_y_shape = np.array(y.shape)
# print(feat_mean,feat_std)
def transform(self, y=None):
y_res = None
if y is not None:
y_res = (y - self.feat_mean) / self.feat_std
return y_res
def inverse_transform(self, y=None):
y_res = y
if y is not None:
y_res = y * self.feat_std + self.feat_mean
return y_res
def fit_transform(self, y=None, segments=None):
self.fit(y=y,segments=segments)
return self.transform(y=y)
def save(self, filepath):
outdict = {'feat_mean': self.feat_mean.tolist(),
'feat_std': self.feat_std.tolist(),
}
with open(filepath, 'w') as f:
json.dump(outdict, f)
def load(self, filepath):
with open(filepath, 'r') as f:
indict = json.load(f)
self.feat_mean = np.array(indict['feat_mean'])
self.feat_std = np.array(indict['feat_std'])
def get_params(self):
outdict = {'feat_mean': self.feat_mean.tolist(),
'feat_std': self.feat_std.tolist(),
}
return outdict
def set_params(self, indict):
self.feat_mean = np.array(indict['feat_mean'])
self.feat_std = np.array(indict['feat_std'])
def print_params_info(self):
print("Info: Data feature shape", self._encountered_y_shape)
print("Info: Using feature-scale", self.feat_std.shape, ":", self.feat_std)
print("Info: Using feature-offset", self.feat_mean.shape, ":", self.feat_mean)
|
[
"json.dump",
"json.load",
"numpy.std",
"numpy.zeros",
"numpy.expand_dims",
"numpy.ones",
"numpy.cumsum",
"numpy.mean",
"numpy.array"
] |
[((127, 143), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (135, 143), True, 'import numpy as np\n'), ((168, 183), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (175, 183), True, 'import numpy as np\n'), ((1024, 1056), 'numpy.expand_dims', 'np.expand_dims', (['feat_std'], {'axis': '(0)'}), '(feat_std, axis=0)\n', (1038, 1056), True, 'import numpy as np\n'), ((1082, 1115), 'numpy.expand_dims', 'np.expand_dims', (['feat_mean'], {'axis': '(0)'}), '(feat_mean, axis=0)\n', (1096, 1115), True, 'import numpy as np\n'), ((1153, 1170), 'numpy.array', 'np.array', (['y.shape'], {}), '(y.shape)\n', (1161, 1170), True, 'import numpy as np\n'), ((2023, 2052), 'numpy.array', 'np.array', (["indict['feat_mean']"], {}), "(indict['feat_mean'])\n", (2031, 2052), True, 'import numpy as np\n'), ((2077, 2105), 'numpy.array', 'np.array', (["indict['feat_std']"], {}), "(indict['feat_std'])\n", (2085, 2105), True, 'import numpy as np\n'), ((2349, 2378), 'numpy.array', 'np.array', (["indict['feat_mean']"], {}), "(indict['feat_mean'])\n", (2357, 2378), True, 'import numpy as np\n'), ((2403, 2431), 'numpy.array', 'np.array', (["indict['feat_std']"], {}), "(indict['feat_std'])\n", (2411, 2431), True, 'import numpy as np\n'), ((880, 899), 'numpy.array', 'np.array', (['feat_mean'], {}), '(feat_mean)\n', (888, 899), True, 'import numpy as np\n'), ((901, 924), 'numpy.array', 'np.array', (['self.segments'], {}), '(self.segments)\n', (909, 924), True, 'import numpy as np\n'), ((955, 973), 'numpy.array', 'np.array', (['feat_std'], {}), '(feat_std)\n', (963, 973), True, 'import numpy as np\n'), ((975, 998), 'numpy.array', 'np.array', (['self.segments'], {}), '(self.segments)\n', (983, 998), True, 'import numpy as np\n'), ((1871, 1892), 'json.dump', 'json.dump', (['outdict', 'f'], {}), '(outdict, f)\n', (1880, 1892), False, 'import json\n'), ((1984, 1996), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1993, 1996), False, 'import json\n'), ((591, 604), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (599, 604), True, 'import numpy as np\n'), ((606, 630), 'numpy.cumsum', 'np.cumsum', (['self.segments'], {}), '(self.segments)\n', (615, 630), True, 'import numpy as np\n'), ((781, 798), 'numpy.std', 'np.std', (['sub_array'], {}), '(sub_array)\n', (787, 798), True, 'import numpy as np\n'), ((829, 847), 'numpy.mean', 'np.mean', (['sub_array'], {}), '(sub_array)\n', (836, 847), True, 'import numpy as np\n')]
|
from MLlib.models import BernoulliNB
import numpy as np
with open('datasets/bernoulli_naive_bayes_dataset.txt', 'r') as f:
words = [[string.strip('\n')
for string in line.split(',')] for line in f]
for i in range(len(words)):
words[i] = list(map(int, words[i]))
x = np.array([words[i] for i in range(len(words)-1)])
y_class = np.array(words[-1])
test = np.array([[1, 0, 0, 0, 1, 1], [1, 1, 1, 0, 0, 1]])
nb = BernoulliNB(alpha=1).fit(np.where(x > 0, 1, 0), y_class)
print(nb.predict(test))
|
[
"MLlib.models.BernoulliNB",
"numpy.where",
"numpy.array"
] |
[((350, 369), 'numpy.array', 'np.array', (['words[-1]'], {}), '(words[-1])\n', (358, 369), True, 'import numpy as np\n'), ((378, 428), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 1, 1], [1, 1, 1, 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 1, 1], [1, 1, 1, 0, 0, 1]])\n', (386, 428), True, 'import numpy as np\n'), ((459, 480), 'numpy.where', 'np.where', (['(x > 0)', '(1)', '(0)'], {}), '(x > 0, 1, 0)\n', (467, 480), True, 'import numpy as np\n'), ((434, 454), 'MLlib.models.BernoulliNB', 'BernoulliNB', ([], {'alpha': '(1)'}), '(alpha=1)\n', (445, 454), False, 'from MLlib.models import BernoulliNB\n')]
|
import os
import uuid
import pandas as pd
import numpy as np
import concurrent.futures as cf
from arize.api import Client
from arize.utils.types import ModelTypes
ITERATIONS = 1
NUM_RECORDS = 2
arize = Client(
organization_key=os.environ.get("ARIZE_ORG_KEY"),
api_key=os.environ.get("ARIZE_API_KEY"),
)
features = pd.DataFrame(
np.random.randint(0, 100000000, size=(NUM_RECORDS, 12)),
columns=list("ABCDEFGHIJKL"),
)
pred_labels = pd.DataFrame(np.random.randint(0, 100000000, size=(NUM_RECORDS, 1)))
ids = pd.DataFrame([str(uuid.uuid4()) for _ in range(NUM_RECORDS)])
column_overwrite = list("abcdefghijkl")
shap_values = pd.DataFrame(
np.random.random(size=(NUM_RECORDS, 12)),
columns=list("abcdefghijkl"),
)
print(shap_values)
preds = arize.bulk_log(
model_id="example_model_id",
model_type=ModelTypes.NUMERIC,
model_version="v0.1",
prediction_ids=ids,
prediction_labels=pred_labels,
features=features,
feature_names_overwrite=column_overwrite,
shap_values=shap_values,
)
for future in cf.as_completed(preds):
res = future.result()
print(f"future completed with response code {res.status_code}")
if res.status_code != 200:
print(f"future failed with response code {res.status_code}, {res.text}")
|
[
"uuid.uuid4",
"os.environ.get",
"numpy.random.randint",
"numpy.random.random",
"concurrent.futures.as_completed"
] |
[((1049, 1071), 'concurrent.futures.as_completed', 'cf.as_completed', (['preds'], {}), '(preds)\n', (1064, 1071), True, 'import concurrent.futures as cf\n'), ((344, 399), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100000000)'], {'size': '(NUM_RECORDS, 12)'}), '(0, 100000000, size=(NUM_RECORDS, 12))\n', (361, 399), True, 'import numpy as np\n'), ((464, 518), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100000000)'], {'size': '(NUM_RECORDS, 1)'}), '(0, 100000000, size=(NUM_RECORDS, 1))\n', (481, 518), True, 'import numpy as np\n'), ((660, 700), 'numpy.random.random', 'np.random.random', ([], {'size': '(NUM_RECORDS, 12)'}), '(size=(NUM_RECORDS, 12))\n', (676, 700), True, 'import numpy as np\n'), ((234, 265), 'os.environ.get', 'os.environ.get', (['"""ARIZE_ORG_KEY"""'], {}), "('ARIZE_ORG_KEY')\n", (248, 265), False, 'import os\n'), ((279, 310), 'os.environ.get', 'os.environ.get', (['"""ARIZE_API_KEY"""'], {}), "('ARIZE_API_KEY')\n", (293, 310), False, 'import os\n'), ((544, 556), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (554, 556), False, 'import uuid\n')]
|
import math
import torch
import torch.nn as nn
from torch.distributions import Normal
from torch.distributions import VonMises
from torch.distributions import Independent
from torch.distributions import Uniform
from survae.distributions.conditional import ConditionalDistribution
from survae.utils import sum_except_batch
import numpy as np
class Loba_dist(ConditionalDistribution):
"""See https://github.com/SB-27182/loba_NN"""
def __init__(self, radii_net, osci_spread_net, n_var_net, o_mean_net,
shift=torch.tensor([[0.0, 0.0]])):
        super(Loba_dist, self).__init__()
self.radii_net = radii_net
self.osci_spread_net = osci_spread_net
self.n_var_net = n_var_net
self.o_mean_net = o_mean_net
self.shift = shift
def cond_dist(self, context):
radii = self.radii_net(context)
osci_spread = torch.abs(self.osci_spread_net(context))
n_var = torch.abs(self.n_var_net(context))
o_mean = self.o_mean_net(context)
vm = Independent(VonMises(o_mean, osci_spread), 1)
sample_inds_n = torch.squeeze(vm.sample([1]))
x = self.shift[:, 0:1] + radii[:, 0:1]*torch.cos(sample_inds_n[:, 0:1])
y = self.shift[:, 1:2] + radii[:, 1:2]*torch.cos(sample_inds_n[:, 1:2])*torch.sin(sample_inds_n[:, 0:1])
n_mean = torch.cat((x, y), dim=1)
return Normal(loc=n_mean, scale=n_var)
def log_prob(self, x, context):
dist = self.cond_dist(context)
return sum_except_batch(dist.log_prob(x))
def sample(self, context):
#assert False, "Not Implemented"
dist = self.cond_dist(context)
return dist.rsample()
def latent_sample_nVars(self, context, dims, myVarDim=0, scaleMyVarBy=1.0, scaleOthrVarsBy=0.01,
myOsciVarDim=0, scaleMyOsciVarBy=1.0, scaleOthrOsciVarsBy=1.0):
radii = self.radii_net(context)
osciVar = torch.abs(self.osci_spread_net(context))
n_var = torch.abs(self.n_var_net(context))
o_mean = self.o_mean_net(context)
# <editor-fold desc="Oscil Variance Scaleing">
scaleOsciVarDims = np.full([1, dims], scaleOthrOsciVarsBy)
np.put(scaleOsciVarDims, [myOsciVarDim], scaleMyOsciVarBy)
scaleOsciVarDims = torch.tensor(scaleOsciVarDims, dtype=torch.float32)
osciVar = torch.mul(osciVar, scaleOsciVarDims)
# </editor-fold>
vm = VonMises(o_mean, osciVar)
sample_inds = torch.squeeze(vm.sample([1]))
x = self.shift[:, 0:1] + radii[:, 0:1] * torch.cos(sample_inds[:, 0:1])
y = self.shift[:, 1:2] + radii[:, 1:2] * torch.cos(sample_inds[:, 1:2]) * torch.sin(sample_inds[:, 0:1])
n_mean = torch.cat((x, y), dim=1)
print(n_var)
# <editor-fold desc="n Variance Scaleing">
scaleVarDims = np.full([1, dims], scaleOthrVarsBy)
np.put(scaleVarDims, [myVarDim], scaleMyVarBy)
scaleVarDims = torch.tensor(scaleVarDims, dtype=torch.float32)
n_var = torch.mul(n_var, scaleVarDims)
# </editor-fold>
dist = Normal(loc=n_mean, scale=n_var)
z = dist.sample([1])
return z
def latent_sample_1DimOscil(self, context, dims, myDim, scaleMyVarBy=1.0, scaleOthrVarsBy=1.0,
myOsciVarDim=0, scaleMyOsciVarBy=1.0, scaleOthrOsciVarsBy=1.0):
"""Generates the latent corresponding to the found 1-dim oscilator in the data."""
radii = self.radii_net(context)
osciVar = torch.abs(self.osci_spread_net(context))
n_var = torch.abs(self.n_var_net(context))
o_mean = self.o_mean_net(context)
# <editor-fold desc="Oscil Variance Scaleing">
scaleOsciVarDims = np.full([1, dims], scaleOthrOsciVarsBy)
np.put(scaleOsciVarDims, [myOsciVarDim], scaleMyOsciVarBy)
scaleOsciVarDims = torch.tensor(scaleOsciVarDims, dtype=torch.float32)
osciVar = torch.mul(osciVar, scaleOsciVarDims)
# </editor-fold>
vm = VonMises(o_mean, osciVar)
sample_inds = torch.squeeze(vm.sample([1]))
my_dim_variable = sample_inds[:, myDim:myDim+1]
x = self.shift[:, 0:1] + radii[:, 0:1]*torch.cos(my_dim_variable)
y = self.shift[:, 1:2] + radii[:, 1:2]*torch.sin(my_dim_variable)
n_mean = torch.cat((x, y), dim=1)
# <editor-fold desc="n Variance Scaleing">
scaleVarDims = np.full([1, dims], scaleOthrVarsBy)
np.put(scaleVarDims, [myDim], scaleMyVarBy)
scaleVarDims = torch.tensor(scaleVarDims, dtype=torch.float32)
n_var = torch.mul(n_var, scaleVarDims)
# </editor-fold>
dist = Normal(loc=n_mean, scale=n_var)
z = dist.sample([1])
return z
def conditional_variance_avg(self, context):
"""Returns average variance of latent density givent context."""
n_var = torch.abs(self.n_var_net(context))
return(torch.mean(n_var, dim=0))
def sample_with_log_prob(self, context):
assert False, "Not Implemented"
def mean(self, context):
assert False, "Not Implemented"
|
[
"numpy.full",
"torch.mean",
"numpy.put",
"torch.cat",
"torch.mul",
"torch.cos",
"torch.distributions.VonMises",
"torch.distributions.Normal",
"torch.sin",
"torch.tensor"
] |
[((533, 559), 'torch.tensor', 'torch.tensor', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (545, 559), False, 'import torch\n'), ((1367, 1391), 'torch.cat', 'torch.cat', (['(x, y)'], {'dim': '(1)'}), '((x, y), dim=1)\n', (1376, 1391), False, 'import torch\n'), ((1408, 1439), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'n_mean', 'scale': 'n_var'}), '(loc=n_mean, scale=n_var)\n', (1414, 1439), False, 'from torch.distributions import Normal\n'), ((2174, 2213), 'numpy.full', 'np.full', (['[1, dims]', 'scaleOthrOsciVarsBy'], {}), '([1, dims], scaleOthrOsciVarsBy)\n', (2181, 2213), True, 'import numpy as np\n'), ((2222, 2280), 'numpy.put', 'np.put', (['scaleOsciVarDims', '[myOsciVarDim]', 'scaleMyOsciVarBy'], {}), '(scaleOsciVarDims, [myOsciVarDim], scaleMyOsciVarBy)\n', (2228, 2280), True, 'import numpy as np\n'), ((2308, 2359), 'torch.tensor', 'torch.tensor', (['scaleOsciVarDims'], {'dtype': 'torch.float32'}), '(scaleOsciVarDims, dtype=torch.float32)\n', (2320, 2359), False, 'import torch\n'), ((2378, 2414), 'torch.mul', 'torch.mul', (['osciVar', 'scaleOsciVarDims'], {}), '(osciVar, scaleOsciVarDims)\n', (2387, 2414), False, 'import torch\n'), ((2454, 2479), 'torch.distributions.VonMises', 'VonMises', (['o_mean', 'osciVar'], {}), '(o_mean, osciVar)\n', (2462, 2479), False, 'from torch.distributions import VonMises\n'), ((2743, 2767), 'torch.cat', 'torch.cat', (['(x, y)'], {'dim': '(1)'}), '((x, y), dim=1)\n', (2752, 2767), False, 'import torch\n'), ((2863, 2898), 'numpy.full', 'np.full', (['[1, dims]', 'scaleOthrVarsBy'], {}), '([1, dims], scaleOthrVarsBy)\n', (2870, 2898), True, 'import numpy as np\n'), ((2907, 2953), 'numpy.put', 'np.put', (['scaleVarDims', '[myVarDim]', 'scaleMyVarBy'], {}), '(scaleVarDims, [myVarDim], scaleMyVarBy)\n', (2913, 2953), True, 'import numpy as np\n'), ((2977, 3024), 'torch.tensor', 'torch.tensor', (['scaleVarDims'], {'dtype': 'torch.float32'}), '(scaleVarDims, dtype=torch.float32)\n', (2989, 3024), False, 'import torch\n'), ((3041, 3071), 'torch.mul', 'torch.mul', (['n_var', 'scaleVarDims'], {}), '(n_var, scaleVarDims)\n', (3050, 3071), False, 'import torch\n'), ((3113, 3144), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'n_mean', 'scale': 'n_var'}), '(loc=n_mean, scale=n_var)\n', (3119, 3144), False, 'from torch.distributions import Normal\n'), ((3750, 3789), 'numpy.full', 'np.full', (['[1, dims]', 'scaleOthrOsciVarsBy'], {}), '([1, dims], scaleOthrOsciVarsBy)\n', (3757, 3789), True, 'import numpy as np\n'), ((3798, 3856), 'numpy.put', 'np.put', (['scaleOsciVarDims', '[myOsciVarDim]', 'scaleMyOsciVarBy'], {}), '(scaleOsciVarDims, [myOsciVarDim], scaleMyOsciVarBy)\n', (3804, 3856), True, 'import numpy as np\n'), ((3884, 3935), 'torch.tensor', 'torch.tensor', (['scaleOsciVarDims'], {'dtype': 'torch.float32'}), '(scaleOsciVarDims, dtype=torch.float32)\n', (3896, 3935), False, 'import torch\n'), ((3954, 3990), 'torch.mul', 'torch.mul', (['osciVar', 'scaleOsciVarDims'], {}), '(osciVar, scaleOsciVarDims)\n', (3963, 3990), False, 'import torch\n'), ((4030, 4055), 'torch.distributions.VonMises', 'VonMises', (['o_mean', 'osciVar'], {}), '(o_mean, osciVar)\n', (4038, 4055), False, 'from torch.distributions import VonMises\n'), ((4331, 4355), 'torch.cat', 'torch.cat', (['(x, y)'], {'dim': '(1)'}), '((x, y), dim=1)\n', (4340, 4355), False, 'import torch\n'), ((4431, 4466), 'numpy.full', 'np.full', (['[1, dims]', 'scaleOthrVarsBy'], {}), '([1, dims], scaleOthrVarsBy)\n', (4438, 4466), True, 'import numpy as np\n'), ((4475, 4518), 'numpy.put', 'np.put', 
(['scaleVarDims', '[myDim]', 'scaleMyVarBy'], {}), '(scaleVarDims, [myDim], scaleMyVarBy)\n', (4481, 4518), True, 'import numpy as np\n'), ((4542, 4589), 'torch.tensor', 'torch.tensor', (['scaleVarDims'], {'dtype': 'torch.float32'}), '(scaleVarDims, dtype=torch.float32)\n', (4554, 4589), False, 'import torch\n'), ((4606, 4636), 'torch.mul', 'torch.mul', (['n_var', 'scaleVarDims'], {}), '(n_var, scaleVarDims)\n', (4615, 4636), False, 'import torch\n'), ((4678, 4709), 'torch.distributions.Normal', 'Normal', ([], {'loc': 'n_mean', 'scale': 'n_var'}), '(loc=n_mean, scale=n_var)\n', (4684, 4709), False, 'from torch.distributions import Normal\n'), ((4946, 4970), 'torch.mean', 'torch.mean', (['n_var'], {'dim': '(0)'}), '(n_var, dim=0)\n', (4956, 4970), False, 'import torch\n'), ((1068, 1097), 'torch.distributions.VonMises', 'VonMises', (['o_mean', 'osci_spread'], {}), '(o_mean, osci_spread)\n', (1076, 1097), False, 'from torch.distributions import VonMises\n'), ((1204, 1236), 'torch.cos', 'torch.cos', (['sample_inds_n[:, 0:1]'], {}), '(sample_inds_n[:, 0:1])\n', (1213, 1236), False, 'import torch\n'), ((1317, 1349), 'torch.sin', 'torch.sin', (['sample_inds_n[:, 0:1]'], {}), '(sample_inds_n[:, 0:1])\n', (1326, 1349), False, 'import torch\n'), ((2582, 2612), 'torch.cos', 'torch.cos', (['sample_inds[:, 0:1]'], {}), '(sample_inds[:, 0:1])\n', (2591, 2612), False, 'import torch\n'), ((2695, 2725), 'torch.sin', 'torch.sin', (['sample_inds[:, 0:1]'], {}), '(sample_inds[:, 0:1])\n', (2704, 2725), False, 'import torch\n'), ((4213, 4239), 'torch.cos', 'torch.cos', (['my_dim_variable'], {}), '(my_dim_variable)\n', (4222, 4239), False, 'import torch\n'), ((4287, 4313), 'torch.sin', 'torch.sin', (['my_dim_variable'], {}), '(my_dim_variable)\n', (4296, 4313), False, 'import torch\n'), ((1284, 1316), 'torch.cos', 'torch.cos', (['sample_inds_n[:, 1:2]'], {}), '(sample_inds_n[:, 1:2])\n', (1293, 1316), False, 'import torch\n'), ((2662, 2692), 'torch.cos', 'torch.cos', (['sample_inds[:, 1:2]'], {}), '(sample_inds[:, 1:2])\n', (2671, 2692), False, 'import torch\n')]
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import sys
import os
import os.path as osp
import glob
import re
import warnings
from torchreid.data.datasets import ImageDataset
from torchreid.utils import read_image
import cv2
import numpy as np
class Occluded_REID(ImageDataset):
dataset_dir = 'occluded-reid'
def __init__(self, root='', **kwargs):
self.root = osp.abspath(osp.expanduser(root))
self.dataset_dir = osp.join(self.root, self.dataset_dir)
# if osp.isdir(data_dir):
# self.data_dir = data_dir
# else:
# warnings.warn('The current data structure is deprecated.')
self.train_dir = osp.join(self.dataset_dir, 'Occluded_REID/train')
self.query_dir = osp.join(self.dataset_dir, 'Occluded_REID/query')
self.gallery_dir = osp.join(self.dataset_dir, 'Occluded_REID/test')
train = self.process_dir(self.train_dir, relabel=True)
query = self.process_dir(self.query_dir, relabel=False)
gallery = self.process_dir(self.gallery_dir, relabel=False, is_query=False)
super(Occluded_REID, self).__init__(train, query, gallery, **kwargs)
self.load_pose = isinstance(self.transform, tuple)
if self.load_pose:
if self.mode == 'query':
self.pose_dir = osp.join(self.data_dir, 'occluded_body_pose')
elif self.mode == 'gallery':
self.pose_dir = osp.join(self.data_dir, 'whole_body_pose')
else:
self.pose_dir = ''
def process_dir(self, dir_path, relabel=False, is_query=True):
img_paths = glob.glob(osp.join(dir_path, '*.tif'))
if is_query:
camid = 0
else:
camid = 1
pid_container = set()
for img_path in img_paths:
img_name = img_path.split('/')[-1]
pid = int(img_name.split('_')[0])
pid_container.add(pid)
pid2label = {pid: label for label, pid in enumerate(pid_container)}
data = []
for img_path in img_paths:
img_name = img_path.split('/')[-1]
pid = int(img_name.split('_')[0])
if relabel:
pid = pid2label[pid]
data.append((img_path, pid, camid))
return data
def __getitem__(self, index):
img_path, pid, camid = self.data[index]
img = read_image(img_path)
if self.load_pose:
img_name = '.'.join(img_path.split('/')[-1].split('.')[:-1])
pose_pic_name = img_name + '_pose_heatmaps.png'
pose_pic_path = os.path.join(self.pose_dir, pose_pic_name)
pose = cv2.imread(pose_pic_path, cv2.IMREAD_GRAYSCALE)
pose = pose.reshape((pose.shape[0], 56, -1)).transpose((0,2,1)).astype('float32')
pose[:,:,18:] = np.abs(pose[:,:,18:]-128)
img, pose = self.transform[1](img, pose)
img = self.transform[0](img)
return img, pid, camid, img_path, pose
else:
if self.transform is not None:
img = self.transform(img)
return img, pid, camid, img_path
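# --- Editor's note (not part of the original file) ---
# Directory layout implied by the paths and glob patterns above:
#   <root>/occluded-reid/Occluded_REID/{train,query,test}/<pid>_*.tif
# Query images are assigned camid 0 and test (gallery) images camid 1, so the
# two splits are treated as coming from different cameras during evaluation.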
|
[
"numpy.abs",
"os.path.join",
"torchreid.utils.read_image",
"cv2.imread",
"os.path.expanduser"
] |
[((506, 543), 'os.path.join', 'osp.join', (['self.root', 'self.dataset_dir'], {}), '(self.root, self.dataset_dir)\n', (514, 543), True, 'import os.path as osp\n'), ((731, 780), 'os.path.join', 'osp.join', (['self.dataset_dir', '"""Occluded_REID/train"""'], {}), "(self.dataset_dir, 'Occluded_REID/train')\n", (739, 780), True, 'import os.path as osp\n'), ((806, 855), 'os.path.join', 'osp.join', (['self.dataset_dir', '"""Occluded_REID/query"""'], {}), "(self.dataset_dir, 'Occluded_REID/query')\n", (814, 855), True, 'import os.path as osp\n'), ((883, 931), 'os.path.join', 'osp.join', (['self.dataset_dir', '"""Occluded_REID/test"""'], {}), "(self.dataset_dir, 'Occluded_REID/test')\n", (891, 931), True, 'import os.path as osp\n'), ((2439, 2459), 'torchreid.utils.read_image', 'read_image', (['img_path'], {}), '(img_path)\n', (2449, 2459), False, 'from torchreid.utils import read_image\n'), ((457, 477), 'os.path.expanduser', 'osp.expanduser', (['root'], {}), '(root)\n', (471, 477), True, 'import os.path as osp\n'), ((1689, 1716), 'os.path.join', 'osp.join', (['dir_path', '"""*.tif"""'], {}), "(dir_path, '*.tif')\n", (1697, 1716), True, 'import os.path as osp\n'), ((2649, 2691), 'os.path.join', 'os.path.join', (['self.pose_dir', 'pose_pic_name'], {}), '(self.pose_dir, pose_pic_name)\n', (2661, 2691), False, 'import os\n'), ((2711, 2758), 'cv2.imread', 'cv2.imread', (['pose_pic_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(pose_pic_path, cv2.IMREAD_GRAYSCALE)\n', (2721, 2758), False, 'import cv2\n'), ((2881, 2910), 'numpy.abs', 'np.abs', (['(pose[:, :, 18:] - 128)'], {}), '(pose[:, :, 18:] - 128)\n', (2887, 2910), True, 'import numpy as np\n'), ((1376, 1421), 'os.path.join', 'osp.join', (['self.data_dir', '"""occluded_body_pose"""'], {}), "(self.data_dir, 'occluded_body_pose')\n", (1384, 1421), True, 'import os.path as osp\n'), ((1495, 1537), 'os.path.join', 'osp.join', (['self.data_dir', '"""whole_body_pose"""'], {}), "(self.data_dir, 'whole_body_pose')\n", (1503, 1537), True, 'import os.path as osp\n')]
|
import numpy as np
import tensorly as tl
import tensorflow as tf
from tensorflow.keras import models
import torch
def output_channel_decomposition_conv_layer(
layers,
rank=None,
):
tl.set_backend("tensorflow")
layer = layers[0]
weights = np.asarray(layer.get_weights()[0])
bias = layer.get_weights()[1] if layer.use_bias else None
layer_data = tl.tensor(weights)
# print(f"Original output channels is : {layer_data.shape[-1]},\
# Estimate output channels is : {rank[0]}")
rank = rank[0]
dim = layer_data.shape
layer_data = np.asarray(layer_data)
layer_data = layer_data.reshape(dim[0]*dim[1]*dim[2], -1)
layer_data = torch.tensor(layer_data)
N, sigmaVH, C = torch.svd(layer_data)
rank = rank if rank < N.shape[1] else N.shape[1]
N = N[:, :rank]
C = np.transpose(C)
C = C[:rank, :]
sigmaVH = sigmaVH[:rank]
sigmaVH = torch.sqrt(sigmaVH)
N = np.asarray(N).dot(np.diag(sigmaVH))
C = np.diag(sigmaVH).dot(C)
N = N.reshape(dim[0], dim[1], dim[2], rank)
C = C.reshape(1, 1, rank, dim[3])
print(f"N has shape {N.shape}, C has shape {C.shape}")
new_layers = from_tensor_to_layers([C, N], layer, bias)
return new_layers
def from_tensor_to_layers(
tensors,
layers,
bias,
):
    '''
    Transform the decomposed weight tensors back into Keras layers.
    Key arguments:
    tensors -- data of the decomposed layer, given as [V, H]
    layers -- the original layer being replaced
    bias -- bias of the original layer
    Return:
    new_layer, new_weight -- the two replacement layers and their weights
    '''
layer = layers
[V, H] = tensors
bias = layer.get_weights()[1] if layer.use_bias else None
first_layer = tf.keras.layers.Conv2D(
name=layer.name+"first",
filters=H.shape[3], kernel_size=[H.shape[0], H.shape[1]],
strides=layer.strides, padding=(layer.padding),
dilation_rate=layer.dilation_rate, use_bias=False,
input_shape=layer.input_shape[1:])
last_layer = tf.keras.layers.Conv2D(
name=layer.name+"last",
filters=V.shape[3], kernel_size=[V.shape[0], V.shape[1]],
padding=(layer.padding), dilation_rate=layer.dilation_rate,
use_bias=layer.use_bias, activation=layer.activation)
new_weight = [H, V]
if layer.use_bias:
new_weight.append(bias)
new_layer = [first_layer, last_layer]
return new_layer, new_weight
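# --- Editor's sketch (not part of the original module) ---
# Decompose a small, freshly built Conv2D layer with the SVD routine above.
# The input size (8x8x3), the 16 filters and rank=[4] are toy assumptions,
# not values taken from the original repository.
if __name__ == "__main__":
    toy_input = tf.keras.Input(shape=(8, 8, 3))
    toy_conv = tf.keras.layers.Conv2D(16, 3, padding="same", name="toy_conv")
    _ = toy_conv(toy_input)  # call once so the layer is built and has weights
    (first_layer, last_layer), new_weights = output_channel_decomposition_conv_layer(
        [toy_conv], rank=[4])
    print(first_layer.name, last_layer.name)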
|
[
"tensorflow.keras.layers.Conv2D",
"torch.svd",
"torch.sqrt",
"numpy.asarray",
"numpy.transpose",
"tensorly.set_backend",
"tensorly.tensor",
"numpy.diag",
"torch.tensor"
] |
[((211, 239), 'tensorly.set_backend', 'tl.set_backend', (['"""tensorflow"""'], {}), "('tensorflow')\n", (225, 239), True, 'import tensorly as tl\n'), ((391, 409), 'tensorly.tensor', 'tl.tensor', (['weights'], {}), '(weights)\n', (400, 409), True, 'import tensorly as tl\n'), ((596, 618), 'numpy.asarray', 'np.asarray', (['layer_data'], {}), '(layer_data)\n', (606, 618), True, 'import numpy as np\n'), ((698, 722), 'torch.tensor', 'torch.tensor', (['layer_data'], {}), '(layer_data)\n', (710, 722), False, 'import torch\n'), ((743, 764), 'torch.svd', 'torch.svd', (['layer_data'], {}), '(layer_data)\n', (752, 764), False, 'import torch\n'), ((846, 861), 'numpy.transpose', 'np.transpose', (['C'], {}), '(C)\n', (858, 861), True, 'import numpy as np\n'), ((925, 944), 'torch.sqrt', 'torch.sqrt', (['sigmaVH'], {}), '(sigmaVH)\n', (935, 944), False, 'import torch\n'), ((1657, 1909), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'name': "(layer.name + 'first')", 'filters': 'H.shape[3]', 'kernel_size': '[H.shape[0], H.shape[1]]', 'strides': 'layer.strides', 'padding': 'layer.padding', 'dilation_rate': 'layer.dilation_rate', 'use_bias': '(False)', 'input_shape': 'layer.input_shape[1:]'}), "(name=layer.name + 'first', filters=H.shape[3],\n kernel_size=[H.shape[0], H.shape[1]], strides=layer.strides, padding=\n layer.padding, dilation_rate=layer.dilation_rate, use_bias=False,\n input_shape=layer.input_shape[1:])\n", (1679, 1909), True, 'import tensorflow as tf\n'), ((2015, 2246), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', ([], {'name': "(layer.name + 'last')", 'filters': 'V.shape[3]', 'kernel_size': '[V.shape[0], V.shape[1]]', 'padding': 'layer.padding', 'dilation_rate': 'layer.dilation_rate', 'use_bias': 'layer.use_bias', 'activation': 'layer.activation'}), "(name=layer.name + 'last', filters=V.shape[3],\n kernel_size=[V.shape[0], V.shape[1]], padding=layer.padding,\n dilation_rate=layer.dilation_rate, use_bias=layer.use_bias, activation=\n layer.activation)\n", (2037, 2246), True, 'import tensorflow as tf\n'), ((971, 987), 'numpy.diag', 'np.diag', (['sigmaVH'], {}), '(sigmaVH)\n', (978, 987), True, 'import numpy as np\n'), ((953, 966), 'numpy.asarray', 'np.asarray', (['N'], {}), '(N)\n', (963, 966), True, 'import numpy as np\n'), ((997, 1013), 'numpy.diag', 'np.diag', (['sigmaVH'], {}), '(sigmaVH)\n', (1004, 1013), True, 'import numpy as np\n')]
|
""" Main sims module to read and parse Cameca (nano)SIMS data files. """
import bz2
import collections
import copy
import datetime
import gzip
import io
import lzma
import numpy as np
import os
import re
import tarfile
import warnings
import xarray
from struct import unpack
# py7zlib is needed for 7z
try:
import py7zlib
except ImportError:
py7zlib = None
from sims.utils import format_species
from sims.transparent import TransparentOpen
__all__ = ['SIMSReader', 'SIMS']
_stage_scan_types = {
0: 'stage scan',
1: 'beam scan',
2: 'image scan',
}
_file_types = {
21: 'depth profile',
22: 'line scan, stage control',
25: 'E0S/E0W scan',
26: 'spot',
27: 'image',
29: 'grain mode image',
31: 'HMR/SIBC/trolley step scan',
32: 'PHD scan',
34: 'tools scan',
35: 'beam stability/leak current',
39: 'line scan image',
40: 'line scan, beam control',
41: 'stage scan image'
}
_supported_file_types = {21, 22, 26, 27, 29, 31, 35, 39, 40, 41}
_peakcenter_sides = {
0: 'left',
1: 'right',
2: 'both'
}
_exit_slit_labels = {
0: 'slit 1',
1: 'slit 2',
2: 'slit 3'
}
_exit_slit_size_labels = {
0: 'normal',
1: 'large',
2: 'extra large'
}
_detectors = {
0: 'EM',
1: 'FC'
}
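# --- Editor's usage sketch (not part of the original module) ---
# Low-level flow with SIMSReader, following the docstrings below: open the
# file yourself and call peek() before read_header(). 'image.im' is a
# hypothetical file name; the SIMS class exported above presumably wraps the
# file handling (see the TransparentOpen import).
#
# with open('image.im', 'rb') as fh:
#     s = SIMSReader(fh, filename='image.im')
#     s.peek()
#     s.read_header()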
class SIMSReader(object):
""" Base class for reading a SIMS file. """
def __init__(self, fileobject, filename=''):
""" This class object does not open or close files, and nothing will
be read by default. An empty header and data array are created.
Provides all methods needed for reading a SIMS file.
"""
self.fh = fileobject
self.filename = filename
self.header = {}
self.data = None
self._data_corr = None
self._bo = ''
def __deepcopy__(self, memo):
""" Assist deep-copying of this class.
Everything except the filehandle is copied.
"""
new = type(self)(None)
for k in self.__dict__:
if k in ('fh', 'fh_archive'):
new.__dict__[k] = None
else:
new.__dict__[k] = copy.deepcopy(self.__dict__[k], memo)
return new
def peek(self):
""" Peek into image file and determine basic file information.
Usage: s.peek()
Reads first 12 bytes of file opened as s.fh, determines byte order
            (endianness), file type, file version, and header size. Information
is stored in s.header.
"""
self.fh.seek(0)
snip = self.fh.read(12)
if unpack('<i', snip[4:8])[0] <= max(_supported_file_types):
self.header['byte order'] = '<'
self._bo = '<'
elif unpack('>i', snip[4:8])[0] <= max(_supported_file_types):
self.header['byte order'] = '>'
self._bo = '>'
else:
raise TypeError("Cannot determine file endianess.")
self.header['file version'], self.header['file type'], \
self.header['header size'] = \
unpack(self._bo + '3i', snip)
if self.header['file type'] not in _supported_file_types:
msg = "File of type {} is not supported at the moment."
msg = msg.format(self.header['file type'])
raise NotImplementedError(msg)
def read_header(self):
""" Read the image header.
Usage: s.read_header()
Reads the header from the file object stored in s.fh. Extracts as
much information as possible and stores it in a Python dictionary
in s.header. At least byte order, header size, file type, and file
version need to be known before the header can be read: use
s.peek() before s.read_header().
"""
# Read entire header into memory in one read to minimize Disk I/O.
self.fh.seek(0)
hdr = self.fh.read(self.header['header size'])
# Find several markers in the byte-string
# Each of these may occur more than once, find last.
polylist_pos = hdr.rfind(b'Poly_list\x00')
champslist_pos = hdr.rfind(b'Champs_list\x00')
offsetlist_pos = hdr.rfind(b'Offset_list\x00')
        # Find the first occurrence of these.
# analparam_pos = hdr.find(b'Anal_param\x00')
analparamnano_pos = hdr.find(b'Anal_param_nano\x00')
analparamnanobis_pos = hdr.find(b'Anal_param_nano_bis\x00')
# Turn byte-string into BytesIO file-like object; reading and
# keeping track of where we are is easier that way than trying to
# slice byte-string as an array and keeping track of indices.
hdr = io.BytesIO(hdr)
# Main header
hdr.seek(12)
self.header.update(self._main_header(hdr))
# NanoSIMS header, starts with PolyList/ChampsList/OffsetList
# The following configurations have been found in the wild, so far:
# 1. NS header
# 2. PL, NS header
# 3. PL, CL, OL, NS header
# 4. PL, CL, OL, partial NS header, PL, NS header, PL, CL, OL,
# partial NS header, PL, NS header
# Note: I have not seen any *lists with contents (only length 0).
# From OpenMIMS documentation I know that PolyList is as list of
# Species dicts, but don't know how to read ChampsList or OffsetList.
if polylist_pos < 0:
# Case 1: No PL marker, so far only found for Real Time Images,
# beam stability, or secondary ion beam centering files.
if (self.header['analysis type'].endswith('rti') or
self.header['file type'] == 35):
hdr.seek(216, 1)
elif self.header['file type'] == 31:
if (self.header['analysis type'].endswith('hmr') or
self.header['analysis type'].endswith('trolley step scan')):
hdr.seek(120, 1)
else:
# secondary ion beam
hdr.seek(600, 1)
else:
raise NotImplementedError('No PolyList marker found in header '
                                          'and not an RTI image. Don\'t know '
'how to continue.')
elif (champslist_pos < 0 and offsetlist_pos < 0):
# Case 2: PL, NS header
self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)
elif (polylist_pos < champslist_pos < offsetlist_pos):
# Case 3: PL, CL, OL, NS header
self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)
self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)
self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)
elif (champslist_pos < offsetlist_pos < polylist_pos):
# Case 4: PL, CL, OL, partial NS header, PL, NS header
# with possible repeat
self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)
self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)
self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)
else:
raise NotImplementedError(
                'An unknown order of the Poly/Champs/Offset Lists occurred.\n'
'Positions: PL = {}, CL = {}, OL = {}'
''.format(polylist_pos, champslist_pos, offsetlist_pos))
self.header['NanoSIMSHeader'] = self._nanosims_header(hdr)
# How much to skip? Chomping does not work; what if first value is 0?
# This is correct so far, for nsheader v8 and 9
hdr.seek(948, 1)
self.header['BFields'] = []
for b in range(self.header['NanoSIMSHeader']['b fields']):
bf = self._bfield(hdr)
bf['counting frame time'] = bf['time per pixel'] * \
self.header['NanoSIMSHeader']['counting frame height'] * \
self.header['NanoSIMSHeader']['counting frame width']
bf['scanning frame time'] = bf['time per pixel'] * \
self.header['NanoSIMSHeader']['scanning frame height'] * \
self.header['NanoSIMSHeader']['scanning frame width']
bf['working frame time'] = bf['time per pixel'] * \
self.header['NanoSIMSHeader']['working frame height'] * \
self.header['NanoSIMSHeader']['working frame width']
self.header['BFields'].append(bf)
# End nanosims_header/bfield based on Poly_list position
# Analytical parameters
# anal_param is not in OpenMIMS at all, represents file
# Cameca NanoSIMS Data/raw_spec/cur_anal_par
# However, only few useful things in this section, all of
# which are also in other sections. Skip.
# if analparam_pos < 0:
# msg = 'Anal_param not found in header, skipping.'
# warnings.warn(msg)
# else:
# hdr.seek(analparam_pos + 24)
# print(analparam_pos)
# d = {}
# d['primary ion'], d['primary current begin'], \
# d['primary current end'], d['raster'], \
# d['X 00 always 1.0'], \
# d['X 01 always 1'], d['X 02 always 0'], \
# d['X 03 always 1'], d['X 04 always 0'], \
# d['X 05 always 0'], d['X 06 (not0 always 0'], \
# d['X 07 (not) always 0'], d['X 08 always 0'], \
# d['pressure 1'], d['e0w'], d['X 09 always 35 or #'], \
# d['X 10 junk'], \
# d['X 11 always 1'], d['X 12 always 0'], \
# d['X 13 always 1'], d['X 14 always 0'], \
# d['X 15 always 0'], d['X 16 always 0'], \
# d['X 17 always 0'], d['X 18 always 0'], \
# d['X 19 always 0'], d['X 20 always 300'], \
# d['X 21'], d['X 22'], d['X 23'], d['X 24'], \
# d['pressure 2'], d['X 25 junk'] = \
# unpack(self._bo + '24s 4d 8i 48s d i 28s 14i 8s 176s', hdr.read(416))
#
# d['pressure 1'] = self._cleanup_string(d['pressure 1'])
# d['pressure 2'] = self._cleanup_string(d['pressure 2'])
# d['primary ion'] = self._cleanup_string(d['primary ion'])
#
# self.header['AnalParam'] = d
# Called AnalyticalParamNano AND AnalysisParamNano in OpenMIMS.
# Here, split out Primary and Secondary beam.
# Represents the file Cameca NanoSIMS Data/raw_spec/cur_anal_par_nano
if analparamnano_pos < 0:
msg = 'Anal_param_nano not found in header, '
msg += 'don\'t know where PrimaryBeam section starts.'
warnings.warn(msg)
else:
hdr.seek(analparamnano_pos + 16)
self.header['analysis version'], self.header['n50large'], \
self.header['comment'] = \
unpack(self._bo + '2i 8x 256s', hdr.read(272))
self.header['n50large'] = bool(self.header['n50large'])
self.header['comment'] = self._cleanup_string(self.header['comment'])
self.header['PrimaryBeam'] = self._primary_beam(hdr)
self.header['SecondaryBeam'] = self._secondary_beam(hdr)
self.header['Detectors'] = self._detectors1(hdr)
self.header['SecondaryBeam']['E0S'] = self.header['Detectors'].pop('E0S')
self.header['SecondaryBeam']['pressure multicollection chamber'] = \
self.header['Detectors'].pop('pressure multicollection chamber')
# Add overall mode of machine, based on E0W
if self.header['SecondaryBeam']['E0W'] < 0:
self.header['polarity'] = '+'
else:
self.header['polarity'] = '-'
# Combine pixel size from NanoSIMSHeader and raster from PrimaryBeam
# Prevent ZeroDivisionError if undefined
wfw = self.header['NanoSIMSHeader']['working frame width']
if not wfw:
wfw = 1
self.header['NanoSIMSHeader']['working frame raster'] = \
self.header['PrimaryBeam']['raster']
self.header['NanoSIMSHeader']['scanning frame raster'] = \
self.header['NanoSIMSHeader']['working frame raster'] * \
self.header['NanoSIMSHeader']['scanning frame width'] / wfw
self.header['NanoSIMSHeader']['counting frame raster'] = \
self.header['NanoSIMSHeader']['working frame raster'] * \
self.header['NanoSIMSHeader']['counting frame width'] / wfw
# Header for non-nano SIMS
magic = unpack(self._bo + 'i', hdr.read(4))[0]
if magic != 2306:
msg = 'SIMSHeader magic number not found here at byte {}.'
msg = msg.format(hdr.tell()-4)
raise ValueError(msg)
self.header['SIMSHeader'] = self._sims_header(hdr)
if self.header['analysis version'] >= 5:
if analparamnanobis_pos < 0:
msg = 'Anal_param_nano_bis not found in header, '
msg += 'don\'t know where second Detectors section starts.'
warnings.warn(msg)
else:
hdr.seek(analparamnanobis_pos + 24)
self.header['Detectors'].update(self._detectors2(hdr))
xl = self.header['Detectors'].pop('exit slit xl')
for n in range(7):
det = self.header['Detectors']['Detector {}'.format(n+1)]
w = list(det['exit slit widths'])
w[2] = xl[5*n:5*(n+1)]
det['exit slit widths'] = tuple(w)
h = list(det['exit slit heights'])
h[2] = xl[5*(n+1):5*(n+2)]
det['exit slit heights'] = tuple(h)
# Presets
self.header['Presets'] = self._presets(hdr)
# End Detectors pt 2 based on anal_param_nano_bis position
# Last part of detectors
if self.header['analysis version'] >= 6:
d3 = self._detectors3(hdr)
self.header['Detectors']['TIC'] = d3.pop('TIC')
for k, v in d3.items():
self.header['Detectors'][k].update(v)
# End PrimaryBeam/SecondaryBeam/Presets/Detectors based on anal_param_nano position
# Image header, at end of overall header
if self.header['file type'] == 26:
hdr.seek(-176, 2)
self.header['Isotopes'] = self._isotopes_hdr(hdr)
elif self.header['file type'] in (21, 22, 31, 35):
# no image header for line scan or beam stability
pass
else:
hdr.seek(-84, 2)
self.header['Image'] = self._image_hdr(hdr)
# Done reading header. Check for and read external files for extra info.
if os.path.exists(os.path.splitext(self.filename)[0] + '.chk_is'):
self._read_chk_is()
def read_data(self):
""" Read the image data.
Usage: s.read_data()
Reads all the data from the file object in s.fh. The data is stored
in s.data. The file header must be read before data can be read.
Data are stored as a xarray DataArray. Image data have coordinates:
        species, frame, y, and x. s.data.attrs['unit'] holds the unit of
the data, which is either 'counts' or 'counts/s'.
Isotope data contain the uncorrected, raw data, read from the
.is_txt file (if available). The corrected data (corrections done
during measurement), are read from the .is file and stored under
s._data_corr.
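        Example (illustrative sketch; assumes an image file has been read and
        that 'label' stands for an actual entry of s.header['label list']):
            img = s.data.sel(species='label')   # (frame, y, x) cube for one species
            summed = img.sum(dim='frame')       # collapse all frames into one image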
"""
if not self.header['data included']:
pass
elif self.header['file type'] in (21, 26):
self._isotope_data()
if os.path.exists(self.filename + '_txt'):
self._isotope_txt_data()
elif self.header['file type'] == 22:
# line scan types, no ImageHeader
warnings.warn('No data read for line scan, fix')
pass
elif self.header['file type'] in (31, 35):
self._beamstability_data()
else:
self._image_data()
def copy(self):
""" Return a copy of the SIMS object.
Usage: s2 = s.copy()
The copy is always a deepcopy, meaning that all data and the full
header are copied, there are no references to the original. This
way the data of the copy can be altered without the data of the
original being changed as well.
The only exception is the filehandle(s) fh (and fh_archive): they
cannot be copied. File operations such as read_header and read_data
is therefore not possible after copy.
"""
return copy.deepcopy(self)
def _main_header(self, hdr):
""" Internal function; reads variable number of bytes;
returns main header dict
"""
d = {}
# Called readDefAnalysis in OpenMIMS
d['sample type'], d['data included'], d['sample x'], d['sample y'], \
d['analysis type'], d['user name'], d['sample z'], date, time = \
unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))
d['data included'] = bool(d['data included'])
d['user name'] = self._cleanup_string(d['user name'])
d['analysis type'] = self._cleanup_string(d['analysis type']).lower()
date = self._cleanup_string(date)
time = self._cleanup_string(time)
d['date'] = self._cleanup_date(date + ' ' + time)
if self.header['file type'] in (27, 29, 39):
# Called MaskImage/readMaskIm in OpenMIMS
d['original filename'], d['analysis duration'], d['frames'], \
d['scan type'], d['magnification'], d['size type'], \
d['size detector'], d['beam blanking'], d['presputtering'], \
d['presputtering duration'] = \
unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))
d['AutoCal'] = self._autocal(hdr)
d['HVControl'] = {}
d['HVControl']['hvcontrol enabled'] = False
elif self.header['file type'] in (22, 41):
# Called MaskSampleStageImage/readMaskIss in OpenMIMS
d['original filename'], d['analysis duration'], d['scan type'], \
d['steps'], d['step size x'], d['step size y'], d['step size?'], \
d['step waittime'], d['frames'], d['beam blanking'], \
d['presputtering'], d['presputtering duration'] = \
unpack(self._bo + '16s 6i d 4i', hdr.read(64))
d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))
d['AutoCal'] = self._autocal(hdr)
d['HVControl'] = self._hvcontrol(hdr)
# Don't know if this unused byte needs to go after HVControl or after SigRef.
hdr.seek(4, 1)
elif self.header['file type'] in (21, 26):
# Not in OpenMIMS
# this bit same as image, 1 extra unused/unknown
d['original filename'], d['analysis duration'], d['frames'], \
d['scan type'], d['magnification'], d['size type'], \
d['size detector'], d['beam blanking'], d['presputtering'], \
d['presputtering duration'] = \
unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))
# this bit same as stage scan
d['AutoCal'] = self._autocal(hdr)
d['HVControl'] = self._hvcontrol(hdr)
# 24 bytes unknown, not sure if they go here or before AutoCal
hdr.seek(24, 1)
elif self.header['file type'] == 31:
# Don't know if this is correct, all 0s anyway
d['original filename'], d['scan type'], \
d['beam blanking'], d['presputtering'] = \
unpack(self._bo + '16s 3i 4x', hdr.read(32))
elif self.header['file type'] == 35:
d['original filename'], d['scan type'], d['analysis duration'], \
d['frames'], d['beam blanking'], d['presputtering'] = \
unpack(self._bo + '16s 5i 40x', hdr.read(76))
d['AutoCal'] = self._autocal(hdr)
d['HVControl'] = self._hvcontrol(hdr)
else:
raise TypeError('What type of image are you? {}'.format(self.header['file type']))
# Continue main header for all types
d['SigRef'] = self._sigref(hdr)
d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]
# scan type is set for stage scan analysis, set others
if isinstance(d['scan type'], int):
if d['scan type'] == 0:
d['scan type'] = ''
else:
d['scan type'] = str(d['scan type'])
d['beam blanking'] = bool(d['beam blanking'])
d['presputtering'] = bool(d['presputtering'])
d['original filename'] = self._cleanup_string(d['original filename'])
if self.header['file type'] in (21, 26, 27, 29, 35, 39):
if self.header['file version'] >= 4108:
n = 60
else:
n = 10
elif self.header['file type'] in (22, 31, 40, 41):
n = 20
else:
n = 0
# Not sure what this is, memory pointers? Not needed.
# d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))
hdr.seek(n*4, 1)
if self.header['file type'] in (21, 22, 26, 40, 41, 35):
hdr.seek(4, 1) # 4 bytes unused
# Mass table, dict by species label.
d['MassTable'] = collections.OrderedDict()
for m in range(d['masses']):
mi = {}
mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \
mi['detector'], mi['wait time'], mi['frame count time'] = \
unpack(self._bo + '2i d 2i 2d', hdr.read(40))
if self.header['file type'] == 31:
if d['analysis type'].endswith('trolley step scan'):
# start and end are in mm, step is in μm; convert to mm
mi['radius start'], mi['radius end'], \
mi['radius step'], mi['b field bits'] = \
unpack(self._bo + '3d i', hdr.read(28))
mi['radius step'] /= 1000
else:
mi['voltage start'], mi['voltage end'], \
mi['voltage step'], mi['b field bits'] = \
unpack(self._bo + '3d i', hdr.read(28))
else:
mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))
mi.update(self._species(hdr))
if self.header['file type'] == 31:
hdr.seek(4, 1)
# Add correction controls, my own addition.
mi['background corrected'] = False
mi['deadtime corrected'] = False
mi['yield corrected'] = False
label = mi.pop('label')
# This is true for NS50L and file version 4108.
# Anywhere else different?
# Maybe confirm this with the Trolleys dict,
# there is an Esi trolley.
if mi['trolley index'] == 8:
label = 'SE'
d['MassTable'][label] = mi
# Create a few convenient lists
d['label list'] = tuple(d['MassTable'].keys())
d['label list fmt'] = tuple(format_species(m) for m in d['label list'])
d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])
return d
def _autocal(self, hdr):
""" Internal function; reads 76 bytes; returns AutoCal dict """
# Called AutoCal in OpenMIMS source
# OpenMIMS says extra unused byte after autocal enabled
# for stage scan image; not true
d = {}
d['autocal enabled'], d['label'], d['begin'], d['duration'] = \
unpack(self._bo + 'i 64s 2i', hdr.read(76))
d['autocal enabled'] = bool(d['autocal enabled'])
d['label'] = self._cleanup_string(d['label'])
return d
def _hvcontrol(self, hdr):
""" Internal function; reads 112 bytes, returns HVControl dict. """
# Called readHvControl by OpenMIMS
d = {}
d['hvcontrol enabled'], d['label'], d['begin'], \
d['duration'], d['limit low'], d['limit high'], d['step'], \
d['bandpass width'], d['count time'] = \
unpack(self._bo + 'i 64s 2i 3d i d', hdr.read(112))
d['hvcontrol enabled'] = bool(d['hvcontrol enabled'])
d['label'] = self._cleanup_string(d['label'])
return d
def _sigref(self, hdr):
""" Internal function; reads 160 bytes; returns SigRef dict """
# Called SigRef in OpenMIMS
d = {}
d['sigref enabled'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])
d['Species'] = self._species(hdr)
d['detector'], d['offset'], d['quantity'] = \
unpack(self._bo + '3i', hdr.read(12))
return d
def _species(self, hdr):
""" Internal function; reads 144 bytes, return Species dict. """
# Called PolyAtomic in OpenMIMS source
d = {}
d['numeric flag'], d['numeric value'], d['elements'], \
d['charges'], d['charge label'], d['label'] = \
unpack(self._bo + '4i c 64s', hdr.read(81))
d['label'] = self._cleanup_string(d['label'])
d['charge label'] = self._cleanup_string(d['charge label'])
# OpenMIMS says 3 bytes AFTER el.table are unused; this is wrong,
# 3 bytes BEFORE el.table (b 81-84) are unused. n_elements (here:
# atomic number) is element number in periodic table rather than
# number of elements. n_isotopes (here: isotope number) is offset from
# main atomic Z number. Also: collapse ElementTable (Tabelts) into
# main dict, too many layers.
hdr.seek(3, 1)
atoms = unpack(self._bo + '15i', hdr.read(60))
d['atomic number'] = tuple(n for n in atoms[::3])
d['isotope number'] = tuple(n for n in atoms[1::3])
d['stoich number'] = tuple(n for n in atoms[2::3])
return d
def _pco_list(self, hdr, name, pos):
""" Internal function; reads 'name'list, returns 'Name'List list.
Name is one of "poly", "champs", or "offset". pos is the byte-
        position where the marker 'Name'List starts.
"""
if name not in ('poly', 'champs', 'offset'):
raise TypeError('Name must be one of "poly", "champs", or "offset".')
hdr.seek(pos + 16)
length = unpack(self._bo + 'i', hdr.read(4))[0]
d = []
for p in range(length):
if name == 'poly':
d.append(self._species(hdr))
else:
raise NotImplementedError(
'{}List is non-null, don\'t know how to read.'
''.format(name.capitalize()))
hdr.seek(4, 1)
return d
def _nanosims_header(self, hdr):
""" Internal function; reads 604 bytes; returns NanoSIMSHeader dict """
# Called MaskNano in OpenMIMS; BFieldTab separated out; create extra sub-dict PeakCenter
d = {}
d['PeakCenter'] = {}
d['nanosimsheader version'], d['regulation mode'], d['mode'], \
d['grain mode'], d['semigraphic mode'], d['stage delta x'], \
d['stage delta y'], d['working frame width'], \
d['working frame height'], d['scanning frame x'], \
d['scanning frame width'], d['scanning frame y'], \
d['scanning frame height'], d['counting frame x start'], \
d['counting frame x end'], d['counting frame y start'], \
d['counting frame y end'], d['detector type'], d['electron scan'], \
d['scanning mode'], d['beam blanking'], \
d['PeakCenter']['peakcenter enabled'], d['PeakCenter']['start'], \
d['PeakCenter']['frequency'], d['b fields'] = \
unpack(self._bo + '25i', hdr.read(100))
d['PeakCenter']['peakcenter enabled'] = bool(d['PeakCenter']['peakcenter enabled'])
d['regulation mode'] = bool(d['regulation mode'])
d['grain mode'] = bool(d['grain mode'])
d['semigraphic mode'] = bool(d['semigraphic mode'])
d['scanning mode'] = bool(d['scanning mode'])
# Set a few extra variables.
d['counting frame width'] = d['counting frame x end'] - d['counting frame x start'] + 1
d['counting frame height'] = d['counting frame y end'] - d['counting frame y start'] + 1
# Found in at least one version (file v11, nsHeader v8) a repeat of
# Poly_list and this first part of nanoSIMSHeader. Total of repeat
# adds up to 288. After last Poly_list, 288 byte padding zone, not all
# null-bytes.
hdr.seek(288, 1)
# Is this the nPrintRed from OpenMIMS?
d['print results'] = bool(unpack(self._bo + 'i', hdr.read(4))[0])
d['SibCenterHor'] = self._sib_center(hdr)
d['SibCenterVert'] = self._sib_center(hdr)
# Duplicate and store these two in sub dicts
b_field_index, has_sib_center = \
unpack(self._bo + '2i', hdr.read(8))
if b_field_index < 0:
b_field_index = None
has_sib_center = bool(has_sib_center)
d['SibCenterHor']['b field index'] = b_field_index
d['SibCenterVert']['b field index'] = b_field_index
d['SibCenterHor']['sib center enabled'] = has_sib_center
d['SibCenterVert']['sib center enabled'] = has_sib_center
d['EnergyCenter'] = self._energy_center(hdr)
d['E0SCenter'] = self._e0s_center(hdr)
d['EnergyCenter']['wait time'], d['presputtering raster'], \
d['PeakCenter']['E0P offset'], d['E0SCenter']['steps'], \
d['baseline measurement'], d['baseline offset'], \
d['baseline frequency'] = \
unpack(self._bo + '5i d i', hdr.read(32))
return d
def _sib_center(self, hdr):
""" Internal function; reads 40 bytes; returns SibCenter dict """
# Called SecIonBeamNano in OpenMIMS
d = {}
d['detector'], d['start'], d['step size'], d['center'], \
d['50% width'], d['count time'] = \
unpack(self._bo + '3i 4x 2d i 4x', hdr.read(40))
if d['detector'] < 0:
d['detector'] = None
d['count time'] /= 100 # 10 ms increments to seconds
return d
def _energy_center(self, hdr):
""" Internal function; reads 52 bytes; returns EnergyCenter dict """
# Called EnergyNano in OpenMIMS
# Added b field index, energy center enabled, and frequency from MaskNano
d = {}
d['detector'], d['start'], d['step size'], d['center'], \
d['delta'], d['count time'], d['b field index'], \
d['energy center enabled'], d['frequency'] = \
unpack(self._bo + '3i 4x 2d i 4x 3i', hdr.read(52))
d['energy center enabled'] = bool(d['energy center enabled'])
d['count time'] /= 100 # 10 ms increments to seconds
if d['detector'] < 0:
d['detector'] = None
if d['b field index'] < 0:
d['b field index'] = None
return d
def _e0s_center(self, hdr):
""" Internal function; reads 40 bytes; returns E0sCenter dict """
# Called E0SNano in OpenMIMS
# b field index and e0s center enabled added to sub dict from main nano header
d = {}
d['b field index'], d['detector'], d['start'], \
d['step size'], d['count time'], d['center'], \
d['80% width'], d['E0S center enabled'] = \
unpack(self._bo + '5i 2d i', hdr.read(40))
d['E0S center enabled'] = bool(d['E0S center enabled'])
d['count time'] /= 100 # 10 ms increments to seconds
if d['detector'] < 0:
d['detector'] = None
if d['b field index'] < 0:
d['b field index'] = None
return d
def _bfield(self, hdr):
""" Internal function; reads 2840 bytes; returns BField dict """
# Called TabBFieldNano in OpenMIMS
d = {}
d['b field enabled'], d['b field bits'], d['wait time'], \
d['time per pixel'], d['time per step'], \
d['wait time computed'], d['E0W offset'], d['Q'], \
d['LF4'], d['hex val'], d['frames per bfield'] = \
unpack(self._bo + '4i d 6i', hdr.read(48))
d['b field enabled'] = bool(d['b field enabled'])
d['wait time computed'] = bool(d['wait time computed'])
d['wait time'] = d['wait time']/1e6
d['time per pixel'] = d['time per pixel']/1e6
# 8 bytes unused
hdr.seek(8, 1)
# There appear to be 12 trolleys stored.
# The following labels are true for NS50L and file version 4108.
# Anywhere else different? What are labels for missing?
# idx trolley idx in header (if all enabled)
# 0 FCs? -2
# 1 T1 0
# 2 T2 1
# 3 T3 2
# 4 T4 3
# 5 T5 4
# 6 ? -3
# 7 ? -2
# 8 SE -1
# 9 ? -3
# 10 T6 5
# 11 T7 6
trolleys = []
for t in range(12):
trolleys.append(self._trolley(hdr))
for t in range(12):
trolleys[t].update(self._phd(hdr))
# Add detector index that links trolley to detector and
# trolley names. Don't know how to do this for EMBig, LD etc.
for t in range(12):
if t in (1, 2, 3, 4, 5):
trolleys[t]['trolley label'] = 'Trolley {}'.format(t)
trolleys[t]['detector label'] = 'Detector {}'.format(t)
elif t in (10, 11):
trolleys[t]['trolley label'] = 'Trolley {}'.format(t - 4)
trolleys[t]['detector label'] = 'Detector {}'.format(t - 4)
elif t == 8:
trolleys[t]['trolley label'] = 'SE'
trolleys[t]['detector label'] = 'SE'
else:
trolleys[t]['trolley label'] = 'non-trolley {}'.format(t)
trolleys[t]['detector label'] = ''
d['Trolleys'] = trolleys
return d
def _trolley(self, hdr):
""" Internal function; reads 192 or 208 bytes; returns Trolley dict """
# Called TabTrolleyNano in OpenMIMS
d = {}
# exit slit seems to be incorrect
d['label'], d['mass'], d['radius'], d['deflection plate 1'], \
d['deflection plate 2'], d['detector'], d['exit slit'], \
d['real trolley'], d['cameca trolley index'], \
d['peakcenter index'], d['peakcenter follow'], d['focus'], \
d['hmr start'], d['start dac plate 1'], d['start dac plate 2'], \
d['hmr step'], d['hmr points'], d['hmr count time'], \
d['used for baseline'], d['50% width'], d['peakcenter side'], \
d['peakcenter count time'], d['used for sib center'], \
d['unit correction'], d['deflection'], \
d['used for energy center'], d['used for E0S center'] = \
unpack(self._bo + '64s 2d 8i 2d 6i d 4i d 2i', hdr.read(192))
# 16 extra bytes per trolley entry, not in OpenMIMS
# Only certain versions?
if self.header['file version'] >= 4108:
hdr.seek(16, 1)
# Cleanup
d['label'] = self._cleanup_string(d['label'])
d['used for baseline'] = bool(d['used for baseline'])
d['used for sib center'] = bool(d['used for sib center'])
d['used for energy center'] = bool(d['used for energy center'])
d['used for E0S center'] = bool(d['used for E0S center'])
d['real trolley'] = bool(d['real trolley'])
d['peakcenter side'] = _peakcenter_sides.get(d['peakcenter side'],
str(d['peakcenter side']))
d['detector'] = _detectors.get(d['detector'], str(d['detector']))
d['hmr count time'] /= 100
d['peakcenter count time'] /= 100
        # If the trolley is real and its index is >= 0, it is enabled. If it is real
        # and the index is -1, it is not enabled. If the index is < -1, the trolley
        # should not be real either.
d['trolley enabled'] = bool(d['real trolley']
and d['cameca trolley index'] >= 0)
return d
def _phd(self, hdr):
""" Internal function; reads 24 bytes; returns Phd dict """
# Called PHDTrolleyNano in OpenMIMS
d = {}
d['used for phd scan'], d['phd start'], d['phd step size'], \
d['phd points'], d['phd count time'], d['phd scan repeat'] = \
unpack(self._bo + '6i', hdr.read(24))
d['used for phd scan'] = bool(d['used for phd scan'])
d['phd count time'] /= 100
# 24 extra bytes per phd entry, not in OpenMIMS
# Only certain versions?
if self.header['file version'] >= 4108:
hdr.seek(24, 1)
return d
def _primary_beam(self, hdr):
""" Internal function; reads 552 bytes; returns PrimaryBeam dict """
# Called ApPrimaryNano in OpenMIMS
d = {}
start_position = hdr.tell()
d['source'], d['current start'], d['current end'], d['Lduo'], d['L1'] = \
unpack(self._bo + '8s 4i', hdr.read(24))
# Each widths list is 10 ints long
d['Dduo'] = unpack(self._bo + 'i', hdr.read(4))[0]
d['Dduo widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))
d['D0'] = unpack(self._bo + 'i', hdr.read(4))[0]
d['D0 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))
d['D1'] = unpack(self._bo + 'i', hdr.read(4))[0]
d['D1 widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))
# 4 bytes unused
hdr.seek(4, 1)
d['raster'], d['oct45'], d['oct90'], d['E0P'], \
d['pressure analysis chamber'] = \
unpack(self._bo + '4d 32s', hdr.read(64))
d['source'] = self._cleanup_string(d['source'])
d['pressure analysis chamber'] = self._cleanup_string(d['pressure analysis chamber'])
if self.header['analysis version'] >= 3:
d['L0'] = unpack(self._bo + 'i', hdr.read(4))[0]
if self.header['analysis version'] >= 4:
d['hv cesium'], d['hv duo'] = unpack(self._bo + '2i', hdr.read(8))
# DCs not in OpenMIMS; only in certain release/version?
d['Dcs'] = unpack(self._bo + 'i', hdr.read(4))[0]
d['Dcs widths'] = tuple(unpack(self._bo + '10i', hdr.read(40)))
# skip bytes until total read in this function is 552
# OpenMIMS: size_Ap_primary_nano = 552
# Newer versions have rest filled with \xCC continuation bytes, but
# older versions have null-bytes, but not all bytes are null!!
# The numbers do not seem to represent anything, though, so can be skipped.
hdr.seek(start_position + 552)
return d
def _secondary_beam(self, hdr):
""" Internal function; reads 192 bytes; returns SecondaryBeam dict. """
# Called ApSecondaryNano in OpenMIMS
d = {}
tmp = unpack(self._bo + 'd 42i 2d', hdr.read(192))
d['E0W'], d['ES'] = tmp[:2]
d['ES widths'] = tmp[2:12]
d['ES heights'] = tuple(tmp[12:22])
d['AS'] = tmp[22]
d['AS widths'] = tuple(tmp[23:33])
d['AS heights'] = tuple(tmp[33:43])
d['EnS'], d['EnS width'] = tmp[43:]
return d
def _detectors1(self, hdr):
""" Internal function; reads 808 bytes, returns Detectors dict, part 1. """
d = {}
d['FCs'] = self._exit_slits(hdr)
for n in range(1, 6):
det = 'Detector {}'.format(n)
d[det] = self._exit_slits(hdr)
d['LD'] = {}
d['LD']['exit slit width'], d['LD']['exit slit coeff a'], \
d['LD']['exit slit coeff b'], d['E0S'], \
d['pressure multicollection chamber'], \
d['FCs']['fc background setup positive'], \
d['FCs']['fc background setup negative'] = \
unpack(self._bo + '4d 32s 2i', hdr.read(72))
d['pressure multicollection chamber'] = \
self._cleanup_string(d['pressure multicollection chamber'])
for n in range(1, 6):
det = 'Detector {}'.format(n)
d[det].update(self._electron_multiplier(hdr))
d['LD'].update(self._electron_multiplier(hdr))
d['EMBig'] = self._exit_slits(hdr)
d['EMBig'].update(self._electron_multiplier(hdr))
# 8 bytes unused
hdr.seek(8, 1)
return d
def _detectors2(self, hdr):
""" Internal function; reads 488 bytes, returns Detectors dict, part 2. """
# Called AnalysisParam in OpenMIMS, first part only
# presets separate, last part in _detectors3
d = {}
d['Detector 6'] = self._exit_slits(hdr)
d['Detector 6'].update(self._electron_multiplier(hdr))
d['Detector 7'] = self._exit_slits(hdr)
d['Detector 7'].update(self._electron_multiplier(hdr))
d['exit slit xl'] = unpack(self._bo + '70i', hdr.read(280))
return d
def _detectors3(self, hdr):
""" Internal function; reads 100 bytes, returns Detectors dict, part 3. """
# Called AnalysisParam in OpenMIMS, only last part
d = {}
d['TIC'] = self._electron_multiplier(hdr)
for n in range(1, 8):
det = 'Detector {}'.format(n)
d[det] = {}
d[det]['fc background setup positive'], \
d[det]['fc background setup negative'] = \
unpack(self._bo + '2i', hdr.read(8))
for n in range(1, 8):
det = 'Detector {}'.format(n)
det_type = unpack(self._bo + 'i', hdr.read(4))[0]
d[det]['detector'] = _detectors.get(det_type, str(det_type))
return d
def _exit_slits(self, hdr):
""" Internal function; reads 88 bytes, returns exit slit dict. """
# Does not exist separately in OpenMIMS, part of ApSecondaryNano and AnalysisParam
d = {}
# Each detector exit slit has:
# - a position (0, 1, 2)
# - a size (normal, large, xl)
        # The exit slit widths (and heights) are a 3x5 matrix where
        # coordinate (size, pos) returns the actual width (height). Positions 4
        # and 5 are 0 (for future expansion?). Size XL is not stored in the same part
# of header, and only in analysis version >= 5, so we return a list of
# length 5 with 0s here. Slits 0, 1, 2 are called slit 1, slit 2,
# slit 3, so add labels to avoid confusion.
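        # Illustrative lookup (sketch, using the keys set below): the width of the
        # currently selected slit is
        #     d['exit slit widths'][d['exit slit size']][d['exit slit']]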
d['exit slit'], d['exit slit size'] = \
unpack(self._bo + '2i', hdr.read(8))
d['exit slit label'] = _exit_slit_labels.get(d['exit slit'], str(d['exit slit']))
d['exit slit size label'] = _exit_slit_size_labels.get(d['exit slit size'], str(d['exit slit size']))
w0 = tuple(unpack(self._bo + '5i', hdr.read(20)))
w1 = tuple(unpack(self._bo + '5i', hdr.read(20)))
w2 = (0, 0, 0, 0, 0)
d['exit slit widths'] = (w0, w1, w2)
h0 = tuple(unpack(self._bo + '5i', hdr.read(20)))
h1 = tuple(unpack(self._bo + '5i', hdr.read(20)))
h2 = (0, 0, 0, 0, 0)
d['exit slit heights'] = (h0, h1, h2)
return d
def _electron_multiplier(self, hdr):
""" Internal function; reads 16 bytes, returns EM dict. """
d = {}
d['em yield'], d['em background'], d['em deadtime'] = \
unpack(self._bo + 'd 2i', hdr.read(16))
return d
def _sims_header(self, hdr):
"""Internal function, reads 1240 bytes, returns SIMSHeader dict. """
# Called DefAnalysisBis and DefEps in OpenMIMS
d = {}
d['simsheader version'], d['original filename'], d['matrix'], \
d['sigref auto'], d['sigref points'], d['sigref delta'], \
d['sigref scan time'], d['sigref measure time'], \
d['sigref beam on time'], d['eps centering enabled'], \
d['eps enabled'], d['eps central energy'], d['eps b field'] = \
unpack(self._bo + 'i 256s 256s 10i', hdr.read(556))
d['EPSCentralSpecies'] = self._species(hdr)
d['EPSReferenceSpecies'] = self._species(hdr)
# Don't know how long method name is, runs into null-padded zone.
d['eps ref mass tube hv'], d['eps ref mass tube hv max var'], \
d['sample rotation'], d['sample rotation speed'], \
d['sample rotation synced'], d['sample name'], \
d['user name'], d['method name'] = \
unpack(self._bo + '2d 3i 80s 32s 256s', hdr.read(396))
d['original filename'] = self._cleanup_string(d['original filename'])
d['matrix'] = self._cleanup_string(d['matrix'])
d['sample name'] = self._cleanup_string(d['sample name'])
d['user name'] = self._cleanup_string(d['user name'])
d['method name'] = self._cleanup_string(d['method name'])
d['sigref auto'] = bool(d['sigref auto'])
d['eps centering enabled'] = bool(d['eps centering enabled'])
d['eps enabled'] = bool(d['eps enabled'])
d['sample rotation'] = bool(d['sample rotation'])
d['sample rotation synced'] = bool(d['sample rotation synced'])
d['sigref scan time'] /= 10 # 0.1 sec increments
return d
def _preset_start(self, hdr):
""" Internal function; read 0 bytes (8 and back up),
returns True or False detecting start of preset.
"""
test = hdr.read(8)
hdr.seek(-8, 1)
try:
test = self._cleanup_string(test)
except UnicodeDecodeError:
# Some non-null filler bytes
return False
# First entry in preset is .isf filename with full path
# If preset is used at all, first entry is non-null.
# Paths start with / (older Sun systems), or drive letter
# (D: newer Windows systems)
if re.match('[A-Z]:', test) or re.match('/.', test):
return True
else:
return False
def _preset(self, hdr, group=None):
""" Internal function; reads 1080 (slits) or 3640 (lenses) bytes,
returns a Preset dict.
"""
# Called ApPresetSlit/ApPresetLens in OpenMIMS
# ApPresetDef and ApParamPreset combined here.
if not group:
raise ValueError("Group not set, select either 'slit' or 'lens'.")
d = {}
start_position = hdr.tell()
d['isf filename'], d['preset name'], d['calibration date'], \
d['enabled'], d['parameters'] = \
unpack(self._bo + '256s 224s 32s 2i', hdr.read(520))
d['enabled'] = bool(d['enabled'])
d['isf filename'] = self._cleanup_string(d['isf filename'])
d['preset name'] = self._cleanup_string(d['preset name'])
d['calibration date'] = self._cleanup_string(d['calibration date'])
d['calibration date'] = self._cleanup_date(d['calibration date'])
# Presets have a fixed length: 1080 for slits, 3640 for lenses.
# Padded with null- or CC bytes (but there may be other stuff in
# there). There are more than d['parameters'] parameters in here, but
# they seem to be "left-overs" from previous presets. Much the
# same as strings which have more text after the terminating null-byte.
# Only read first d['parameters'] parameters.
for p in range(d['parameters']):
param_id, value, param_name = \
unpack(self._bo + '2i 20s', hdr.read(28))
param_name = self._cleanup_string(param_name)
if not param_name:
param_name = str(param_id)
d[param_name] = value
current_position = hdr.tell()
if group == 'slit':
skip = 1080 - (current_position - start_position)
else:
skip = 3640 - (current_position - start_position)
hdr.seek(skip, 1)
return d
def _presets(self, hdr):
""" Internal function; reads 11,600 bytes, returns Presets dict. """
# presput/slit = 1080
# presput/lens = 3640
# measure/slit = 1080
# measure/lens = 3640
# 2 x 1080 padding
# padding can be before presput, inbetween presput and measure,
# and after measure.
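        # Worked total: 1080 + 3640 + 1080 + 3640 + 2*1080 = 11,600 bytes.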
d = {}
d['Presputter'] = {}
padding = 0
if not self._preset_start(hdr):
hdr.seek(1080, 1)
padding += 1
d['Presputter']['Slits'] = self._preset(hdr, group='slit')
d['Presputter']['Lenses'] = self._preset(hdr, group='lens')
d['Measure'] = {}
if not self._preset_start(hdr):
hdr.seek(1080, 1)
padding += 1
d['Measure']['Slits'] = self._preset(hdr, group='slit')
d['Measure']['Lenses'] = self._preset(hdr, group='lens')
hdr.seek(1080 * (2 - padding), 1)
return d
def _image_hdr(self, hdr):
""" Internal function; reads 84 bytes, returns Image dict. """
# Called ... in OpenMIMS
d = {}
d['header size'], d['type'], d['width'], d['height'], \
d['bytes per pixel'], d['masses'], d['planes'], \
d['raster'], d['original filename'] = \
unpack(self._bo + 'i 6h i 64s', hdr.read(84))
# Called nickname in OpenMIMS
d['original filename'] = self._cleanup_string(d['original filename'])
if d['header size'] != 84:
raise ValueError("Image header size is {}, not 84.".format(d['header size']))
return d
def _isotopes_hdr(self, hdr):
""" Internal function; reads 176 bytes, returns Isotopes dict. """
# Not in OpenMIMS
d = {}
d['blocks'], d['frames per block'], d['rejection sigma'], ratios = \
unpack(self._bo + '4i', hdr.read(16))
# ratios is the number of ratios to follow. Each ratio is a set of two
# ints. Each int is the index (0-index) of the species in the mass
        # list. First int is numerator, second is denominator of ratio.
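        # Illustrative example (hypothetical values): with ratios = 2 and
        # r = (1, 0, 2, 0), 'ratios index' becomes ((1, 0), (2, 0)), i.e. the
        # second and third species in the label list each divided by the first.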
r = unpack(self._bo + '{}i'.format(2*ratios), hdr.read(2*4*ratios))
rtxt = tuple(self.header['label list'][n] for n in r)
rfmt = tuple(self.header['label list fmt'][n] for n in r)
d['ratios index'] = tuple((r[n], r[n+1]) for n in range(0, 2*ratios, 2))
d['ratios'] = tuple((rtxt[n], rtxt[n+1]) for n in range(0, 2*ratios, 2))
d['ratios fmt'] = tuple('{}\\slash {}'.format(rfmt[n], rfmt[n+1]) for n in range(0, 2*ratios, 2))
# rest is filler with \xFF
hdr.seek(176 - 16 - 2*4*ratios, 1)
return d
def _image_data(self):
""" internal function; read data for image type. """
if self.header['Image']['bytes per pixel'] == 2:
# 16-bit unsigned integers, short
dt = np.dtype(self._bo + 'H')
elif self.header['Image']['bytes per pixel'] == 4:
# 32-bit unsigned integers, int
dt = np.dtype(self._bo + 'I')
shape = [self.header['Image']['planes'],
self.header['Image']['masses'],
self.header['Image']['height'],
self.header['Image']['width']]
self.fh.seek(self.header['header size'])
compressedfiles = (
gzip.GzipFile,
bz2.BZ2File,
tarfile.ExFileObject,
lzma.LZMAFile,
io.BytesIO
)
# fromfile is about 2x faster than frombuffer(fh.read())
if isinstance(self.fh, compressedfiles):
data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)
else:
data = np.fromfile(self.fh, dtype=dt).reshape(shape)
# We want to have a cube of contiguous data (stacked images) for each
# mass. Swap axes 0 and 1. Returns a view, so make full copy to make
# data access faster.
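        # e.g. (planes, masses, y, x) = (100, 7, 256, 256) becomes
        # (species, frame, y, x) = (7, 100, 256, 256) after the swap
        # (illustrative sizes).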
data = data.swapaxes(0, 1).copy()
self.data = xarray.DataArray(data,
dims=('species', 'frame', 'y', 'x'),
coords={'species': ('species', list(self.header['label list']))},
attrs={'unit': 'counts'})
def _isotope_data(self):
""" Internal function; read data from .is or .dp file. """
# Data structure:
# header
# 1 int (M): no. blocks (i.e. masses)
# M blocks:
# (each block)
# 1 int (N): no. points (i.e. frames)
# N doubles: cumulative count time in s
# N doubles: data
self.fh.seek(self.header['header size'])
blocks = unpack(self._bo + 'i', self.fh.read(4))[0]
data = []
for block in range(blocks):
points = unpack(self._bo + 'i', self.fh.read(4))[0]
d = np.fromfile(self.fh, dtype=self._bo+'f8', count=2*points).reshape(2, points)
data.append(d[1])
self._data_corr = xarray.DataArray(data,
dims=('species', 'frame'),
coords={'species': ('species', list(self.header['label list']))},
attrs={'unit': 'counts/s'})
def _isotope_txt_data(self):
""" Internal function; read data from .is_txt or .dp_txt file. """
with open(self.filename + '_txt', mode='rt') as fh:
txt = fh.readlines()
data = []
frames = self.header['frames']
line = 0
while line < len(txt):
if txt[line].startswith('B ='):
Tc = txt[line].split('=')[-1].strip().strip(' ms')
Tc = float(Tc)/1000
d = np.loadtxt(txt[line + 2 : line + 2 + frames])
data.append(d[:, 1]/Tc)
line += 2 + frames
line += 1
self.data = xarray.DataArray(data,
dims=('species', 'frame'),
coords={'species': ('species', list(self.header['label list']))},
attrs={'unit': 'counts/s'})
def _read_chk_is(self):
""" Internal function, reads .chk_is file,
extracts background calibration.
"""
fname = os.path.splitext(self.filename)[0] + '.chk_is'
table = []
bg_before = []
bg_after = []
with open(fname, mode='rt') as fh:
for line in fh:
if line.startswith('FC Background before acq'):
bg_before = line.split(':')[1].strip().split()
elif line.startswith('FC Background after acq'):
bg_after = line.split(':')[1].strip().split()
elif line.startswith('|'):
table.append(line)
# Parse analysis background
det = ''
for part in bg_before:
if 'Det' in part:
det = part.replace('Det', 'Detector ').strip('= ')
continue
try:
bg = float(part.strip())
except ValueError:
bg = 0
self.header['Detectors'][det]['fc background before analysis'] = bg
for part in bg_after:
if 'Det' in part:
det = part.replace('Det', 'Detector ').strip('= ')
continue
try:
bg = float(part.strip())
except ValueError:
bg = 0
self.header['Detectors'][det]['fc background after analysis'] = bg
# Parse baseline background if found
if table:
background = table[0].strip().strip('|').split('|')
background = [float(b.strip()) for b in background[1:]]
detectors = table[2].strip().strip('|').split('|')
detectors = [i.strip().replace('Mass#', 'Detector ') for i in detectors[2:]]
for det, bg in zip(detectors, background):
detdict = self.header['Detectors'][det]
key = '{} background baseline'.format(detdict['detector'].lower())
detdict[key] = bg
def _beamstability_data(self):
""" Internal function; read data in .bs beam stability file."""
traces = unpack(self._bo + 'i', self.fh.read(4))[0]
x = []
data = []
maxpoints = 0
for _ in range(traces):
points = unpack(self._bo + 'i', self.fh.read(4))[0]
d = np.fromfile(self.fh, dtype=self._bo+'f8', count=2*points).reshape(2, points)
data.append(d[1])
if points > maxpoints:
x = d[0]
maxpoints = points
for d in range(len(data)):
pad_width = maxpoints - data[d].shape[0]
data[d] = np.pad(data[d], (0, pad_width), 'constant')
if self.header['file type'] == 31:
if self.header['analysis type'].endswith('trolley step scan'):
xprop = 'radius'
xunit = 'mm'
else:
xprop = 'deflection'
xunit = 'V'
elif self.header['file type'] == 35:
xprop = 'time'
xunit = 's'
self.data = xarray.DataArray(data, dims=('species', xprop),
coords={
'species': ('species', list(self.header['label list'])),
xprop: (xprop, x, {'unit': xunit})
},
attrs={
'unit': 'counts/s'
})
def _cleanup_string(self, bytes):
""" Internal function; cuts off bytes at first null-byte,
decodes bytes as latin-1 string, returns string
"""
try:
b = bytes.index(b'\x00')
except ValueError:
return bytes.decode('latin-1').strip()
else:
return bytes[:b].decode('latin-1').strip()
def _cleanup_date(self, date):
""" Internal function; reads date-string, returns Python datetime
object. Assumes date-part and time-part are space separated, date
is dot-separated, and time is colon-separated. Returns None if date
is empty, contains 'N/A', or is not a string.
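        Example (worked, illustrative): the string '13.11.2019 14:05' is
        parsed as datetime.datetime(2019, 11, 13, 14, 5).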
"""
if (not date or
not isinstance(date, str) or
'N/A' in date):
return None
date, time = date.split()
day, month, year = date.split('.')
hour, minute = time.split(':')
year, month, day, hour, minute = [int(x) for x in (year, month, day, hour, minute)]
# For 2-digit years, 1969/2068 is the wrap-around (POSIX standard)
if (69 <= year < 100):
year += 1900
elif (0 <= year < 69):
year += 2000
return datetime.datetime(year, month, day, hour, minute)
def _chomp(self, hdr, filler=(b'\x00\x00\x00\x00', b'\xCC\xCC\xCC\xCC'), chunk=4):
""" Internal function.
Reads and discards filler bytes, by default null (\\x00) and
continue (\\xCC) bytes. Stops at start of first non-filler byte.
        Reads chunk bytes at a time.
"""
filler = tuple(filler)
bytes = hdr.read(chunk)
while(bytes and bytes in filler):
bytes = hdr.read(chunk)
hdr.seek(-1 * chunk, 1)
class SIMS(SIMSReader, TransparentOpen):
""" Read a (nano)SIMS file and load the full header and image data. """
def __init__(self, filename, file_in_archive=0, password=None):
""" Create a SIMS object that will hold all the header information and
image data.
Usage: s = sims.SIMS('filename.im' | 'filename.im.bz2' | fileobject)
Header information is stored as a nested Python dict in SIMS.header,
while data is stored in SIMS.data as a xarray DataArray.
This class can open Cameca (nano)SIMS files and transparently
supports compressed files (gzip, bzip2, xz, lzma, zip, 7zip) and
opening from multifile archives (tar, compressed tar, zip, 7zip).
Set file_in_archive to the filename to extract, or the sequence
number of the file in the archive (0 is the first file). For
encrypted archives (zip, 7z) set password to access the data. For
zip format, password must be a byte-string.
It's also possible to supply a file object to an already opened
file. In fact, SIMS can read from anything that provides a read()
function, although reading from a buffered object (with seek() and
tell() support) is much more efficient.
SIMS supports the 'with' statement.
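        Example (illustrative sketch; 'my_data.im' is a placeholder filename):
            with sims.SIMS('my_data.im') as s:
                print(s.header['file type'])
                print(s.data.dims)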
"""
if not filename:
return
TransparentOpen.__init__(self, filename, file_in_archive=file_in_archive,
password=password)
self.fh.seek(0)
SIMSReader.__init__(self, self.fh, filename=filename)
self.peek()
self.read_header()
self.read_data()
self.close()
###############################################################################
# Not implemented yet in read_header
#
# class CalCond
# int n_delta;
# int np_delta;
# int tps_comptage;
# int nb_cycles;
# double no_used2;
# double cal_ref_mass;
# String symbol;
|
[
"sims.transparent.TransparentOpen.__init__",
"numpy.pad",
"copy.deepcopy",
"io.BytesIO",
"sims.utils.format_species",
"numpy.fromfile",
"struct.unpack",
"numpy.dtype",
"re.match",
"os.path.exists",
"datetime.datetime",
"numpy.loadtxt",
"os.path.splitext",
"collections.OrderedDict",
"warnings.warn"
] |
[((3070, 3099), 'struct.unpack', 'unpack', (["(self._bo + '3i')", 'snip'], {}), "(self._bo + '3i', snip)\n", (3076, 3099), False, 'from struct import unpack\n'), ((4679, 4694), 'io.BytesIO', 'io.BytesIO', (['hdr'], {}), '(hdr)\n', (4689, 4694), False, 'import io\n'), ((17035, 17054), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (17048, 17054), False, 'import copy\n'), ((21884, 21909), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (21907, 21909), False, 'import collections\n'), ((59059, 59108), 'datetime.datetime', 'datetime.datetime', (['year', 'month', 'day', 'hour', 'minute'], {}), '(year, month, day, hour, minute)\n', (59076, 59108), False, 'import datetime\n'), ((61043, 61139), 'sims.transparent.TransparentOpen.__init__', 'TransparentOpen.__init__', (['self', 'filename'], {'file_in_archive': 'file_in_archive', 'password': 'password'}), '(self, filename, file_in_archive=file_in_archive,\n password=password)\n', (61067, 61139), False, 'from sims.transparent import TransparentOpen\n'), ((10766, 10784), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (10779, 10784), False, 'import warnings\n'), ((46501, 46525), 're.match', 're.match', (['"""[A-Z]:"""', 'test'], {}), "('[A-Z]:', test)\n", (46509, 46525), False, 'import re\n'), ((46529, 46549), 're.match', 're.match', (['"""/."""', 'test'], {}), "('/.', test)\n", (46537, 46549), False, 'import re\n'), ((51420, 51444), 'numpy.dtype', 'np.dtype', (["(self._bo + 'H')"], {}), "(self._bo + 'H')\n", (51428, 51444), True, 'import numpy as np\n'), ((57110, 57153), 'numpy.pad', 'np.pad', (['data[d]', '(0, pad_width)', '"""constant"""'], {}), "(data[d], (0, pad_width), 'constant')\n", (57116, 57153), True, 'import numpy as np\n'), ((2149, 2186), 'copy.deepcopy', 'copy.deepcopy', (['self.__dict__[k]', 'memo'], {}), '(self.__dict__[k], memo)\n', (2162, 2186), False, 'import copy\n'), ((2600, 2623), 'struct.unpack', 'unpack', (['"""<i"""', 'snip[4:8]'], {}), "('<i', snip[4:8])\n", (2606, 2623), False, 'from struct import unpack\n'), ((16037, 16075), 'os.path.exists', 'os.path.exists', (["(self.filename + '_txt')"], {}), "(self.filename + '_txt')\n", (16051, 16075), False, 'import os\n'), ((23721, 23738), 'sims.utils.format_species', 'format_species', (['m'], {}), '(m)\n', (23735, 23738), False, 'from sims.utils import format_species\n'), ((51565, 51589), 'numpy.dtype', 'np.dtype', (["(self._bo + 'I')"], {}), "(self._bo + 'I')\n", (51573, 51589), True, 'import numpy as np\n'), ((54120, 54163), 'numpy.loadtxt', 'np.loadtxt', (['txt[line + 2:line + 2 + frames]'], {}), '(txt[line + 2:line + 2 + frames])\n', (54130, 54163), True, 'import numpy as np\n'), ((54617, 54648), 'os.path.splitext', 'os.path.splitext', (['self.filename'], {}), '(self.filename)\n', (54633, 54648), False, 'import os\n'), ((2742, 2765), 'struct.unpack', 'unpack', (['""">i"""', 'snip[4:8]'], {}), "('>i', snip[4:8])\n", (2748, 2765), False, 'from struct import unpack\n'), ((13281, 13299), 'warnings.warn', 'warnings.warn', (['msg'], {}), '(msg)\n', (13294, 13299), False, 'import warnings\n'), ((15055, 15086), 'os.path.splitext', 'os.path.splitext', (['self.filename'], {}), '(self.filename)\n', (15071, 15086), False, 'import os\n'), ((16221, 16269), 'warnings.warn', 'warnings.warn', (['"""No data read for line scan, fix"""'], {}), "('No data read for line scan, fix')\n", (16234, 16269), False, 'import warnings\n'), ((52233, 52263), 'numpy.fromfile', 'np.fromfile', (['self.fh'], {'dtype': 'dt'}), '(self.fh, dtype=dt)\n', (52244, 52263), 
True, 'import numpy as np\n'), ((53331, 53392), 'numpy.fromfile', 'np.fromfile', (['self.fh'], {'dtype': "(self._bo + 'f8')", 'count': '(2 * points)'}), "(self.fh, dtype=self._bo + 'f8', count=2 * points)\n", (53342, 53392), True, 'import numpy as np\n'), ((56797, 56858), 'numpy.fromfile', 'np.fromfile', (['self.fh'], {'dtype': "(self._bo + 'f8')", 'count': '(2 * points)'}), "(self.fh, dtype=self._bo + 'f8', count=2 * points)\n", (56808, 56858), True, 'import numpy as np\n')]
|
"""
File: xAndes.py
Author: <NAME>
Description: Run dadi 5x on fs from GBS data.
Usage: python xAndes_GBS.py
"""
import os, sys
import numpy
from numpy import array
import dadi
import matplotlib
import matplotlib.pyplot as plt
import demographic_models
for i in range(5):
dir = ("/scratch/mgharvey/SysBio/dadi/GBS")
os.chdir(dir)
outfile = open("./optimized_output_GBS_{0}.txt".format(i+1), 'wb')
data = dadi.Spectrum.from_file('GBS.fs')
ns = data.sample_sizes
pts_l = [30,40,50]
func = demographic_models.twopop_model
params = array([1, 1, 1, 1, 1, 1, 1])
upper_bound = [20, 20, 20, 10, 10, 20, 20]
lower_bound = [0.01, 0.01, 0.01, 0, 0, 0, 0]
func_ex = dadi.Numerics.make_extrap_log_func(func)
model = func_ex(params, ns, pts_l)
ll_model = dadi.Inference.ll_multinom(model, data)
theta = dadi.Inference.optimal_sfs_scaling(model, data)
p0 = dadi.Misc.perturb_params(params, fold=1, upper_bound=upper_bound)
popt = dadi.Inference.optimize_log_lbfgsb(p0, data, func_ex, pts_l,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=len(params), maxiter=20)
model = func_ex(popt, ns, pts_l)
ll_opt = dadi.Inference.ll_multinom(model, data)
optpar = repr(popt)
outfile.write("Likelihood: {0}\nOptimized Parameters: {1}\nTheta (Scaling Factor): {2}\nConverted thetas: {3} {4} {5} {6} {7}".format(ll_opt, optpar, theta, (popt[0]*theta), (popt[1]*theta), (popt[2]*theta), (popt[3]*theta), (popt[4]*theta)))
outfile.close()
|
[
"dadi.Inference.optimal_sfs_scaling",
"dadi.Inference.ll_multinom",
"dadi.Numerics.make_extrap_log_func",
"numpy.array",
"dadi.Misc.perturb_params",
"dadi.Spectrum.from_file",
"os.chdir"
] |
[((331, 344), 'os.chdir', 'os.chdir', (['dir'], {}), '(dir)\n', (339, 344), False, 'import os, sys\n'), ((422, 455), 'dadi.Spectrum.from_file', 'dadi.Spectrum.from_file', (['"""GBS.fs"""'], {}), "('GBS.fs')\n", (445, 455), False, 'import dadi\n'), ((552, 580), 'numpy.array', 'array', (['[1, 1, 1, 1, 1, 1, 1]'], {}), '([1, 1, 1, 1, 1, 1, 1])\n', (557, 580), False, 'from numpy import array\n'), ((684, 724), 'dadi.Numerics.make_extrap_log_func', 'dadi.Numerics.make_extrap_log_func', (['func'], {}), '(func)\n', (718, 724), False, 'import dadi\n'), ((773, 812), 'dadi.Inference.ll_multinom', 'dadi.Inference.ll_multinom', (['model', 'data'], {}), '(model, data)\n', (799, 812), False, 'import dadi\n'), ((822, 869), 'dadi.Inference.optimal_sfs_scaling', 'dadi.Inference.optimal_sfs_scaling', (['model', 'data'], {}), '(model, data)\n', (856, 869), False, 'import dadi\n'), ((878, 943), 'dadi.Misc.perturb_params', 'dadi.Misc.perturb_params', (['params'], {'fold': '(1)', 'upper_bound': 'upper_bound'}), '(params, fold=1, upper_bound=upper_bound)\n', (902, 943), False, 'import dadi\n'), ((1153, 1192), 'dadi.Inference.ll_multinom', 'dadi.Inference.ll_multinom', (['model', 'data'], {}), '(model, data)\n', (1179, 1192), False, 'import dadi\n')]
|
'''
Data handler that relies on INI logs and `fusi.io`.
<NAME> (Jan, 2019)
'''
import os
import pathlib
from glob import glob
from pprint import pprint
from collections import OrderedDict
import numpy as np
from fusilib import misc, utils as futils
from fusilib.io import righw, spikeglx, phy, logs, sync
from fusilib.extras import readers
import fusilib.config
def hdf_load(flname, *args, **kwargs):
    '''
    Load data from an HDF file, converting a URI-style path to a local path first.
    '''
local_path = misc.uri_convert_uri2local(str(flname))
return readers.hdf_load(local_path, *args, **kwargs)
def mk_hash_call(func_kwargs):
    '''
    Hash a function's keyword arguments (minus `self` and `recache`) with SHA-512
    for use as a cache key.
    '''
import hashlib
_ = func_kwargs.pop('self')
_ = func_kwargs.pop('recache')
call_signature = str(func_kwargs).encode()
print(call_signature)
call_hash = hashlib.sha512(call_signature).hexdigest()
return call_hash
def params2cache(kwargs):
    '''
    Map a set of call parameters to a cache file path under /store/fast/scratch.
    '''
thash = mk_hash_call(kwargs)
outfl = os.path.join('/store/fast/scratch', str(thash))
return pathlib.Path(outfl)
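# Illustrative usage (sketch; the keyword arguments below are hypothetical):
# build a cache path keyed on a method's call signature. Note that mk_hash_call
# expects 'self' and 'recache' keys, which it pops before hashing.
#     kwargs = dict(self=some_object, recache=False, dt_ms=50, window_ms=150)
#     cache_path = params2cache(kwargs)   # -> /store/fast/scratch/<sha512 hexdigest>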
EXPERIMENTS = [('CR017', (2019, 11, 13)),
('CR017', (2019, 11, 14)),
('CR019', (2019, 11, 26)),
('CR019', (2019, 11, 27)),
('CR020', (2019, 11, 20)),
('CR020', (2019, 11, 21)),
('CR020', (2019, 11, 22)),
('CR022', (2020, 10, 7)),
('CR022', (2020, 10, 11)),
('CR024', (2020, 10, 29)),
]
##############################
# helper functions
##############################
class MetaSession(object):
    '''
    Handler for a single experimental session, identified by subject name
    and ISO-format session date (e.g. '2019-11-13').
    '''
def __init__(self,
subject_name,
session_name,
root=None,
verbose=False):
'''
'''
if root is None:
root = fusilib.config.DATA_ROOT
root = pathlib.Path(str(root)).joinpath(subject_name)
session_path = root.joinpath(session_name)
self.root = root
self.subject_name = subject_name
self.session_name = session_name
self.session_path = session_path
self.verbose = verbose
if not root.exists():
raise IOError('Invalid path: %s' % root)
if not session_path.exists():
raise IOError('Invalid path: %s' % session_path)
self.log = None
self.load_session_log(verbose=verbose)
def ls(self, pattern):
'''
'''
results = sorted(list(self.session_path.glob(pattern)))
return results
@property
def experiment_mapper_slices2blocks(self):
'''A dict mapping slices to session blocks
'''
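        # Returns e.g. {0: array([4, 6]), 1: array([5, 7])}, mapping each slice
        # index to the fUSi block numbers acquired at that slice
        # (illustrative block numbers).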
session_info = self.log_load_section('experiment')
slice_blocks = self.log_load_section('fusi', 'mapping_block2slices')
fusi_blocks = session_info['matched_fusi_blocks']
assert len(fusi_blocks) == len(slice_blocks)
# Get the blocks corresponding for each slice
all_slice_indeces = np.unique(slice_blocks)
# remove nans
all_slice_indeces = all_slice_indeces[np.logical_not(
np.isnan(all_slice_indeces))]
# check they are indeces
assert np.allclose(np.asarray(all_slice_indeces,
dtype=np.int), all_slice_indeces)
all_slice_indeces = np.asarray(all_slice_indeces, dtype=np.int)
slice_blocks_dict = {slice_num: fusi_blocks[slice_blocks == slice_num]
for slice_num in all_slice_indeces}
return slice_blocks_dict
@property
def experiment_mapper_blocks2slices(self):
slice_blocks = self.experiment_mapper_slices2blocks
blocks2slice = {}
for sl, blocks in slice_blocks.items():
for block in blocks:
blocks2slice[block] = sl
return blocks2slice
@property
def experiment_blocks(self):
'''Get all blocks
'''
session_info = self.log_load_section('experiment')
blocks = session_info['block_numbers']
blocks = blocks[np.logical_not(np.isnan(blocks))]
if 'valid_blocks' in session_info:
blocks = session_info['valid_blocks']
if 'invalid_blocks' in session_info:
bad_blocks = session_info['invalid_blocks']
blocks = np.asarray([t for t in blocks if t not in bad_blocks])
return blocks.astype(np.int)
@property
def experiment_fusi_blocks(self):
'''
'''
session_info = self.log_load_section('experiment')
blocks = self.log_load_section('experiment', 'matched_fusi_blocks')
blocks = blocks[np.logical_not(np.isnan(blocks))]
if 'valid_blocks' in session_info:
blocks = session_info['valid_blocks']
if 'invalid_blocks' in session_info:
bad_blocks = session_info['invalid_blocks']
blocks = np.asarray([t for t in blocks if t not in bad_blocks])
return blocks.astype(np.int)
def fusi_blocks_iterator(self, blocks=None):
'''
'''
if blocks is None:
blocks = self.experiment_fusi_blocks
for block in blocks:
yield MetaBlock(self.subject_name, self.session_name, block)
@property
def fusi_nslices(self):
return len(self.experiment_mapper_slices2blocks)
def fusi_slice_position(self, slice_number):
'''Get position of slice in [mm]
'''
info = self.log_load_section('fusi')
slices_mm = info['slice_positions']
return slices_mm[slice_number]
def experiment_slice2blocks(self, slice_number):
'''Get the fUSi blocks corresponding to the slice of interest
'''
mapper = self.experiment_mapper_slices2blocks
return mapper[slice_number]
@property
def experiment_tasks(self):
return self.log_load_section('experiment', 'tasks')
def experiment_get_task_blocks(self, task_name):
'''
'''
info = self.log_load_section('experiment')
assert task_name in info['tasks']
return np.asarray(info['blocks_%s' % task_name])
def fusi_get_slice_data(self, slice_number,
fusi_mask=None,
fusi_preproc=dict(dt_ms=50,
window_ms=150,
svddrop=5,
freq_cutoffhz=15,
roi_name=None,
mirrored=False),
trim_raw=dict(trim_beg=5, trim_end=5),
trim_post_detrend=dict(trim_beg=10, trim_end=10),
outbrain_nuisance=None,
temporal_detrend=dict(ftype='sg',
window_size_sec=120,
polyorder=3),
spatial_smoothing=None,
normalization='pctchg'):
        '''
        Load preprocessed fUSi data from all blocks acquired at `slice_number`
        and stack them along the time axis.
        '''
fusi_slice_blocks = self.experiment_slice2blocks(slice_number)
print('Working on slice %i' % slice_number, fusi_slice_blocks)
slice_data = []
slice_times = []
for bdx, block_number in enumerate(fusi_slice_blocks):
fusi_times, fusi_data = self.fusi_get_block_data(
block_number,
fusi_mask=fusi_mask,
fusi_preproc=fusi_preproc,
trim_raw=trim_raw,
trim_post_detrend=trim_post_detrend,
outbrain_nuisance=outbrain_nuisance,
temporal_detrend=temporal_detrend,
spatial_smoothing=spatial_smoothing,
normalization=normalization)
# store
slice_data.append(fusi_data)
slice_times.append(fusi_times)
slice_data = np.vstack(slice_data)
return slice_times, slice_data
def fusi_get_multiblock_data(self, block_numbers,
fusi_mask=None,
fusi_preproc=dict(dt_ms=50,
window_ms=150,
svddrop=5,
freq_cutoffhz=15,
roi_name=None,
mirrored=False),
trim_raw=dict(trim_beg=5, trim_end=5),
trim_post_detrend=dict(
trim_beg=10, trim_end=10),
outbrain_nuisance=None,
temporal_detrend=dict(ftype='sg',
window_size_sec=120,
polyorder=3),
spatial_smoothing=None,
normalization='pctchg'):
'''
fusi_mask : A mask to apply to the voxels of the fUSi slice
'''
blocks_data = []
blocks_times = []
for bdx, block_number in enumerate(block_numbers):
fusi_times, fusi_data = self.fusi_get_block_data(
block_number,
fusi_mask=fusi_mask,
fusi_preproc=fusi_preproc,
trim_raw=trim_raw,
trim_post_detrend=trim_post_detrend,
outbrain_nuisance=outbrain_nuisance,
temporal_detrend=temporal_detrend,
spatial_smoothing=spatial_smoothing,
normalization=normalization)
# store
blocks_data.append(fusi_data)
blocks_times.append(fusi_times)
blocks_data = np.vstack(blocks_data)
return blocks_times, blocks_data
def fusi_get_block_data(self, block_number,
fusi_mask=None,
fusi_preproc=dict(dt_ms=50,
window_ms=150,
svddrop=5,
freq_cutoffhz=15,
roi_name=None,
mirrored=False),
trim_raw=dict(trim_beg=5, trim_end=5),
trim_post_detrend=dict(trim_beg=10, trim_end=10),
outbrain_nuisance=None,
temporal_detrend=dict(ftype='sg',
window_size_sec=120,
polyorder=3),
spatial_smoothing=None,
normalization='pctchg',
):
        '''Load and preprocess fUSi data for a single block of this session.

        fusi_mask : np.ndarray used to mask/index the voxels of the fUSi slice.
        outbrain_nuisance : e.g. dict(npcs=3, r2thresh=0.1); regress out
            temporal PCs estimated from outside the brain mask.
        spatial_smoothing : e.g. dict(sigma_um=20); Gaussian in-plane smoothing.
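
        Example (illustrative; the block number is a placeholder):
            times, data = session.fusi_get_block_data(3, normalization='pctchg')
            # times in seconds; data is (n_timepoints, n_voxels), in % signal change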
'''
fusi_dt_sec = fusi_preproc['dt_ms']/1000.0
subject_block = MetaBlock(self.subject_name,
self.session_name,
int(block_number))
fusi_times, fusi_data = subject_block.fusi_get_data(**fusi_preproc)
# trim before detrending
##############################
if trim_raw is not None:
times_valid = np.logical_and(fusi_times > (fusi_times.min()
+ trim_raw['trim_beg']),
fusi_times < (fusi_times.max()
- trim_raw['trim_end']))
fusi_times = fusi_times[times_valid]
fusi_data = fusi_data[times_valid]
fusi_shape = fusi_data.shape[:]
# outbrain_nuisance
##############################
if outbrain_nuisance is not None:
from scipy import linalg as LA
from tikreg import models, utils as tikutils
outbrain_npcs = outbrain_nuisance['npcs']
r2thresh = outbrain_nuisance['r2thresh']
# get temporal PCs from outside brain mask
outsidebrain_mask = subject_block.fusi_get_outsidebrain_mask()
nuisance_xdata = LA.svd(fusi_data[:, outsidebrain_mask],
full_matrices=False)[0][:, :outbrain_npcs]
# fit OLS model and obtain predictions
nuisance_ypreds = models.olspred(
nuisance_xdata, fusi_data.reshape(fusi_data.shape[0], -1))
# compute prediction accuracy
nuisance_r2 = tikutils.columnwise_rsquared(
nuisance_ypreds, fusi_data.reshape(fusi_data.shape[0], -1))
# voxels to clean
nuisance_mask = (nuisance_r2 > r2thresh).reshape(
fusi_shape[1:]) # > 1%
# remove variance explained from outside brain
fusi_data[:, nuisance_mask] = np.nan_to_num(
(fusi_data[:, nuisance_mask] -
nuisance_ypreds.reshape(fusi_data.shape)[:, nuisance_mask]))
# flatten and mask data
##############################
if fusi_mask is not None:
assert isinstance(fusi_mask, np.ndarray)
fusi_data = fusi_data[:, fusi_mask]
else:
fusi_data = fusi_data.reshape(fusi_data.shape[0], -1)
# temporal detrend
##############################
if temporal_detrend is not None:
window_size = int(temporal_detrend['window_size_sec']/fusi_dt_sec)
window_size += (window_size - 1) % 2 # make odd number
yfull = futils.temporal_filter(fusi_data - fusi_data.mean(0), # zero mean
ftype=temporal_detrend['ftype'],
window_size=window_size,
polyorder=temporal_detrend['polyorder'])
# need mean for % signal change
yfull += fusi_data.mean(0)
# trim after detrending
##############################
        if trim_post_detrend is not None:
times_valid = np.logical_and(fusi_times > (fusi_times.min()
+ trim_post_detrend['trim_beg']),
fusi_times < (fusi_times.max()
- trim_post_detrend['trim_end']))
fusi_times = fusi_times[times_valid]
fusi_data = fusi_data[times_valid]
fusi_shape = fusi_data.shape[:]
# smoothing
##############################
if spatial_smoothing is not None:
from scipy import ndimage
sigma_um = spatial_smoothing['sigma_um']
um_hsize, um_vsize = np.asarray(
subject_block.fusi_image_pixel_mm)*1000
sigma_hpix = sigma_um/um_hsize
hsigma, vsigma = (sigma_hpix, sigma_hpix /
subject_block.fusi_aspect_ratio)
sigmas = (vsigma, hsigma)
print(
'Smoothing: %0.02d[um]. pixels (vert, horiz): ' % sigma_um, sigmas)
smooth_data = np.zeros_like(fusi_data)
for image_index in range(fusi_data.shape[0]):
img = fusi_data[image_index]
sim = ndimage.gaussian_filter(img, sigma=sigmas)
smooth_data[image_index] = sim
fusi_data = smooth_data
# normalize signals
##############################
if normalization == 'pctchg':
fusi_data = futils.pixelwise_pctsignalchange(fusi_data)
elif normalization == 'zscore':
fusi_data = zscore(fusi_data)
return fusi_times, fusi_data
def mk_filename_for_localdb(self, filename, subfolder=None, mkdir=False):
date_txt = misc.date_tuple2isoformat(self.date_tuple)
flname = '{date}_{subject}_{fl}'.format(date=date_txt,
subject=self.subject_name,
fl=filename)
if subfolder is None:
outpath = self.session_path
else:
outpath = self.session_path.joinpath(subfolder)
if mkdir:
outpath.mkdir(exist_ok=True)
return outpath.joinpath(flname)
def __str__(self):
return '%s %s' % (self.subject_name, self.session_name)
def __repr__(self):
info = (__name__, type(self).__name__,
self.subject_name, self.session_name)
return '<%s.%s [%s: session=%s]>' % info
@property
def date_tuple(self):
date = misc.date_isoformat2tuple(self.session_name)
return date
@property
def subject_sessions(self):
return list(self.root.glob('20??-??-??'))
@property
def session_blocks(self):
blocks = []
maxdigits = 2
for num in range(1, maxdigits+1):
blocks += list(self.session_path.glob('[0-9]'*num))
# sort by block number
blocks = sorted(blocks, key=lambda x: int(x.stem))
return blocks
def load_session_log(self, verbose=False):
'''
'''
pattern = '{session_path}/{year}-{month}-{day}_{subject}.ini'
yyyy, mm, dd = misc.date_tuple2yyyymmdd(self.date_tuple)
flname = pattern.format(session_path=self.session_path,
subject=self.subject_name,
year=yyyy,
month=mm,
day=dd)
try:
log = logs.read_experiment_log(flname, verbose=verbose)
except IOError:
print('Log file not available: %s' % flname)
log = None
self.log = log
def log_load_section(self, section_name, field=None):
contents = self.log[section_name]
# Convert the contents into correct datatypes
parsed_contents = {}
for key in contents.keys():
if (field is not None) and (key != field):
continue
value = contents[key]
value = misc.convert_string2data(value)
if isinstance(value, list):
# convert numerical lists to arrays
isnumeric = [not isinstance(val, str) for val in value]
                if np.all(isnumeric):
value = np.asarray(value)
if isinstance(value, str):
# Dynamic root path
if '{dataset_full_path}' in value:
value = value.format(
dataset_full_path=fusilib.config.DATA_ROOT)
# store parsed result
parsed_contents[key] = value
# Only return what is requested
if field is not None:
if field not in parsed_contents:
print('Not found: %s/%s' % (section_name, field))
else:
parsed_contents = parsed_contents[field]
return parsed_contents
def get_block_paths(self, blocknum):
'''
'''
block_paths = self.log_load_section('block')
for key, value in block_paths.items():
try:
block_paths[key] = value.format(blocknum)
except IndexError:
# pattern `{}` appears more than once
ninstances = len(value.split('{}'))-1
block_paths[key] = value.format(*[blocknum]*ninstances)
except KeyError:
# leave named place holders alone
continue
return block_paths
def ephys_get_probe_hemisphere(self, hemisphere):
'''
'''
probe_names = self.log_load_section('ephys', 'probes')
for probe_name in probe_names:
hemi = self.log_load_section(probe_name, 'nickname')
if hemi == hemisphere:
return probe_name
def ephys_get_probe_object(self, probe_name=None, path=None):
'''
'''
# log file contains a section
# [probe_name]
# phy_path = full/path/to/thing
if path is None:
probe_paths = self.log_load_section(probe_name)
path = misc.uri_convert_uri2local(probe_paths['path_phy'])
return phy.ProbeHandler(path)
def ephys_get_probe_nclusters(self, probe_name):
'''
'''
probe_object = self.ephys_get_probe_object(probe_name)
probe_object.npy_template_feature_ind()
nclusters = probe_object.npy_template_feature_ind.data.shape[0]
return nclusters
def ephys_get_recording_object(self, path=None):
'''
'''
if path is None:
# log file contains a section
# [probe_name]
# phy_path = full/path/to/thing
ephys_info = self.log_load_section('ephys')
ephys_path = ephys_info['path']
spikeglx_prefix = self.log_load_section(
'spikeglx')['recording_prefix']
recording_path = os.path.join(ephys_path, spikeglx_prefix)
path = misc.uri_convert_uri2local(recording_path)
return phy.RecordingHandler(path)
def ephys_get_pxinidaq_data(self, path=None,
index_npsync=0,
index_acqlive=1,
index_flipper=2):
'''Load the PXI NI DAQ data as saved by SpikeGLX.
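        Example (illustrative; placeholder subject/session, fields follow the DotDict built below)
        -------
        >>> session = MetaSession('subject01', '2020-01-01')
        >>> pxinidaq = session.ephys_get_pxinidaq_data()
        >>> pxinidaq.flipper         # digitized arduino flipper signal
        >>> pxinidaq.sample_ratehz   # NI DAQ sampling rate [Hz]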
'''
# log file contains a section
# [pxinidaq]
# path = full/path/to/thing
if path is None:
paths = self.log_load_section('pxinidaq')
path = misc.uri_convert_uri2local(paths['path'])
pxinidaq_flname = path
print('Loading: %s' % pxinidaq_flname)
pxinidaq_metadata = spikeglx.read_metadata(pxinidaq_flname)
pxinidaq_sample_ratehz = pxinidaq_metadata['niSampRate']
pxinidaq_data = spikeglx.load_data_chunk(
pxinidaq_flname, sample_duration=-1).T
pxinidaq_time = np.arange(
pxinidaq_data.shape[0])/pxinidaq_sample_ratehz
pxinidaq = misc.DotDict(npsync=sync.analog2digital(pxinidaq_data[:, index_npsync]), # NP digital signal
# acquisition live timeline signal)
acqlive=sync.analog2digital(
pxinidaq_data[:, index_acqlive]),
flipper=sync.analog2digital(
pxinidaq_data[:, index_flipper]), # arduino flipper signal
times=pxinidaq_time,
sample_ratehz=pxinidaq_sample_ratehz)
return pxinidaq
def fusi_get_probe_localizer(self, probe_name, slice_name='slice00', is_ystack=False):
'''
'''
section_header = 'location_%s_%s' % (slice_name, probe_name)
paths = self.log_load_section(section_header)
pprint(paths)
from fusilib import handler
path = misc.uri_convert_uri2local(paths['fusi_data'])
data = handler.matlab_data(path)
if is_ystack is False:
# Make sure log data matches MATLAB content
assert np.allclose(data['yCoords'], paths['ycoord'])
moving_probe = data['Doppler'].yStack.copy()
del(data)
else:
# It's a ystack 4D image: (z,x,t,y)
# default to using the first slice
moving_probe = data['Doppler'].yStack[..., 0]
return moving_probe
def fusi_get_probe_slice_mask(self,
probe_name,
slice_name='slice00',
roi_file_name='fusi_probe_roi.hdf',
verbose=False):
'''
'''
section_header = 'location_%s_%s' % (slice_name, probe_name)
localizer_info = self.log_load_section(section_header)
if verbose > 1:
pprint(localizer_info)
localizer_subfolder = str(localizer_info['folder_name'])
flname = self.mk_filename_for_localdb(roi_file_name,
subfolder=localizer_subfolder)
mask = hdf_load(str(flname), 'mask', verbose=verbose > 1)
if verbose:
print('fUSi probe ROI voxels: %i' % mask.sum())
return mask
def fusi_get_probe_master_mask(self,
probe_name,
roi_file_name='fusi_probe_roi.hdf'):
'''
probe2Dprojection : 'fusi_probe_roi.hdf'
V1 chunk : 'fusi_V1_section_in_probe_roi.hdf'
HPC chunk : fusi_hippo_section_in_probe_roi.hdf'
'''
blocks = self.log_load_section('experiment', 'block_numbers')
fusi_slices = self.log_load_section('fusi', 'mapping_block2slices')
slices2blocks = {k: v for k, v in zip(fusi_slices, blocks)}
fusi_slices = [t for t in np.unique(fusi_slices) if not np.isnan(t)]
masks = np.asarray([self.fusi_get_probe_slice_mask(
            probe_name, 'slice%02i' % sdx, roi_file_name=roi_file_name).astype(int) for sdx in fusi_slices])
master_mask = masks.sum(0) > 0
return master_mask
def fusi_get_probe_mask_extreme_coords(self, probe_name, slice_number=0, in_xyz_mm=False):
'''Get top and bottom voxel positions
(0,0,0) is left,back,top.
Returns
-------
top_xyz, bottom_xyz
x:ML: from the left
y:AP: from the back
z:DV: from the top
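        Example (illustrative; subject, session and probe IDs are placeholders)
        -------
        >>> session = MetaSession('subject01', '2020-01-01')
        >>> top_xyz, bottom_xyz = session.fusi_get_probe_mask_extreme_coords(
        ...     'probe00', slice_number=0, in_xyz_mm=True)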
'''
mask = self.fusi_get_probe_slice_mask(
probe_name, slice_name='slice%02i' % slice_number)
# exclude outside of brain voxels
slice_blocks = self.fusi_get_slice_blocks(slice_number)
block = MetaBlock(self.subject_name, self.session_name,
slice_blocks[0], verbose=False)
outside_brain = block.fusi_get_outsidebrain_mask()
mask[outside_brain] = False
dv_voxels = mask.sum(-1).nonzero()[0]
ml_voxels = mask.sum(0).nonzero()[0]
dv_top, dv_bottom = dv_voxels[0], dv_voxels[-1]
ml_top = round(np.mean(mask[dv_top].nonzero()[0]))
ml_bottom = round(np.mean(mask[dv_bottom].nonzero()[0]))
if in_xyz_mm:
ml_mm, dv_mm = self.fusi_image_pixel_mm
ap_mm = self.fusi_get_slices_mm(slice_number)
xyz = np.asarray([[ml_top, ml_bottom],
[ap_mm, ap_mm],
[dv_top, dv_bottom],
])
xyz[0, :] *= ml_mm
xyz[2, :] *= dv_mm
return xyz[:, 0], xyz[:, 1]
# get position of slice in AP
info = self.log_load_section('fusi')
slices_mm = info['slice_positions']
ystack_mm = info['ystack_positions']
ap_pos = np.asarray([idx for idx, t in enumerate(
ystack_mm) if t in slices_mm])[slice_number]
return (ml_top, ap_pos, dv_top), (ml_bottom, ap_pos, dv_bottom)
@property
def fusi_shape(self):
horizontal, vertical = self.fusi_get_coords_inplane_mm()
return len(vertical), len(horizontal)
def fusi_get_ystack(self):
'''y-stack for session
Raw data is stored as a 4D array: (nz, nx, nt, ny).
This computes the mean across time
Returns
-------
ystack : np.ndarray, (nz, nx, ny)
'''
from fusilib import handler
paths = self.log_load_section('fusi')
path = misc.uri_convert_uri2local(paths['ystack_volume'])
data = handler.matlab_data(path)
out = data['Doppler'].yStack.mean(2)
del(data)
return out
def fusi_get_ystack_nifti(self, nonlinearity=lambda x: x):
'''Convert ystack to RAS-oriented nifti
        Optionally applies a non-linearity (e.g. np.sqrt) to the data; the default is the identity.
Returns
-------
image : Nifti1Image
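        Example (illustrative sketch; a sqrt non-linearity is a common choice, not a requirement)
        -------
        >>> session = MetaSession('subject01', '2020-01-01')   # placeholder IDs
        >>> nii = session.fusi_get_ystack_nifti(nonlinearity=np.sqrt)
        >>> nii.shape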
'''
from fusilib import align, handler
# get volume
arr = self.fusi_get_ystack()
arr = nonlinearity(arr)
# get dimensions
xmm, zmm = self.fusi_image_pixel_mm
paths = self.log_load_section('fusi')
path = misc.uri_convert_uri2local(paths['ystack_volume'])
data = handler.matlab_data(path)
ymm = np.median(np.diff(data['yCoords']))
xyzmm = np.asarray([xmm, ymm, zmm])
print(arr.shape, xyzmm)
im = align.fusiarr2nii(arr, xyzmm=xyzmm,
flips=(1, 1, -1))
return im
def fusi_get_allenccf_byindex(self, scale_factor=1):
'''
'''
import nibabel as nib
fl_resampled_atlas = self.mk_filename_for_localdb('alleccf_atlas_resampled_fusi_scaled%02ix_byindex.nii.gz' % scale_factor,
'allenccf_align')
im = nib.load(fl_resampled_atlas)
return np.asarray(im.get_fdata())
def fusi_get_slices_mm(self, slice_number=None):
'''
'''
info = self.log_load_section('fusi')
slices_mm = info['slice_positions']
if slice_number is not None:
slices_mm = slices_mm[slice_number]
return slices_mm
def fusi_get_mean_slices(self, slice_number=None, scale_factor=1, remove_outside=True):
'''
'''
nii = self.fusi_get_ystack_nifti()
arr = np.asarray(nii.get_fdata())
info = self.log_load_section('fusi')
slices_mm = info['slice_positions']
ystack_mm = info['ystack_positions']
ystack_idx = np.asarray(
[idx for idx, t in enumerate(ystack_mm) if t in slices_mm])
ystack_nslices = len(ystack_mm)
axis = np.asarray([idx == ystack_nslices for idx in arr.shape])
assert axis.sum() == 1
axis = int(axis.nonzero()[0])
slicer = [slice(None)]*arr.ndim
slicer[axis] = ystack_idx
# Get relevant coronal slices and put them in the first dimension
        dat = arr[tuple(slicer)].transpose((1, 2, 0)).astype(int)
if remove_outside:
for slicenum in range(self.fusi_nslices):
slice_blocks = self.fusi_get_slice_blocks(slicenum)
blockob = MetaBlock(
self.subject_name, self.session_name, slice_blocks[0], verbose=False)
outside_brain = blockob.fusi_get_outsidebrain_mask()
dat[:, outside_brain] = 0
if slice_number is not None:
dat = dat[slice_number]
return dat
def fusi_get_allenccf_slices(self, slice_number, scale_factor=1, remove_outside=True):
'''
'''
arr = self.fusi_get_allenccf_byindex(scale_factor)
info = self.log_load_section('fusi')
slices_mm = info['slice_positions']
ystack_mm = info['ystack_positions']
ystack_idx = np.asarray(
[idx for idx, t in enumerate(ystack_mm) if t in slices_mm])
ystack_nslices = len(ystack_mm)
axis = np.asarray([idx == ystack_nslices for idx in arr.shape])
assert axis.sum() == 1
axis = int(axis.nonzero()[0])
slicer = [slice(None)]*arr.ndim
slicer[axis] = ystack_idx
# Get relevant coronal slices and put them in the first dimension
        dat = arr[tuple(slicer)].transpose((1, 2, 0)).astype(int)
if remove_outside:
for slicenum in range(self.fusi_nslices):
slice_blocks = self.fusi_get_slice_blocks(slicenum)
blockob = MetaBlock(
self.subject_name, self.session_name, slice_blocks[0], verbose=False)
outside_brain = blockob.fusi_get_outsidebrain_mask()
dat[:, outside_brain] = 0
if slice_number is not None:
dat = dat[slice_number]
return dat
def fusi_show_allenccf_slices(self,
slice_number=None,
scale_factor=1,
remove_outside=True,
ax=None,
alpha=0.5):
'''
'''
dat = self.fusi_get_allenccf_slices(slice_number=slice_number,
scale_factor=scale_factor,
remove_outside=remove_outside)
if ax is None:
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
from fusilib.align import allenccf_cmap
cmap, norm = allenccf_cmap()
im = ax.matshow(dat, cmap=cmap, norm=norm,
aspect=self.fusi_aspect_ratio, alpha=alpha)
return ax, im
def fusi_show(self, array_data, slice_number=0, ax=None,
allenccf_background=True, allenccf=True, allenccf_alpha=0.5, **kwargs):
'''
'''
if array_data.ndim == 1:
array_data = array_data.reshape(self.fusi_shape)
if ax is None:
from matplotlib import pyplot as plt
fig, ax = plt.subplots()
if allenccf_background:
im_allenccf = self.fusi_show_allenccf_slices(
slice_number, ax=ax, alpha=allenccf_alpha)
im = ax.matshow(
array_data, aspect=self.fusi_aspect_ratio, **kwargs)
else:
im = ax.matshow(
array_data, aspect=self.fusi_aspect_ratio, **kwargs)
im_allenccf = self.fusi_show_allenccf_slices(
slice_number, ax=ax, alpha=allenccf_alpha)
return ax, (im, im_allenccf)
def fusi_get_allenccf_contours(self, slice_number=0, area_indexes=None, minlen=50,
ax=None, color=None):
'''
'''
from skimage import measure
allenccf_slice = self.fusi_get_allenccf_slices(slice_number)
if area_indexes is None:
from fusilib import align
area_indexes = align.allenccf_main_areas()
contours = {areaidx: measure.find_contours(
allenccf_slice, areaidx) for areaidx, color in area_indexes.items()}
contours = {areaidx: [tt for tt in t if len(
tt) > minlen] for areaidx, t in contours.items()}
if ax is not None:
for areaidx, area_contours in sorted(contours.items())[::-1]:
for contour in area_contours:
                    ax.plot(contour[:, 1].astype(int),
                            contour[:, 0].astype(int), linewidth=1,
color='#%s' % area_indexes[areaidx] if color is None else color)
return contours
def fusi_get_probedepth_volume(self, probe_name, estimate_type='manual'):
'''
'''
import nibabel as nib
# load volume
volfl = self.mk_filename_for_localdb('estimated_%s_3Dtrack_%s.nii.gz' % (probe_name, estimate_type),
'allenccf_align')
assert pathlib.Path(volfl).exists()
vol = nib.load(volfl)
return vol
def fusi_get_probedepth_from_volume(self, probe_name, estimate_type='manual'):
'''
'''
fl = self.mk_filename_for_localdb('estimated_%s_3Dtrack_%s.hdf' % (probe_name, estimate_type),
'allenccf_align')
xyzmm = readers.hdf_load(fl, 'probe_tip_mm', verbose=self.verbose)
probe_depth = np.sqrt(np.sum(xyzmm**2))
return probe_depth
def fusi_get_slice_probedepth_and_voxels(self,
slice_number,
probe_name,
widthmm=0.5,
estimate_type='manual',
remove_outside=True):
'''
'''
probe_mask = self.fusi_get_probe_master_mask(probe_name)
vol = self.fusi_get_probedepth_volume(
probe_name, estimate_type=estimate_type)
arr = np.asarray(vol.get_fdata())
volxyzmm = vol.affine[np.diag_indices_from(np.eye(3))]
del(vol)
info = self.log_load_section('fusi')
slices_mm = np.asarray(info['slice_positions'])
ystack_mm = np.asarray(info['ystack_positions'])
ystack_idx = np.asarray(
[idx for idx, t in enumerate(ystack_mm) if t in slices_mm])
ystack_nslices = len(ystack_mm)
# determine volume axis for slices
axis = np.asarray([idx == ystack_nslices for idx in arr.shape])
assert axis.sum() == 1
axis = int(axis.nonzero()[0])
axis_nvox = arr.shape[axis]
axis_mm = axis_nvox*volxyzmm[axis]
slice_position = slices_mm[slice_number]
slice_window_marker = np.logical_and(ystack_mm <= slice_position + widthmm/2.0,
ystack_mm >= slice_position - widthmm/2.0)
slicer = [slice(None)]*arr.ndim
slicer[axis] = slice_window_marker.nonzero()[0]
# Get relevant slices and put them in the first dimension
dat = arr[tuple(slicer)]
# remove probe section outside of penetration (marked with negative values)
dat = np.where(dat < 0, np.nan, dat)
# min so that we're biased towards the back of brain
dat = np.nanmin(dat, axis=axis).T
# also, force minimum value to be 0. it's not 0 b/c of voxelization
assert np.nanmin(dat) >= 0
if np.nanmin(dat) < 0.1: # [mm]
# if it's less than 100[um] set to zero
dat[np.unravel_index(np.nanargmin(dat), dat.shape)] = 0
# DV min/max
probe_location = np.abs(np.nanmax(dat, axis=1)) > 0
probe_mask[np.logical_not(probe_location), :] = 0
if remove_outside:
slice_blocks = self.fusi_get_slice_blocks(slice_number)
blockob = MetaBlock(
self.subject_name, self.session_name, slice_blocks[0], verbose=False)
outside_brain = blockob.fusi_get_outsidebrain_mask()
dat[outside_brain] = np.nan
probe_mask[outside_brain] = 0
npx_depth = self.fusi_get_probedepth_from_volume(probe_name)
probe_depth = dat[~np.isnan(dat)]
probe_bottom, probe_top = probe_depth.max(), probe_depth.min() # 0:top of brain
phy_limits = np.asarray(
[npx_depth - probe_bottom, npx_depth - probe_top])
if self.verbose:
print('slicepos=%0.02f[mm] and slices:' %
slice_position, ystack_mm[slice_window_marker])
print('probe depth:', phy_limits)
return dat, probe_mask, phy_limits
def fusi_get_coords_inplane_mm(self):
        '''Read image coordinates from the fUSi acquisition
Uses the size of a y-stack slice for all images.
Returns
-------
horiz_mm_coords, vert_mm_coords
'''
from fusilib import handler
paths = self.log_load_section('fusi')
path = misc.uri_convert_uri2local(paths['ystack_volume'])
data = handler.matlab_data(path)
hcoords = data['Doppler'].xAxis.copy()
vcoords = data['Doppler'].zAxis.copy()
del(data)
return hcoords, vcoords
@property
def fusi_aspect_ratio(self):
'''Assumes all images in this session have the same pixel resolution.
Returns
-------
aspect_ratio = vert_mm/horiz_mm
'''
hsize, vsize = self.fusi_image_pixel_mm
return vsize/hsize
@property
def fusi_image_pixel_mm(self):
'''
Returns
-------
horiz_size_mm, vert_size_mm
'''
hcoords, vcoords = self.fusi_get_coords_inplane_mm()
hsize = np.median(np.unique(np.diff(hcoords)))
vsize = np.median(np.unique(np.diff(vcoords)))
return hsize, vsize
@property
def analysis_blocks(self):
'''Blocks for analysis from the session LOG.
'''
return self.log_load_section('experiment', 'analysis_blocks')
def ephys_get_cluster_depths(self, probe_name):
'''
'''
nclusters = self.ephys_get_probe_nclusters(probe_name)
depths = np.zeros(nclusters)*np.nan
probe_object = self.ephys_get_probe_object(probe_name)
# load cluster depths
probe_object.tsv_cluster_info()
# depth_um = probe_object.tsv_cluster_info.depth.astype(np.float32)
if hasattr(probe_object.tsv_cluster_info, 'cluster_id'):
            # Latest PHY changed the header
depths[probe_object.tsv_cluster_info.cluster_id] = probe_object.tsv_cluster_info.depth.copy()
else:
depths[probe_object.tsv_cluster_info.id] = probe_object.tsv_cluster_info.depth.copy()
return depths
def ephys_get_mua_masks(self, probe_name, mua_window_um=500, neuropix_size_um=3840):
'''
'''
mua_depths = np.arange(0, neuropix_size_um, mua_window_um)
print(mua_depths)
mua_nchunks = len(mua_depths)
nclusters = self.ephys_get_probe_nclusters(probe_name)
        mask = np.zeros((mua_nchunks, nclusters)).astype(bool)
probe_object = self.ephys_get_probe_object(probe_name)
# load cluster depths
probe_object.tsv_cluster_info()
depth_um = probe_object.tsv_cluster_info.depth.astype(np.float32)
mua_masks = futils.fast_find_between(
depth_um, mua_depths, mua_window_um)
for muaidx, mua_mask in enumerate(mua_masks):
mask[muaidx, mua_mask] = True
return mask
def ephys_get_clusters_group(self, probe_name):
'''
'''
nclusters = self.ephys_get_probe_nclusters(probe_name)
probe_object = self.ephys_get_probe_object(probe_name)
        goods = np.zeros(nclusters).astype(bool)
# get good clusters
try:
probe_object.tsv_cluster_group() # load
grp = probe_object.tsv_cluster_group.group
cid = probe_object.tsv_cluster_group.cluster_id
        except Exception:
print('SPIKE SORTING NOT READY. USING AUTOMATED LABELS')
probe_object.tsv_cluster_info()
grp = probe_object.tsv_cluster_info.group
cid = probe_object.tsv_cluster_info.id
return cid, grp
def ephys_get_putative_class(self, probe_name, neuron_class):
'''
'''
assert neuron_class in ['excitatory', 'inhibitory']
from fusilib.io import phy
probe_object = self.ephys_get_probe_object(probe_name)
fl = probe_object.__pathobj__/'cluster_putative_neuronclass.csv'
data = {k: v for k, v in phy.yield_table_columns(fl, delimiter=',')}
nclusters = self.ephys_get_probe_nclusters(probe_name)
        goods = np.zeros(nclusters).astype(bool)
class_marker = data['putative_class'] == neuron_class
goods[data['cluster_id']] = class_marker
return goods
def ephys_get_good_clusters(self, probe_name):
'''
'''
nclusters = self.ephys_get_probe_nclusters(probe_name)
probe_object = self.ephys_get_probe_object(probe_name)
        goods = np.zeros(nclusters).astype(bool)
# get good clusters
try:
probe_object.tsv_cluster_group() # load
mask = probe_object.tsv_cluster_group.group != 'noise'
good_clusters = probe_object.tsv_cluster_group.cluster_id[mask]
        except Exception:
print('SPIKE SORTING NOT READY. USING AUTOMATED LABELS')
probe_object.tsv_cluster_info()
mask = probe_object.tsv_cluster_info.group != 'noise'
good_clusters = probe_object.tsv_cluster_info.id[mask]
goods[good_clusters] = True
return goods
def get_block(self, block_number=2):
'''
'''
assert block_number in self.experiment_blocks
return MetaBlock(self.subject_name, self.session_name, block_number)
def generate_fusi_task_blocks(self, task_name):
'''
'''
assert task_name in self.experiment_tasks
task_blocks = self.experiment_get_task_blocks(task_name)
for block in task_blocks:
yield self.get_block(block_number=block)
def generate_analysis_blocks(self):
'''
'''
for block_number in self.analysis_blocks:
block = MetaBlock(self.subject_name, self.session_name,
block_number, verbose=self.verbose)
if block.task_name in ('spontaneous', 'checkerboard'):
yield block
def generate_fusi_slice_blocks(self, slice_number=0):
'''
'''
slice_blocks = self.fusi_get_slice_blocks(slice_number)
print('Found %i blocks for slice=%i' %
(len(slice_blocks), slice_number), slice_blocks)
for block in slice_blocks:
yield MetaBlock(self.subject_name, self.session_name, block)
def generate_fusi_slice_blocks_method(self, slice_number,
method_name,
*args,
**kwargs):
'''
Example
-------
>>> # Get all the data from the slices
>>> fusi_iterator = subject.generate_fusi_slice_blocks_method(0, 'fusi_get_data')
        >>> times, data = next(fusi_iterator)
>>> ephys_iterator = sub.generate_fusi_slice_blocks_method(0, 'ephys_get_probe_spikes_object', 'probe00')
>>> probe = next(ephys_iterator)
>>> nmua, mua_matrix = probe.time_locked_mua_matrix(times, dt=0.3)
'''
slice_blocks = self.generate_fusi_slice_blocks(slice_number)
for block in slice_blocks:
method = getattr(block, method_name)
yield method(*args, **kwargs)
def generate_slice_fusi_and_ephys_data(self, slice_number, probe_name, **fusi_kwargs):
'''
Example
-------
>>> iterator = sub.generate_slice_fusi_and_ephys_data(0, 'probe00')
>>> times, fusi_data, nmua, mua_matrix = next(iterator)
Yields
------
fusi_times : 1D np.ndarray, (t,)
fusi_data : 3D np.ndarray, (t, nz, nx)
nmua : 1D np.ndarray, (m,)
mua_matrix : 2D np.ndarray, (t, m)
'''
fusi_iterator = self.generate_fusi_slice_blocks_method(
slice_number, 'fusi_get_data', **fusi_kwargs)
ephys_iterator = self.generate_fusi_slice_blocks_method(
slice_number, 'ephys_get_probe_spikes_object', probe_name)
fusi_dt = fusi_kwargs.get('dt_ms', 300)/1000.
for (times, fusi_data), probe_object in zip(fusi_iterator, ephys_iterator):
nmua, mua_matrix = probe_object.time_locked_mua_matrix(
times, dt=fusi_dt)
yield times, fusi_data, nmua, mua_matrix
def fusi_get_slice_blocks(self, slice_number=0):
'''
'''
fusi_block_slices = self.log_load_section(
'fusi', 'mapping_block2slices')
blocks = self.log_load_section('experiment', 'block_numbers')
slice_blocks = blocks[fusi_block_slices == slice_number]
return slice_blocks
def fusi_load_all_slice_data(self, slice_number,
dt_ms=300,
window_ms=400,
svddrop=15,
roi_name=None,
concatenate=True):
'''
Parameters
----------
slice_number (int): Index of the slice we want
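        Example (illustrative; placeholder subject/session)
        -------
        >>> session = MetaSession('subject01', '2020-01-01')
        >>> slice_times, slice_data = session.fusi_load_all_slice_data(
        ...     0, dt_ms=300, window_ms=400, svddrop=15)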
'''
fusi_block_slices = self.log_load_section(
'fusi', 'mapping_block2slices')
blocks = self.log_load_section('experiment', 'block_numbers')
slice_blocks = blocks[fusi_block_slices == slice_number]
print(slice_blocks, slice_number, fusi_block_slices)
slice_times = []
slice_data = []
for bdx, block_number in enumerate(slice_blocks):
assert np.allclose(block_number, int(block_number))
block_number = int(block_number)
subject_block = MetaBlock(self.subject_name,
self.session_name,
block_number,
root=self.root.parent)
print(subject_block)
# Load fUSi data
times, data = subject_block.fusi_get_data(dt_ms=dt_ms,
window_ms=window_ms,
svddrop=svddrop)
if bdx > 0 and concatenate:
# changes times to be continuous with last block
last_times = slice_times[bdx-1][-2:]
times += last_times[-1] + np.diff(last_times) - times.min()
if roi_name is not None:
if 'probe' in roi_name:
mask = subject_block.fusi_get_probe_slice_mask(roi_name)
                    data = data[..., mask.astype(bool)]
else:
raise ValueError('Unknown fUSi mask: %s' % roi_name)
slice_times.append(times)
slice_data.append(data)
if concatenate:
slice_times = np.hstack(slice_times)
slice_data = np.vstack(slice_data)
return slice_times, slice_data
def load_physio_data(self,
slice_number,
dt_ms=300,
window_ms=400,
svddrop=15,
trim_beg=None,
trim_end=None,
concatenate=True):
'''
Parameters
----------
slice_number (scalar) : slice code
dt_ms (scalar) : new sampling rate in [milliseconds]
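        Example (illustrative; placeholder subject/session)
        -------
        >>> session = MetaSession('subject01', '2020-01-01')
        >>> times, heartbeat = session.load_physio_data(0, dt_ms=300)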
'''
from fusilib import resampling
fusi_block_slices = self.log_load_section(
'fusi', 'mapping_block2slices')
blocks = self.log_load_section('experiment', 'block_numbers')
slice_blocks = blocks[fusi_block_slices == slice_number]
print(slice_blocks, slice_number, fusi_block_slices)
timestamps = []
slice_physio_data = []
for bdx, block_number in enumerate(slice_blocks):
assert np.allclose(block_number, int(block_number))
block_number = int(block_number)
subject_block = MetaBlock(self.subject_name,
self.session_name,
block_number,
root=self.root.parent)
print(subject_block)
# Load fUSi data
fusi_times, _ = subject_block.fusi_get_data(dt_ms=dt_ms,
window_ms=window_ms,
svddrop=svddrop)
physio_times, physio_data = subject_block.fusi_get_heartbeat_estimate()
# resample physio data to fUSi frequency
physio_data = resampling.lanczos_resample(dt_ms/1000.,
physio_data[..., None],
physio_times,
fusi_times).squeeze()
if bdx > 0 and concatenate:
# changes times to be continuous with last block
last_times = timestamps[bdx-1][-2:]
fusi_times += last_times[-1] + \
np.diff(last_times) - fusi_times.min()
# trim individual blocks
if trim_beg:
fusi_times = fusi_times[trim_beg:]
physio_data = physio_data[trim_beg:]
if trim_end:
fusi_times = fusi_times[:trim_end]
physio_data = physio_data[:trim_end]
timestamps.append(fusi_times)
slice_physio_data.append(physio_data)
if concatenate:
timestamps = np.hstack(timestamps)
slice_physio_data = np.hstack(slice_physio_data)
return timestamps, slice_physio_data
def load_joint_data(self,
slice_number,
probe_name,
dt_ms=300,
window_ms=400,
svddrop=15,
freq_cutoffhz=15,
roi_name=None,
trim_beg=None,
trim_end=None,
recache=False,
mirrored=False,
concatenate=True):
'''
Parameters
----------
slice_number (int): Index of the slice we want
probe_name (str)
trim_beg: data[beg:]
trim_end: data[:end]
Returns
-------
times
fusi_data
spike_data
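        Example (illustrative; IDs and settings are placeholders)
        -------
        >>> session = MetaSession('subject01', '2020-01-01')
        >>> times, fusi_data, spike_data = session.load_joint_data(
        ...     0, 'probe00', dt_ms=300, window_ms=400, svddrop=15)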
'''
params = OrderedDict(locals())
params['path'] = self.session_path
params['name'] = 'joint_data'
cache_file = params2cache(params)
print(cache_file)
if cache_file.exists() and recache is False:
timestamps = readers.hdf_load(cache_file, 'timestamps')
slice_fusi_data = readers.hdf_load(cache_file, 'slice_fusi_data')
spike_data = readers.hdf_load(cache_file, 'spike_data')
return timestamps, slice_fusi_data, spike_data
fusi_block_slices = self.log_load_section(
'fusi', 'mapping_block2slices')
blocks = self.log_load_section('experiment', 'block_numbers')
slice_blocks = blocks[fusi_block_slices == slice_number]
nclusters = self.ephys_get_probe_nclusters(probe_name)
print(slice_blocks, slice_number, fusi_block_slices)
timestamps = []
slice_fusi_data = []
spike_data = []
for bdx, block_number in enumerate(slice_blocks):
assert np.allclose(block_number, int(block_number))
block_number = int(block_number)
subject_block = MetaBlock(self.subject_name,
self.session_name,
block_number,
root=self.root.parent)
print(subject_block)
# Load fUSi data
if not np.isscalar(freq_cutoffhz) and roi_name is not None:
assert len(freq_cutoffhz) == 2
# Bandpassed data is stored for ROIs
times, fusi_data = subject_block.fusi_get_data(dt_ms=dt_ms,
window_ms=window_ms,
svddrop=svddrop,
freq_cutoffhz=freq_cutoffhz,
roi_name=roi_name,
mirrored=mirrored)
else:
# WHOLE BRAIN
times, fusi_data = subject_block.fusi_get_data(dt_ms=dt_ms,
window_ms=window_ms,
svddrop=svddrop,
freq_cutoffhz=freq_cutoffhz)
spike_times, spike_clusters = subject_block.ephys_get_probe_data(
probe_name)
spike_matrix = futils.bin_spikes(times,
dt_ms/1000.,
spike_times,
spike_clusters,
nclusters=nclusters)
if bdx > 0 and concatenate:
# changes times to be continuous with last block
last_times = timestamps[bdx-1][-2:]
times += last_times[-1] + np.diff(last_times) - times.min()
if roi_name is not None:
if ('probe' in roi_name) and np.isscalar(freq_cutoffhz):
# only computed for these
# mask = subject_block.fusi_get_probe_slice_mask(roi_name) #
mask = subject_block.fusi_get_probe_master_mask(roi_name)
if mirrored:
# CONTROL: mirror and flip mask
mask = mask[::-1, ::-1]
                    fusi_data = fusi_data[..., mask.astype(bool)]
elif (('probe' in roi_name) or ('outcenter' in roi_name)) and isinstance(freq_cutoffhz, (tuple, list)):
# data is already masked
pass
else:
raise ValueError('Unknown fUSi mask: %s' % roi_name)
# trim individual blocks
if trim_beg:
times = times[trim_beg:]
fusi_data = fusi_data[trim_beg:]
spike_matrix = spike_matrix[trim_beg:]
if trim_end:
times = times[:trim_end]
fusi_data = fusi_data[:trim_end]
spike_matrix = spike_matrix[:trim_end]
timestamps.append(times)
slice_fusi_data.append(fusi_data)
spike_data.append(spike_matrix)
if concatenate:
timestamps = np.hstack(timestamps)
spike_data = np.vstack(spike_data)
slice_fusi_data = np.vstack(slice_fusi_data)
readers.hdf_dump(cache_file, {'timestamps': timestamps,
'slice_fusi_data': slice_fusi_data,
'spike_data': spike_data})
return timestamps, slice_fusi_data, spike_data
def load_joint_spectrogram_data(self,
slice_number,
probe_name,
dt_ms=300,
window_ms=400,
roi_name='probe00',
trim_beg=None,
trim_end=None,
rawfft=False,
rawpsd=False,
concatenate=True,
recache=False):
'''
Parameters
----------
slice_number (int): Index of the slice we want
probe_name (str)
trim_beg: data[beg:]
trim_end: data[:end]
Returns
-------
times
fusi_data
spike_data
'''
params = OrderedDict(locals())
params['path'] = self.session_path
cache_file = params2cache(params)
print(cache_file)
if cache_file.exists() and recache is False:
timestamps = readers.hdf_load(cache_file, 'timestamps')
slice_fusi_data = readers.hdf_load(cache_file, 'slice_fusi_data')
spike_data = readers.hdf_load(cache_file, 'spike_data')
return timestamps, slice_fusi_data, spike_data
fusi_block_slices = self.log_load_section(
'fusi', 'mapping_block2slices')
blocks = self.log_load_section('experiment', 'block_numbers')
slice_blocks = blocks[fusi_block_slices == slice_number]
nclusters = self.ephys_get_probe_nclusters(probe_name)
print(slice_blocks, slice_number, fusi_block_slices)
timestamps = []
slice_fusi_data = []
spike_data = []
for bdx, block_number in enumerate(slice_blocks):
assert np.allclose(block_number, int(block_number))
block_number = int(block_number)
subject_block = MetaBlock(self.subject_name,
self.session_name,
block_number,
root=self.root.parent)
print(subject_block)
# Load fUSi data
times, fusi_data = subject_block.fusi_get_roi_spectrogram(dt_ms=dt_ms,
window_ms=window_ms,
roi_name=roi_name,
rawfft=rawfft,
rawpsd=rawpsd,
)
spike_times, spike_clusters = subject_block.ephys_get_probe_data(
probe_name)
spike_matrix = futils.bin_spikes(times,
dt_ms/1000.,
spike_times,
spike_clusters,
nclusters=nclusters)
if bdx > 0 and concatenate:
# changes times to be continuous with last block
last_times = timestamps[bdx-1][-2:]
times += last_times[-1] + np.diff(last_times) - times.min()
# trim individual blocks
if trim_beg:
times = times[trim_beg:]
fusi_data = fusi_data[trim_beg:]
spike_matrix = spike_matrix[trim_beg:]
if trim_end:
times = times[:trim_end]
fusi_data = fusi_data[:trim_end]
spike_matrix = spike_matrix[:trim_end]
timestamps.append(times)
slice_fusi_data.append(fusi_data)
spike_data.append(spike_matrix)
if concatenate:
timestamps = np.hstack(timestamps)
spike_data = np.vstack(spike_data)
slice_fusi_data = np.vstack(slice_fusi_data)
readers.hdf_dump(cache_file, {'timestamps': timestamps,
'slice_fusi_data': slice_fusi_data,
'spike_data': spike_data})
return timestamps, slice_fusi_data, spike_data
class MetaBlock(MetaSession):
    '''A single experimental block within a session.
    Extends MetaSession with block-level paths, timeline access and
    block-specific fUSI/ephys data loaders.
    '''
def __init__(self,
subject_name,
session_name,
block_name,
root=None,
verbose=False):
'''
'''
super(type(self), self).__init__(subject_name,
session_name,
root=root,
verbose=verbose)
block_name = str(block_name)
block_number = int(block_name)
block_path = self.session_path.joinpath(block_name)
self.verbose = verbose
self.block_name = block_name
self.block_number = block_number
self.block_path = block_path
assert block_path.exists()
def get_meta_session(self):
return MetaSession(self.subject_name, self.session_name, verbose=False)
@property
def meta_session(self):
return MetaSession(self.subject_name, self.session_name, verbose=False)
@property
def task_name(self):
'''
'''
session_info = self.log_load_section('experiment')
experiment_names = session_info['block_names']
for expname in experiment_names:
if int(self.block_number) in session_info[expname]:
experiment_name = expname
break
return experiment_name.split('_')[1]
@property
def slice_number(self):
'''
'''
slice_number = self.experiment_mapper_blocks2slices[self.block_number]
return slice_number
@property
def analysis_slicewidth_search(self):
search_widths = self.log_load_section('experiment', 'analysis_widthmm')
return float(search_widths[self.analysis_blocks == self.block_number])
def stimulus_load_times(self):
timeline_aligned = self.timeline_get_aligned_times()
# load timeline
self.localdb_timeline_load_block()
# get stimulus times
self.timeline.load_photod_stimuli(timeline_aligned)
stimulus_frame_times = self.timeline.phd_frame_times.copy()
stimulus_start_times = self.timeline.phd_stim_start.copy()
stimulus_end_times = self.timeline.phd_stim_end.copy()
return stimulus_start_times, stimulus_end_times
def stimulus_checkerboard_times(self):
'''
'''
stimulus_start_times, stimulus_end_times = self.stimulus_load_times()
nstimuli, nreps = stimulus_start_times.shape
ordered_times = np.sort(stimulus_start_times.T.reshape(
int(nreps/2), int(nstimuli*2)), 1).T
self.timeline.load_protocol()
print(self.timeline.protocol.pardefs)
stimulus_parameters = self.timeline.protocol.pars
xypos = stimulus_parameters[1:5].T # (xleft, right, bottom, top)
contrast = stimulus_parameters[[-1]].T # contrast
stimulus_params = np.hstack([xypos, contrast])
checkerboard_params, trial_ids = np.unique(
stimulus_params, axis=0, return_inverse=True)
checkerboard_ids = np.unique(trial_ids)
trial_names = {trialid: str(
checkerboard_params[trialid]) for trialid in np.unique(trial_ids)}
stimulus_matrix = np.asarray([trial_ids]*nreps).T
stimulus_onsets = np.asarray([stimulus_start_times[stimulus_matrix == cid]
for cid in checkerboard_ids]).T
return stimulus_start_times, stimulus_matrix
def stimulus_checkerboard_repeats(self):
'''
'''
stimulus_start_times, stimulus_matrix = self.stimulus_checkerboard_times()
repeat_ids = np.asarray([np.hstack([forward, backward]) for forward, backward in
zip(stimulus_matrix[:, ::2].T, stimulus_matrix[:, 1::2][::-1].T)]).T
# second half is repeated backwards
assert np.allclose(repeat_ids[:20, 0], repeat_ids[20:, 0][::-1])
repeat_times = np.asarray([np.hstack([forward, backward]) for forward, backward in
zip(stimulus_start_times[:, ::2].T, stimulus_start_times[:, 1::2][::-1].T)]).T
return repeat_times, repeat_ids
def mk_filename_for_localdb(self, filename, subfolder=None, mkdir=False):
date_txt = misc.date_tuple2isoformat(self.date_tuple)
flname = '{date}_{block}_{subject}_{fl}'.format(date=date_txt,
block=self.block_name,
subject=self.subject_name,
fl=filename)
if subfolder is None:
outpath = self.block_path
else:
outpath = self.block_path.joinpath(subfolder)
if mkdir:
outpath.mkdir(exist_ok=True)
return outpath.joinpath(flname)
def __str__(self):
info = (self.subject_name, self.session_name, self.block_name)
return 'subject=%s, session=%s, block=%s' % info
def __repr__(self):
info = (__name__, type(self).__name__,
self.subject_name, self.session_name, self.block_name)
return '<%s.%s [%s: session=%s, block=%s]>' % info
@property
def block_paths(self):
return self.get_block_paths(self.block_name)
def _get_hdf_field(self, filename, field):
'''
'''
return readers.hdf_load(filename, field)
def ephys_get_probe_spikes_object(self, probe_name, good_clusters=None):
'''
'''
spike_times, spike_clusters = self.ephys_get_probe_data(probe_name)
nclusters = self.ephys_get_probe_nclusters(probe_name)
if good_clusters is None:
good_clusters = self.ephys_get_good_clusters(probe_name)
cluster_depths = self.ephys_get_cluster_depths(probe_name)
from fusilib.io import spikes
return spikes.ProbeSpikes(spike_times, spike_clusters,
nclusters=nclusters,
good_clusters=good_clusters,
cluster_depths=cluster_depths)
def ephys_get_mua(self, probe_name, times, dt, **kwargs):
'''
'''
probe_object = self.ephys_get_probe_spikes_object(probe_name)
return probe_object.time_locked_mua_matrix(times, dt, **kwargs)
def ephys_load_spikes(self, probe_name):
'''
'''
filename = self.block_paths['%s_spikes' % probe_name]
clusters = hdf_load(filename, 'spike_clusters')
sample_ratehz = hdf_load(filename, 'phy_sample_ratehz')
# spike times are stored in units of sample rate
times = hdf_load(filename, 'spike_times')/sample_ratehz
dotdict = misc.DotDict(clusters=clusters,
times=times,
sample_ratehz=sample_ratehz)
setattr(self, probe_name, dotdict)
def ephys_load_digital(self, probe_name):
'''
'''
filename = self.block_paths['%s_' % probe_name]
clusters = hdf_load(filename, 'spike_digital')
sample_ratehz = hdf_load(filename, 'phy_sample_ratehz')
# spike times are stored in units of sample rate
times = hdf_load(filename, 'spike_times')/sample_ratehz
dotdict = misc.DotDict(clusters=clusters,
times=times,
sample_ratehz=sample_ratehz)
setattr(self, probe_name, dotdict)
def localdb_timeline_load_block(self):
'''
'''
timeline = righw.ExperimentData(self.subject_name,
expnum=int(self.block_name),
date=self.date_tuple,
root=self.root.parent)
setattr(self, 'timeline', timeline)
def fusi_get_slice_probedepth_and_voxels(self,
probe_name,
widthmm=None,
estimate_type='manual',
remove_outside=True):
'''
'''
if widthmm is None:
widthmm = self.analysis_slicewidth_search
slice_number = self.slice_number
out = self.meta_session.fusi_get_slice_probedepth_and_voxels(slice_number,
probe_name,
widthmm=widthmm,
estimate_type=estimate_type,
remove_outside=remove_outside)
return out
def fusi_get_probe_allen_mask(self, probe_name, allen_area_name=None, fusi_mirror=False, criterion=1):
'''
'''
from fusilib import allen
# OLD:'fusi_%s_section_in_probe_roi.hdf'%FUSI_PROBE_MASK_NAME)
probe_mask = self.meta_session.fusi_get_probe_master_mask(probe_name)
brain_mask = np.logical_not(self.fusi_get_outsidebrain_mask())
pmask = np.logical_and(brain_mask, probe_mask)
# ROI voxels in Allen Atlas
allenccf_areas = self.meta_session.fusi_get_allenccf_slices(
self.slice_number)
area_mask = allen.mk_area_mask_from_aligned(
allenccf_areas, allen_area_name, verbose=False)
# Neuropix fUSI track within allen area ROI
pmask = np.logical_and(area_mask, pmask)
if fusi_mirror is True:
# Mirror 2D projection for bilateral analysis
# (This way b/c the probe track might not hit the area if mirrored first)
pmask = pmask[:, ::-1]
# 3D projection of NPx probe in fUSI
# slice_position_mm = subject_block.meta_session.fusi_slice_position(subject_block.slice_number)
slice_search_widthmm = self.analysis_slicewidth_search
fusi_probe_depth, fusi_probe_mask, probe_limits_mm = self.meta_session.fusi_get_slice_probedepth_and_voxels(
self.slice_number, probe_name, widthmm=slice_search_widthmm)
# Percentage of 3D NPx probe mask inside the Allen Mask
pct_probe_in_roi = (np.logical_and(
fusi_probe_mask, pmask).sum()/fusi_probe_mask.sum())*100
return None if pct_probe_in_roi < criterion else pmask
def fusi_get_allen_area_mask(self,
allen_area_name=None,
fusi_mirror=False,
hemisphere=None,
exclude_probe=None):
'''
exclude_probe : str
The probe name ROI to exclude (e.g. 'probe00')
If given, the probe ROI is removed from the area ROI.
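        Example (illustrative; the Allen area name 'VISp' is an assumption, not a
        value defined by this module)
        -------
        >>> block = MetaBlock('subject01', '2020-01-01', 2)   # placeholder IDs
        >>> roi_mask = block.fusi_get_allen_area_mask(allen_area_name='VISp',
        ...                                            hemisphere='LH')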
'''
from fusilib import allen
# OLD:'fusi_%s_section_in_probe_roi.hdf'%FUSI_PROBE_MASK_NAME)
brain_mask = np.logical_not(self.fusi_get_outsidebrain_mask())
# ROI voxels in Allen Atlas
allenccf_areas = self.meta_session.fusi_get_allenccf_slices(
self.slice_number)
area_mask = allen.mk_area_mask_from_aligned(
allenccf_areas, allen_area_name, verbose=False)
# Neuropix fUSI track within allen area ROI
allen_roi_mask = np.logical_and(area_mask, brain_mask)
if fusi_mirror is True:
# Mirror 2D projection for bilateral analysis
# (This way b/c the probe track might not hit the area if mirrored first)
allen_roi_mask = allen_roi_mask[:, ::-1]
if hemisphere is not None:
hemi_mask = self.fusi_get_hemisphere_mask(hemisphere)
allen_roi_mask = np.logical_and(allen_roi_mask, hemi_mask)
if exclude_probe is not None:
probe_mask = self.fusi_get_probe_allen_mask(exclude_probe,
allen_area_name=allen_area_name,
fusi_mirror=fusi_mirror)
allen_roi_mask = np.logical_and(
allen_roi_mask, np.logical_not(probe_mask))
return allen_roi_mask
def fusi_get_outsidebrain_mask(self, verbose=False):
'''
'''
fl = self.mk_filename_for_localdb(
'fusi_outsidebrain_roi.hdf', subfolder='fusi')
mask = hdf_load(str(fl), 'outsidebrain_mask', verbose=verbose)
        return mask.astype(bool)
def fusi_get_hemisphere_mask(self, hemisphere, mask_inside_brain=True):
'''Get a mask of the left or right hemisphere:
hemisphere : str,
One of 'LH' or 'RH'
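        Example (illustrative; placeholder IDs)
        -------
        >>> block = MetaBlock('subject01', '2020-01-01', 2)
        >>> lh_mask = block.fusi_get_hemisphere_mask('LH')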
'''
brain_mask = np.logical_not(self.fusi_get_outsidebrain_mask())
midline_mask = self.fusi_get_midline_mask()
horiz_midpoint = int(np.median(midline_mask.nonzero()[1]))
hemisphere_mask = np.zeros_like(brain_mask)
if hemisphere == 'RH':
hemisphere_mask[:, horiz_midpoint:] = True
else:
hemisphere_mask[:, :horiz_midpoint] = True
if mask_inside_brain:
hemisphere_mask = np.logical_and(brain_mask, hemisphere_mask)
return hemisphere_mask
def fusi_get_midline_mask(self):
'''
'''
fl = self.mk_filename_for_localdb(
'fusi_midline_roi.hdf', subfolder='fusi')
# name was saved incorrectly =S
mask = hdf_load(str(fl), 'outsidebrain_mask')
        return mask.astype(bool)
def fusi_get_raw_data_object(self, dataroot='.', nmax=1):
'''
Parameters
----------
dataroot (str) : Path for DAT files
nmax (int) : Number of images to load
'''
from fusilib.io import contacq
dataroot = pathlib.Path(dataroot)
data_path = dataroot.joinpath(self.subject_name,
misc.date_tuple2isoformat(
self.date_tuple),
self.block_name)
fusi_loader = contacq.MetaLoader(
data_path, verbose=True, nmaximum=nmax)
return fusi_loader
def fusi_get_heartbeat_estimate(self, key='filtered_vascular_timecourse_raw', clip=True):
'''
'''
fl = self.block_path.joinpath('fusi', 'physio_hearbeat_estimate.hdf')
physio = hdf_load(fl, key)
fusi_loader = self.fusi_get_raw_data_object()
START_DROPPED_FRAMES = 5
# the first 5 "frames" (i.e. data bursts) have no TTL
dropped_samples = START_DROPPED_FRAMES*fusi_loader.images_per_frame
physio = physio[dropped_samples:]
physio_dtms = 2 # 500[Hz]
ttl_dtms = 100 # 10[Hz]
# these are at a fixed 100ms
fusi_ttl_onsets = self.fusi_get_ttl_times()
from scipy import interpolate
ttl_samples = np.arange(len(fusi_ttl_onsets))
interpolator = interpolate.interp1d(
ttl_samples, fusi_ttl_onsets, fill_value='extrapolate')
# factor = dt_ms/ttl_dt
factor = physio_dtms/ttl_dtms
new_samples = np.arange(physio.shape[0])*factor
physio_times = interpolator(new_samples)
physio /= np.std(physio)
if clip:
physio = np.clip(physio, -5, 5)
return physio_times, physio
def fusi_get_probe_slice_mask(self, probe_name,
slice_name=None,
roi_file_name='fusi_probe_roi.hdf',
verbose=False):
'''
'''
fusi_block_slices = self.log_load_section(
'fusi', 'mapping_block2slices')
block_index = self.block_number - 1
fusi_slice_number = fusi_block_slices[block_index]
if slice_name is None:
slice_name = 'slice%02i' % fusi_slice_number
if verbose:
print(slice_name, block_index)
section_header = 'location_%s_%s' % (slice_name, probe_name)
localizer_info = self.log_load_section(section_header)
if verbose:
pprint(localizer_info)
localizer_subfolder = str(localizer_info['folder_name'])
mask_flname = super(type(self), self).mk_filename_for_localdb(roi_file_name,
subfolder=localizer_subfolder)
# return mask_flname
mask = hdf_load(str(mask_flname), 'mask', verbose=verbose)
if verbose:
print('fUSi probe ROI voxels: %i' % mask.sum())
return mask
def ephys_get_probe_data(self, probe_name, verbose=True):
'''
'''
flname = self.mk_filename_for_localdb(
'%s_spikes.hdf' % probe_name, subfolder='aligned2pxi')
assert flname.exists()
if verbose:
print(f'Loading spike data: {pathlib.Path(flname).name}...')
clusters = hdf_load(flname, 'clusters')
times = hdf_load(flname, 'times')
return times, clusters
def timeline_get_aligned_times(self, key='times'):
timeline_flname = self.mk_filename_for_localdb('timeline_aligned2pxi.hdf',
subfolder='aligned2pxi')
timeline_newtimes = readers.hdf_load(timeline_flname, key)
return timeline_newtimes
def fusi_get_ttl_times(self):
'''
'''
self.localdb_timeline_load_block()
fusi_ttl_signal = self.timeline.get_timeline_data('neuralFrames')[1]
fusi_ttl_signal = sync.remove_single_datapoint_onsets(fusi_ttl_signal)
# load new timeline times (aligned to PXI NI DAQ)
timeline_newtimes = self.timeline_get_aligned_times()
# compute the onsets from the TTL using the new times
ttl_dt = 100 # TTL is updated every 100[ms]
fusi_ttl_onsets = sync.digital2times(
timeline_newtimes, fusi_ttl_signal)[0]
return fusi_ttl_onsets
def fusi_get_times(self, **kwargs):
'''Call to fusi_get_data that only returns the times
'''
return self.fusi_get_data(**kwargs)[0]
def fusi_get_data(self, dt_ms=300, window_ms=400, svddrop=15,
freq_cutoffhz=15, roi_name=None, mirrored=False, verbose=True):
'''Specify the parameters of the preprocessed fUSi data to load
Parameters
----------
dt_ms : sample interval [in millisecond]
window_ms : window over which filtering and svd drop is applied
        svddrop : number of SVD components that are dropped
        roi_name : name of ROI
        mirrored : if True, load the mirrored (control) region
Returns
-------
fusi_times : 1D np.ndarray (n,)
data time-stamps aligned to PXI
fusi_data : 3D np.ndarray (n, vdim, hdim):
            fUSi data preprocessed with the specified parameters
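        Example (illustrative; placeholder IDs, assumes preprocessed files exist
        for these settings)
        -------
        >>> block = MetaBlock('subject01', '2020-01-01', 2)
        >>> times, data = block.fusi_get_data(dt_ms=300, window_ms=400, svddrop=15)
        >>> data.shape          # (n, vdim, hdim)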
'''
# get time stamps
##############################
# load the neural frames TTL signal from timeline
self.localdb_timeline_load_block()
fusi_ttl_signal = self.timeline.get_timeline_data('neuralFrames')[1]
fusi_ttl_signal = sync.remove_single_datapoint_onsets(fusi_ttl_signal)
# load new timeline times (aligned to PXI NI DAQ)
timeline_newtimes = self.timeline_get_aligned_times()
# compute the onsets from the TTL using the new times
ttl_dt = 100 # TTL is updated every 100[ms]
fusi_ttl_onsets = sync.digital2times(
timeline_newtimes, fusi_ttl_signal)[0]
# load the preprocessed fusi data
########################################
if np.isscalar(freq_cutoffhz):
assert freq_cutoffhz == 15 # UNDEFINED STORAGE FOR OTHERS
if (svddrop != 5 and dt_ms == 50) or (svddrop != 15 and dt_ms == 300):
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_svddrop{ndropped}_highpasscutoff%0.02fHz.hdf' % freq_cutoffhz
flname = pattern.format(yyyymmdd=misc.date_tuple2number(self.date_tuple),
blocknum='%02i' % self.block_number,
dt='%03i' % dt_ms,
window='%04i' % window_ms,
ndropped=svddrop if svddrop is None else '%03i' % svddrop)
else:
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_svddrop{ndropped}.hdf'
flname = pattern.format(yyyymmdd=misc.date_tuple2number(self.date_tuple),
blocknum='%02i' % self.block_number,
dt='%03i' % dt_ms,
window='%04i' % window_ms,
ndropped='%03i' % svddrop)
elif isinstance(freq_cutoffhz, (tuple, list)):
assert len(freq_cutoffhz) == 2
low_cutoff, high_cutoff = freq_cutoffhz
# # OLD
# pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_svddrop{ndropped}_bandpassL{low}hzH{high}hz.hdf'
if roi_name is None:
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_svddrop{ndropped}_bandpassed.hdf'
flname = pattern.format(yyyymmdd=misc.date_tuple2number(self.date_tuple),
blocknum='%02i' % self.block_number,
dt='%03i' % dt_ms,
window='%04i' % window_ms,
ndropped=('%03i' % svddrop if isinstance(
svddrop, int) else svddrop),
# low='%03i'%low_cutoff,
# high='%03i'%high_cutoff
)
else:
# for probe00 and probe01
if mirrored:
# control data: mirrored and flipped
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_svddrop{ndropped}_bandpassed_{roi_name}_mirrored.hdf'
else:
# standard data
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_svddrop{ndropped}_bandpassed_{roi_name}.hdf'
flname = pattern.format(yyyymmdd=misc.date_tuple2number(self.date_tuple),
blocknum='%02i' % self.block_number,
dt='%03i' % dt_ms,
window='%04i' % window_ms,
ndropped=('%03i' % svddrop if isinstance(
svddrop, int) else svddrop),
roi_name=roi_name)
fusi_preproc_fl = self.block_path.joinpath('fusi', flname)
if not fusi_preproc_fl.exists():
raise IOError('Does not exist: %s' % fusi_preproc_fl)
if verbose:
print(
f'Loading fUSI data: {pathlib.Path(fusi_preproc_fl).name}...')
if np.isscalar(freq_cutoffhz):
fusi_data = readers.hdf_load(fusi_preproc_fl, 'data')
else:
name = 'data_bandpass_%i_%i' % freq_cutoffhz
fusi_data = readers.hdf_load(fusi_preproc_fl, name)
if fusi_data.ndim == 4:
fusi_data = fusi_data.mean(1)
# get upsampled time stamps from the TTL
from scipy import interpolate
ttl_samples = np.arange(len(fusi_ttl_onsets))
interpolator = interpolate.interp1d(
ttl_samples, fusi_ttl_onsets, fill_value='extrapolate')
factor = dt_ms/ttl_dt
new_samples = np.arange(fusi_data.shape[0])*factor
fusi_times = interpolator(new_samples)
return fusi_times, fusi_data
def fusi_get_roi_spectrogram(self, dt_ms=300, window_ms=400, roi_name='probe00',
rawfft=False, rawpsd=False):
'''Specify the parameters of the preprocessed fUSi data to load
Parameters
----------
dt_ms : sample interval [in millisecond]
        window_ms : window over which the spectrogram is computed
Returns
-------
fusi_times : 1D np.ndarray (n,)
data time-stamps aligned to PXI
fusi_data : 3D np.ndarray (n, vdim, hdim):
            fUSi ROI spectrogram preprocessed with the specified parameters
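        Example (illustrative; placeholder IDs)
        -------
        >>> block = MetaBlock('subject01', '2020-01-01', 2)
        >>> times, psd = block.fusi_get_roi_spectrogram(dt_ms=300, window_ms=400,
        ...                                             roi_name='probe00')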
'''
# get time stamps
##############################
# load the neural frames TTL signal from timeline
self.localdb_timeline_load_block()
fusi_ttl_signal = self.timeline.get_timeline_data('neuralFrames')[1]
fusi_ttl_signal = sync.remove_single_datapoint_onsets(fusi_ttl_signal)
# load new timeline times (aligned to PXI NI DAQ)
timeline_newtimes = self.timeline_get_aligned_times()
# compute the onsets from the TTL using the new times
ttl_dt = 100 # TTL is updated every 100[ms]
fusi_ttl_onsets = sync.digital2times(
timeline_newtimes, fusi_ttl_signal)[0]
# load the preprocessed fusi data
########################################
if rawfft:
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_fUSiROI{roi_name}_rawfft.hdf'
elif rawpsd:
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_fUSiROI{roi_name}_rawpsd.hdf'
else:
pattern = '{yyyymmdd}_sess{blocknum}_{dt}ms_window{window}ms_fUSiROI{roi_name}_psd.hdf'
flname = pattern.format(yyyymmdd=misc.date_tuple2number(self.date_tuple),
blocknum='%02i' % self.block_number,
dt='%03i' % dt_ms,
window='%04i' % window_ms,
roi_name=roi_name)
fusi_preproc_fl = self.block_path.joinpath('fusi', flname)
if not fusi_preproc_fl.exists():
raise IOError('Does not exist: %s' % fusi_preproc_fl)
print(fusi_preproc_fl)
fusi_data = readers.hdf_load(fusi_preproc_fl, 'data')
# get upsampled time stamps from the TTL
from scipy import interpolate
ttl_samples = np.arange(len(fusi_ttl_onsets))
interpolator = interpolate.interp1d(
ttl_samples, fusi_ttl_onsets, fill_value='extrapolate')
factor = dt_ms/ttl_dt
new_samples = np.arange(fusi_data.shape[0])*factor
fusi_times = interpolator(new_samples)
return fusi_times, fusi_data
def fusi_get_outbrainroi_mask(self):
'''
'''
        probe_mask = self.fusi_get_probe_master_mask('probe00').astype(bool)
zdim, xdim = probe_mask.nonzero()
length = np.sqrt((zdim.max() - zdim.min())**2 +
(xdim.max() - xdim.min())**2)
nvox = probe_mask.sum()
depth = int(nvox/length)
length = int(length)
leftmost_side = int((probe_mask.shape[1] - length)/2.)
outbrain_mask = self.fusi_get_outsidebrain_mask()
outbrain_top = np.logical_not(outbrain_mask).nonzero()[0].min()
outbrain_roi = np.zeros_like(outbrain_mask)
outbrain_roi[outbrain_top - depth: outbrain_top,
leftmost_side:leftmost_side + length] = True
return outbrain_roi
def ephys_get_lfp(self, band, probe_name):
'''
'''
flname = self.mk_filename_for_localdb(
'%s_lfp_bands_v01.hdf' % probe_name, 'lfp')
assert flname.exists()
available_bands = ['alpha', 'beta', 'gamma', 'hgamma']
assert band in available_bands
lfp_band = readers.hdf_load(flname, band).T
times = self.fusi_get_times()
assert len(times) == lfp_band.shape[0]
return times, lfp_band[:, :384]
[((490, 535), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['local_path', '*args'], {}), '(local_path, *args, **kwargs)\n', (506, 535), False, 'from fusilib.extras import readers\n'), ((972, 991), 'pathlib.Path', 'pathlib.Path', (['outfl'], {}), '(outfl)\n', (984, 991), False, 'import pathlib\n'), ((2938, 2961), 'numpy.unique', 'np.unique', (['slice_blocks'], {}), '(slice_blocks)\n', (2947, 2961), True, 'import numpy as np\n'), ((3267, 3310), 'numpy.asarray', 'np.asarray', (['all_slice_indeces'], {'dtype': 'np.int'}), '(all_slice_indeces, dtype=np.int)\n', (3277, 3310), True, 'import numpy as np\n'), ((6023, 6064), 'numpy.asarray', 'np.asarray', (["info['blocks_%s' % task_name]"], {}), "(info['blocks_%s' % task_name])\n", (6033, 6064), True, 'import numpy as np\n'), ((7887, 7908), 'numpy.vstack', 'np.vstack', (['slice_data'], {}), '(slice_data)\n', (7896, 7908), True, 'import numpy as np\n'), ((9815, 9837), 'numpy.vstack', 'np.vstack', (['blocks_data'], {}), '(blocks_data)\n', (9824, 9837), True, 'import numpy as np\n'), ((16005, 16047), 'fusilib.misc.date_tuple2isoformat', 'misc.date_tuple2isoformat', (['self.date_tuple'], {}), '(self.date_tuple)\n', (16030, 16047), False, 'from fusilib import misc, utils as futils\n'), ((16818, 16862), 'fusilib.misc.date_isoformat2tuple', 'misc.date_isoformat2tuple', (['self.session_name'], {}), '(self.session_name)\n', (16843, 16862), False, 'from fusilib import misc, utils as futils\n'), ((17450, 17491), 'fusilib.misc.date_tuple2yyyymmdd', 'misc.date_tuple2yyyymmdd', (['self.date_tuple'], {}), '(self.date_tuple)\n', (17474, 17491), False, 'from fusilib import misc, utils as futils\n'), ((20450, 20472), 'fusilib.io.phy.ProbeHandler', 'phy.ProbeHandler', (['path'], {}), '(path)\n', (20466, 20472), False, 'from fusilib.io import phy\n'), ((21324, 21350), 'fusilib.io.phy.RecordingHandler', 'phy.RecordingHandler', (['path'], {}), '(path)\n', (21344, 21350), False, 'from fusilib.io import phy\n'), ((21960, 21999), 'fusilib.io.spikeglx.read_metadata', 'spikeglx.read_metadata', (['pxinidaq_flname'], {}), '(pxinidaq_flname)\n', (21982, 21999), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((23125, 23138), 'pprint.pprint', 'pprint', (['paths'], {}), '(paths)\n', (23131, 23138), False, 'from pprint import pprint\n'), ((23190, 23236), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (["paths['fusi_data']"], {}), "(paths['fusi_data'])\n", (23216, 23236), False, 'from fusilib import misc, utils as futils\n'), ((23252, 23277), 'fusilib.handler.matlab_data', 'handler.matlab_data', (['path'], {}), '(path)\n', (23271, 23277), False, 'from fusilib import handler\n'), ((27753, 27803), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (["paths['ystack_volume']"], {}), "(paths['ystack_volume'])\n", (27779, 27803), False, 'from fusilib import misc, utils as futils\n'), ((27819, 27844), 'fusilib.handler.matlab_data', 'handler.matlab_data', (['path'], {}), '(path)\n', (27838, 27844), False, 'from fusilib import handler\n'), ((28422, 28472), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (["paths['ystack_volume']"], {}), "(paths['ystack_volume'])\n", (28448, 28472), False, 'from fusilib import misc, utils as futils\n'), ((28488, 28513), 'fusilib.handler.matlab_data', 'handler.matlab_data', (['path'], {}), '(path)\n', (28507, 28513), False, 'from fusilib import handler\n'), ((28580, 28607), 'numpy.asarray', 'np.asarray', (['[xmm, ymm, zmm]'], {}), '([xmm, ymm, zmm])\n', (28590, 28607), 
True, 'import numpy as np\n'), ((28654, 28707), 'fusilib.align.fusiarr2nii', 'align.fusiarr2nii', (['arr'], {'xyzmm': 'xyzmm', 'flips': '(1, 1, -1)'}), '(arr, xyzmm=xyzmm, flips=(1, 1, -1))\n', (28671, 28707), False, 'from fusilib import align\n'), ((29091, 29119), 'nibabel.load', 'nib.load', (['fl_resampled_atlas'], {}), '(fl_resampled_atlas)\n', (29099, 29119), True, 'import nibabel as nib\n'), ((29937, 29995), 'numpy.asarray', 'np.asarray', (['[(idx == ystack_nslices) for idx in arr.shape]'], {}), '([(idx == ystack_nslices) for idx in arr.shape])\n', (29947, 29995), True, 'import numpy as np\n'), ((31235, 31293), 'numpy.asarray', 'np.asarray', (['[(idx == ystack_nslices) for idx in arr.shape]'], {}), '([(idx == ystack_nslices) for idx in arr.shape])\n', (31245, 31293), True, 'import numpy as np\n'), ((32771, 32786), 'fusilib.align.allenccf_cmap', 'allenccf_cmap', ([], {}), '()\n', (32784, 32786), False, 'from fusilib.align import allenccf_cmap\n'), ((35252, 35267), 'nibabel.load', 'nib.load', (['volfl'], {}), '(volfl)\n', (35260, 35267), True, 'import nibabel as nib\n'), ((35574, 35632), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['fl', '"""probe_tip_mm"""'], {'verbose': 'self.verbose'}), "(fl, 'probe_tip_mm', verbose=self.verbose)\n", (35590, 35632), False, 'from fusilib.extras import readers\n'), ((36448, 36483), 'numpy.asarray', 'np.asarray', (["info['slice_positions']"], {}), "(info['slice_positions'])\n", (36458, 36483), True, 'import numpy as np\n'), ((36504, 36540), 'numpy.asarray', 'np.asarray', (["info['ystack_positions']"], {}), "(info['ystack_positions'])\n", (36514, 36540), True, 'import numpy as np\n'), ((36745, 36803), 'numpy.asarray', 'np.asarray', (['[(idx == ystack_nslices) for idx in arr.shape]'], {}), '([(idx == ystack_nslices) for idx in arr.shape])\n', (36755, 36803), True, 'import numpy as np\n'), ((37031, 37140), 'numpy.logical_and', 'np.logical_and', (['(ystack_mm <= slice_position + widthmm / 2.0)', '(ystack_mm >= slice_position - widthmm / 2.0)'], {}), '(ystack_mm <= slice_position + widthmm / 2.0, ystack_mm >= \n slice_position - widthmm / 2.0)\n', (37045, 37140), True, 'import numpy as np\n'), ((37472, 37502), 'numpy.where', 'np.where', (['(dat < 0)', 'np.nan', 'dat'], {}), '(dat < 0, np.nan, dat)\n', (37480, 37502), True, 'import numpy as np\n'), ((38602, 38663), 'numpy.asarray', 'np.asarray', (['[npx_depth - probe_bottom, npx_depth - probe_top]'], {}), '([npx_depth - probe_bottom, npx_depth - probe_top])\n', (38612, 38663), True, 'import numpy as np\n'), ((39249, 39299), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (["paths['ystack_volume']"], {}), "(paths['ystack_volume'])\n", (39275, 39299), False, 'from fusilib import misc, utils as futils\n'), ((39315, 39340), 'fusilib.handler.matlab_data', 'handler.matlab_data', (['path'], {}), '(path)\n', (39334, 39340), False, 'from fusilib import handler\n'), ((41172, 41217), 'numpy.arange', 'np.arange', (['(0)', 'neuropix_size_um', 'mua_window_um'], {}), '(0, neuropix_size_um, mua_window_um)\n', (41181, 41217), True, 'import numpy as np\n'), ((41640, 41701), 'fusilib.utils.fast_find_between', 'futils.fast_find_between', (['depth_um', 'mua_depths', 'mua_window_um'], {}), '(depth_um, mua_depths, mua_window_um)\n', (41664, 41701), True, 'from fusilib import misc, utils as futils\n'), ((57834, 57956), 'fusilib.extras.readers.hdf_dump', 'readers.hdf_dump', (['cache_file', "{'timestamps': timestamps, 'slice_fusi_data': slice_fusi_data, 'spike_data':\n spike_data}"], {}), "(cache_file, 
{'timestamps': timestamps, 'slice_fusi_data':\n slice_fusi_data, 'spike_data': spike_data})\n", (57850, 57956), False, 'from fusilib.extras import readers\n'), ((62171, 62293), 'fusilib.extras.readers.hdf_dump', 'readers.hdf_dump', (['cache_file', "{'timestamps': timestamps, 'slice_fusi_data': slice_fusi_data, 'spike_data':\n spike_data}"], {}), "(cache_file, {'timestamps': timestamps, 'slice_fusi_data':\n slice_fusi_data, 'spike_data': spike_data})\n", (62187, 62293), False, 'from fusilib.extras import readers\n'), ((65351, 65379), 'numpy.hstack', 'np.hstack', (['[xypos, contrast]'], {}), '([xypos, contrast])\n', (65360, 65379), True, 'import numpy as np\n'), ((65421, 65476), 'numpy.unique', 'np.unique', (['stimulus_params'], {'axis': '(0)', 'return_inverse': '(True)'}), '(stimulus_params, axis=0, return_inverse=True)\n', (65430, 65476), True, 'import numpy as np\n'), ((65517, 65537), 'numpy.unique', 'np.unique', (['trial_ids'], {}), '(trial_ids)\n', (65526, 65537), True, 'import numpy as np\n'), ((66321, 66378), 'numpy.allclose', 'np.allclose', (['repeat_ids[:20, 0]', 'repeat_ids[20:, 0][::-1]'], {}), '(repeat_ids[:20, 0], repeat_ids[20:, 0][::-1])\n', (66332, 66378), True, 'import numpy as np\n'), ((66722, 66764), 'fusilib.misc.date_tuple2isoformat', 'misc.date_tuple2isoformat', (['self.date_tuple'], {}), '(self.date_tuple)\n', (66747, 66764), False, 'from fusilib import misc, utils as futils\n'), ((67851, 67884), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['filename', 'field'], {}), '(filename, field)\n', (67867, 67884), False, 'from fusilib.extras import readers\n'), ((68349, 68481), 'fusilib.io.spikes.ProbeSpikes', 'spikes.ProbeSpikes', (['spike_times', 'spike_clusters'], {'nclusters': 'nclusters', 'good_clusters': 'good_clusters', 'cluster_depths': 'cluster_depths'}), '(spike_times, spike_clusters, nclusters=nclusters,\n good_clusters=good_clusters, cluster_depths=cluster_depths)\n', (68367, 68481), False, 'from fusilib.io import spikes\n'), ((69201, 69274), 'fusilib.misc.DotDict', 'misc.DotDict', ([], {'clusters': 'clusters', 'times': 'times', 'sample_ratehz': 'sample_ratehz'}), '(clusters=clusters, times=times, sample_ratehz=sample_ratehz)\n', (69213, 69274), False, 'from fusilib import misc, utils as futils\n'), ((69767, 69840), 'fusilib.misc.DotDict', 'misc.DotDict', ([], {'clusters': 'clusters', 'times': 'times', 'sample_ratehz': 'sample_ratehz'}), '(clusters=clusters, times=times, sample_ratehz=sample_ratehz)\n', (69779, 69840), False, 'from fusilib import misc, utils as futils\n'), ((71632, 71670), 'numpy.logical_and', 'np.logical_and', (['brain_mask', 'probe_mask'], {}), '(brain_mask, probe_mask)\n', (71646, 71670), True, 'import numpy as np\n'), ((71828, 71907), 'fusilib.allen.mk_area_mask_from_aligned', 'allen.mk_area_mask_from_aligned', (['allenccf_areas', 'allen_area_name'], {'verbose': '(False)'}), '(allenccf_areas, allen_area_name, verbose=False)\n', (71859, 71907), False, 'from fusilib import allen\n'), ((71989, 72021), 'numpy.logical_and', 'np.logical_and', (['area_mask', 'pmask'], {}), '(area_mask, pmask)\n', (72003, 72021), True, 'import numpy as np\n'), ((73641, 73720), 'fusilib.allen.mk_area_mask_from_aligned', 'allen.mk_area_mask_from_aligned', (['allenccf_areas', 'allen_area_name'], {'verbose': '(False)'}), '(allenccf_areas, allen_area_name, verbose=False)\n', (73672, 73720), False, 'from fusilib import allen\n'), ((73811, 73848), 'numpy.logical_and', 'np.logical_and', (['area_mask', 'brain_mask'], {}), '(area_mask, brain_mask)\n', (73825, 73848), 
True, 'import numpy as np\n'), ((75378, 75403), 'numpy.zeros_like', 'np.zeros_like', (['brain_mask'], {}), '(brain_mask)\n', (75391, 75403), True, 'import numpy as np\n'), ((76261, 76283), 'pathlib.Path', 'pathlib.Path', (['dataroot'], {}), '(dataroot)\n', (76273, 76283), False, 'import pathlib\n'), ((76543, 76601), 'fusilib.io.contacq.MetaLoader', 'contacq.MetaLoader', (['data_path'], {'verbose': '(True)', 'nmaximum': 'nmax'}), '(data_path, verbose=True, nmaximum=nmax)\n', (76561, 76601), False, 'from fusilib.io import contacq\n'), ((77417, 77493), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ttl_samples', 'fusi_ttl_onsets'], {'fill_value': '"""extrapolate"""'}), "(ttl_samples, fusi_ttl_onsets, fill_value='extrapolate')\n", (77437, 77493), False, 'from scipy import interpolate\n'), ((77701, 77715), 'numpy.std', 'np.std', (['physio'], {}), '(physio)\n', (77707, 77715), True, 'import numpy as np\n'), ((79741, 79779), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['timeline_flname', 'key'], {}), '(timeline_flname, key)\n', (79757, 79779), False, 'from fusilib.extras import readers\n'), ((80018, 80070), 'fusilib.io.sync.remove_single_datapoint_onsets', 'sync.remove_single_datapoint_onsets', (['fusi_ttl_signal'], {}), '(fusi_ttl_signal)\n', (80053, 80070), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((81633, 81685), 'fusilib.io.sync.remove_single_datapoint_onsets', 'sync.remove_single_datapoint_onsets', (['fusi_ttl_signal'], {}), '(fusi_ttl_signal)\n', (81668, 81685), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((82121, 82147), 'numpy.isscalar', 'np.isscalar', (['freq_cutoffhz'], {}), '(freq_cutoffhz)\n', (82132, 82147), True, 'import numpy as np\n'), ((85650, 85676), 'numpy.isscalar', 'np.isscalar', (['freq_cutoffhz'], {}), '(freq_cutoffhz)\n', (85661, 85676), True, 'import numpy as np\n'), ((86118, 86194), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ttl_samples', 'fusi_ttl_onsets'], {'fill_value': '"""extrapolate"""'}), "(ttl_samples, fusi_ttl_onsets, fill_value='extrapolate')\n", (86138, 86194), False, 'from scipy import interpolate\n'), ((87285, 87337), 'fusilib.io.sync.remove_single_datapoint_onsets', 'sync.remove_single_datapoint_onsets', (['fusi_ttl_signal'], {}), '(fusi_ttl_signal)\n', (87320, 87337), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((88661, 88702), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['fusi_preproc_fl', '"""data"""'], {}), "(fusi_preproc_fl, 'data')\n", (88677, 88702), False, 'from fusilib.extras import readers\n'), ((88868, 88944), 'scipy.interpolate.interp1d', 'interpolate.interp1d', (['ttl_samples', 'fusi_ttl_onsets'], {'fill_value': '"""extrapolate"""'}), "(ttl_samples, fusi_ttl_onsets, fill_value='extrapolate')\n", (88888, 88944), False, 'from scipy import interpolate\n'), ((89741, 89769), 'numpy.zeros_like', 'np.zeros_like', (['outbrain_mask'], {}), '(outbrain_mask)\n', (89754, 89769), True, 'import numpy as np\n'), ((760, 790), 'hashlib.sha512', 'hashlib.sha512', (['call_signature'], {}), '(call_signature)\n', (774, 790), False, 'import hashlib\n'), ((3148, 3191), 'numpy.asarray', 'np.asarray', (['all_slice_indeces'], {'dtype': 'np.int'}), '(all_slice_indeces, dtype=np.int)\n', (3158, 3191), True, 'import numpy as np\n'), ((4251, 4305), 'numpy.asarray', 'np.asarray', (['[t for t in blocks if t not in bad_blocks]'], {}), '([t for t in blocks if t not in bad_blocks])\n', (4261, 4305), True, 'import numpy as np\n'), ((4828, 4882), 
'numpy.asarray', 'np.asarray', (['[t for t in blocks if t not in bad_blocks]'], {}), '([t for t in blocks if t not in bad_blocks])\n', (4838, 4882), True, 'import numpy as np\n'), ((15337, 15361), 'numpy.zeros_like', 'np.zeros_like', (['fusi_data'], {}), '(fusi_data)\n', (15350, 15361), True, 'import numpy as np\n'), ((15743, 15786), 'fusilib.utils.pixelwise_pctsignalchange', 'futils.pixelwise_pctsignalchange', (['fusi_data'], {}), '(fusi_data)\n', (15775, 15786), True, 'from fusilib import misc, utils as futils\n'), ((17771, 17820), 'fusilib.io.logs.read_experiment_log', 'logs.read_experiment_log', (['flname'], {'verbose': 'verbose'}), '(flname, verbose=verbose)\n', (17795, 17820), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((18303, 18334), 'fusilib.misc.convert_string2data', 'misc.convert_string2data', (['value'], {}), '(value)\n', (18327, 18334), False, 'from fusilib import misc, utils as futils\n'), ((20383, 20434), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (["probe_paths['path_phy']"], {}), "(probe_paths['path_phy'])\n", (20409, 20434), False, 'from fusilib import misc, utils as futils\n'), ((21205, 21246), 'os.path.join', 'os.path.join', (['ephys_path', 'spikeglx_prefix'], {}), '(ephys_path, spikeglx_prefix)\n', (21217, 21246), False, 'import os\n'), ((21266, 21308), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (['recording_path'], {}), '(recording_path)\n', (21292, 21308), False, 'from fusilib import misc, utils as futils\n'), ((21811, 21852), 'fusilib.misc.uri_convert_uri2local', 'misc.uri_convert_uri2local', (["paths['path']"], {}), "(paths['path'])\n", (21837, 21852), False, 'from fusilib import misc, utils as futils\n'), ((22089, 22150), 'fusilib.io.spikeglx.load_data_chunk', 'spikeglx.load_data_chunk', (['pxinidaq_flname'], {'sample_duration': '(-1)'}), '(pxinidaq_flname, sample_duration=-1)\n', (22113, 22150), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((22190, 22223), 'numpy.arange', 'np.arange', (['pxinidaq_data.shape[0]'], {}), '(pxinidaq_data.shape[0])\n', (22199, 22223), True, 'import numpy as np\n'), ((23384, 23429), 'numpy.allclose', 'np.allclose', (["data['yCoords']", "paths['ycoord']"], {}), "(data['yCoords'], paths['ycoord'])\n", (23395, 23429), True, 'import numpy as np\n'), ((24159, 24181), 'pprint.pprint', 'pprint', (['localizer_info'], {}), '(localizer_info)\n', (24165, 24181), False, 'from pprint import pprint\n'), ((26625, 26695), 'numpy.asarray', 'np.asarray', (['[[ml_top, ml_bottom], [ap_mm, ap_mm], [dv_top, dv_bottom]]'], {}), '([[ml_top, ml_bottom], [ap_mm, ap_mm], [dv_top, dv_bottom]])\n', (26635, 26695), True, 'import numpy as np\n'), ((28538, 28562), 'numpy.diff', 'np.diff', (["data['yCoords']"], {}), "(data['yCoords'])\n", (28545, 28562), True, 'import numpy as np\n'), ((32686, 32700), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (32698, 32700), True, 'from matplotlib import pyplot as plt\n'), ((33293, 33307), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (33305, 33307), True, 'from matplotlib import pyplot as plt\n'), ((34196, 34223), 'fusilib.align.allenccf_main_areas', 'align.allenccf_main_areas', ([], {}), '()\n', (34221, 34223), False, 'from fusilib import align\n'), ((34254, 34300), 'skimage.measure.find_contours', 'measure.find_contours', (['allenccf_slice', 'areaidx'], {}), '(allenccf_slice, areaidx)\n', (34275, 34300), False, 'from skimage import measure\n'), ((35663, 35681), 'numpy.sum', 'np.sum', (['(xyzmm 
** 2)'], {}), '(xyzmm ** 2)\n', (35669, 35681), True, 'import numpy as np\n'), ((37578, 37603), 'numpy.nanmin', 'np.nanmin', (['dat'], {'axis': 'axis'}), '(dat, axis=axis)\n', (37587, 37603), True, 'import numpy as np\n'), ((37697, 37711), 'numpy.nanmin', 'np.nanmin', (['dat'], {}), '(dat)\n', (37706, 37711), True, 'import numpy as np\n'), ((37728, 37742), 'numpy.nanmin', 'np.nanmin', (['dat'], {}), '(dat)\n', (37737, 37742), True, 'import numpy as np\n'), ((40451, 40470), 'numpy.zeros', 'np.zeros', (['nclusters'], {}), '(nclusters)\n', (40459, 40470), True, 'import numpy as np\n'), ((49552, 49574), 'numpy.hstack', 'np.hstack', (['slice_times'], {}), '(slice_times)\n', (49561, 49574), True, 'import numpy as np\n'), ((49600, 49621), 'numpy.vstack', 'np.vstack', (['slice_data'], {}), '(slice_data)\n', (49609, 49621), True, 'import numpy as np\n'), ((52339, 52360), 'numpy.hstack', 'np.hstack', (['timestamps'], {}), '(timestamps)\n', (52348, 52360), True, 'import numpy as np\n'), ((52393, 52421), 'numpy.hstack', 'np.hstack', (['slice_physio_data'], {}), '(slice_physio_data)\n', (52402, 52421), True, 'import numpy as np\n'), ((53514, 53556), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['cache_file', '"""timestamps"""'], {}), "(cache_file, 'timestamps')\n", (53530, 53556), False, 'from fusilib.extras import readers\n'), ((53587, 53634), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['cache_file', '"""slice_fusi_data"""'], {}), "(cache_file, 'slice_fusi_data')\n", (53603, 53634), False, 'from fusilib.extras import readers\n'), ((53660, 53702), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['cache_file', '"""spike_data"""'], {}), "(cache_file, 'spike_data')\n", (53676, 53702), False, 'from fusilib.extras import readers\n'), ((55834, 55928), 'fusilib.utils.bin_spikes', 'futils.bin_spikes', (['times', '(dt_ms / 1000.0)', 'spike_times', 'spike_clusters'], {'nclusters': 'nclusters'}), '(times, dt_ms / 1000.0, spike_times, spike_clusters,\n nclusters=nclusters)\n', (55851, 55928), True, 'from fusilib import misc, utils as futils\n'), ((57699, 57720), 'numpy.hstack', 'np.hstack', (['timestamps'], {}), '(timestamps)\n', (57708, 57720), True, 'import numpy as np\n'), ((57746, 57767), 'numpy.vstack', 'np.vstack', (['spike_data'], {}), '(spike_data)\n', (57755, 57767), True, 'import numpy as np\n'), ((57798, 57824), 'numpy.vstack', 'np.vstack', (['slice_fusi_data'], {}), '(slice_fusi_data)\n', (57807, 57824), True, 'import numpy as np\n'), ((59201, 59243), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['cache_file', '"""timestamps"""'], {}), "(cache_file, 'timestamps')\n", (59217, 59243), False, 'from fusilib.extras import readers\n'), ((59274, 59321), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['cache_file', '"""slice_fusi_data"""'], {}), "(cache_file, 'slice_fusi_data')\n", (59290, 59321), False, 'from fusilib.extras import readers\n'), ((59347, 59389), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['cache_file', '"""spike_data"""'], {}), "(cache_file, 'spike_data')\n", (59363, 59389), False, 'from fusilib.extras import readers\n'), ((60978, 61072), 'fusilib.utils.bin_spikes', 'futils.bin_spikes', (['times', '(dt_ms / 1000.0)', 'spike_times', 'spike_clusters'], {'nclusters': 'nclusters'}), '(times, dt_ms / 1000.0, spike_times, spike_clusters,\n nclusters=nclusters)\n', (60995, 61072), True, 'from fusilib import misc, utils as futils\n'), ((62036, 62057), 'numpy.hstack', 'np.hstack', (['timestamps'], {}), '(timestamps)\n', (62045, 62057), 
True, 'import numpy as np\n'), ((62083, 62104), 'numpy.vstack', 'np.vstack', (['spike_data'], {}), '(spike_data)\n', (62092, 62104), True, 'import numpy as np\n'), ((62135, 62161), 'numpy.vstack', 'np.vstack', (['slice_fusi_data'], {}), '(slice_fusi_data)\n', (62144, 62161), True, 'import numpy as np\n'), ((65680, 65711), 'numpy.asarray', 'np.asarray', (['([trial_ids] * nreps)'], {}), '([trial_ids] * nreps)\n', (65690, 65711), True, 'import numpy as np\n'), ((65738, 65828), 'numpy.asarray', 'np.asarray', (['[stimulus_start_times[stimulus_matrix == cid] for cid in checkerboard_ids]'], {}), '([stimulus_start_times[stimulus_matrix == cid] for cid in\n checkerboard_ids])\n', (65748, 65828), True, 'import numpy as np\n'), ((74210, 74251), 'numpy.logical_and', 'np.logical_and', (['allen_roi_mask', 'hemi_mask'], {}), '(allen_roi_mask, hemi_mask)\n', (74224, 74251), True, 'import numpy as np\n'), ((75620, 75663), 'numpy.logical_and', 'np.logical_and', (['brain_mask', 'hemisphere_mask'], {}), '(brain_mask, hemisphere_mask)\n', (75634, 75663), True, 'import numpy as np\n'), ((76379, 76421), 'fusilib.misc.date_tuple2isoformat', 'misc.date_tuple2isoformat', (['self.date_tuple'], {}), '(self.date_tuple)\n', (76404, 76421), False, 'from fusilib import misc, utils as futils\n'), ((77599, 77625), 'numpy.arange', 'np.arange', (['physio.shape[0]'], {}), '(physio.shape[0])\n', (77608, 77625), True, 'import numpy as np\n'), ((77754, 77776), 'numpy.clip', 'np.clip', (['physio', '(-5)', '(5)'], {}), '(physio, -5, 5)\n', (77761, 77776), True, 'import numpy as np\n'), ((78574, 78596), 'pprint.pprint', 'pprint', (['localizer_info'], {}), '(localizer_info)\n', (78580, 78596), False, 'from pprint import pprint\n'), ((80332, 80386), 'fusilib.io.sync.digital2times', 'sync.digital2times', (['timeline_newtimes', 'fusi_ttl_signal'], {}), '(timeline_newtimes, fusi_ttl_signal)\n', (80350, 80386), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((81947, 82001), 'fusilib.io.sync.digital2times', 'sync.digital2times', (['timeline_newtimes', 'fusi_ttl_signal'], {}), '(timeline_newtimes, fusi_ttl_signal)\n', (81965, 82001), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((85702, 85743), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['fusi_preproc_fl', '"""data"""'], {}), "(fusi_preproc_fl, 'data')\n", (85718, 85743), False, 'from fusilib.extras import readers\n'), ((85839, 85878), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['fusi_preproc_fl', 'name'], {}), '(fusi_preproc_fl, name)\n', (85855, 85878), False, 'from fusilib.extras import readers\n'), ((86260, 86289), 'numpy.arange', 'np.arange', (['fusi_data.shape[0]'], {}), '(fusi_data.shape[0])\n', (86269, 86289), True, 'import numpy as np\n'), ((87599, 87653), 'fusilib.io.sync.digital2times', 'sync.digital2times', (['timeline_newtimes', 'fusi_ttl_signal'], {}), '(timeline_newtimes, fusi_ttl_signal)\n', (87617, 87653), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((89010, 89039), 'numpy.arange', 'np.arange', (['fusi_data.shape[0]'], {}), '(fusi_data.shape[0])\n', (89019, 89039), True, 'import numpy as np\n'), ((90250, 90280), 'fusilib.extras.readers.hdf_load', 'readers.hdf_load', (['flname', 'band'], {}), '(flname, band)\n', (90266, 90280), False, 'from fusilib.extras import readers\n'), ((3058, 3085), 'numpy.isnan', 'np.isnan', (['all_slice_indeces'], {}), '(all_slice_indeces)\n', (3066, 3085), True, 'import numpy as np\n'), ((4017, 4033), 'numpy.isnan', 'np.isnan', (['blocks'], {}), 
'(blocks)\n', (4025, 4033), True, 'import numpy as np\n'), ((4594, 4610), 'numpy.isnan', 'np.isnan', (['blocks'], {}), '(blocks)\n', (4602, 4610), True, 'import numpy as np\n'), ((14940, 14985), 'numpy.asarray', 'np.asarray', (['subject_block.fusi_image_pixel_mm'], {}), '(subject_block.fusi_image_pixel_mm)\n', (14950, 14985), True, 'import numpy as np\n'), ((15487, 15529), 'scipy.ndimage.gaussian_filter', 'ndimage.gaussian_filter', (['img'], {'sigma': 'sigmas'}), '(img, sigma=sigmas)\n', (15510, 15529), False, 'from scipy import ndimage\n'), ((18519, 18540), 'numpy.alltrue', 'np.alltrue', (['isnumeric'], {}), '(isnumeric)\n', (18529, 18540), True, 'import numpy as np\n'), ((22300, 22351), 'fusilib.io.sync.analog2digital', 'sync.analog2digital', (['pxinidaq_data[:, index_npsync]'], {}), '(pxinidaq_data[:, index_npsync])\n', (22319, 22351), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((22483, 22535), 'fusilib.io.sync.analog2digital', 'sync.analog2digital', (['pxinidaq_data[:, index_acqlive]'], {}), '(pxinidaq_data[:, index_acqlive])\n', (22502, 22535), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((22614, 22666), 'fusilib.io.sync.analog2digital', 'sync.analog2digital', (['pxinidaq_data[:, index_flipper]'], {}), '(pxinidaq_data[:, index_flipper])\n', (22633, 22666), False, 'from fusilib.io import righw, spikeglx, phy, logs, sync\n'), ((25144, 25166), 'numpy.unique', 'np.unique', (['fusi_slices'], {}), '(fusi_slices)\n', (25153, 25166), True, 'import numpy as np\n'), ((35209, 35228), 'pathlib.Path', 'pathlib.Path', (['volfl'], {}), '(volfl)\n', (35221, 35228), False, 'import pathlib\n'), ((36353, 36362), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (36359, 36362), True, 'import numpy as np\n'), ((37932, 37954), 'numpy.nanmax', 'np.nanmax', (['dat'], {'axis': '(1)'}), '(dat, axis=1)\n', (37941, 37954), True, 'import numpy as np\n'), ((37979, 38009), 'numpy.logical_not', 'np.logical_not', (['probe_location'], {}), '(probe_location)\n', (37993, 38009), True, 'import numpy as np\n'), ((38477, 38490), 'numpy.isnan', 'np.isnan', (['dat'], {}), '(dat)\n', (38485, 38490), True, 'import numpy as np\n'), ((40011, 40027), 'numpy.diff', 'np.diff', (['hcoords'], {}), '(hcoords)\n', (40018, 40027), True, 'import numpy as np\n'), ((40066, 40082), 'numpy.diff', 'np.diff', (['vcoords'], {}), '(vcoords)\n', (40073, 40082), True, 'import numpy as np\n'), ((41361, 41395), 'numpy.zeros', 'np.zeros', (['(mua_nchunks, nclusters)'], {}), '((mua_nchunks, nclusters))\n', (41369, 41395), True, 'import numpy as np\n'), ((42051, 42070), 'numpy.zeros', 'np.zeros', (['nclusters'], {}), '(nclusters)\n', (42059, 42070), True, 'import numpy as np\n'), ((42912, 42954), 'fusilib.io.phy.yield_table_columns', 'phy.yield_table_columns', (['fl'], {'delimiter': '""","""'}), "(fl, delimiter=',')\n", (42935, 42954), False, 'from fusilib.io import phy\n'), ((43036, 43055), 'numpy.zeros', 'np.zeros', (['nclusters'], {}), '(nclusters)\n', (43044, 43055), True, 'import numpy as np\n'), ((43425, 43444), 'numpy.zeros', 'np.zeros', (['nclusters'], {}), '(nclusters)\n', (43433, 43444), True, 'import numpy as np\n'), ((65632, 65652), 'numpy.unique', 'np.unique', (['trial_ids'], {}), '(trial_ids)\n', (65641, 65652), True, 'import numpy as np\n'), ((74609, 74635), 'numpy.logical_not', 'np.logical_not', (['probe_mask'], {}), '(probe_mask)\n', (74623, 74635), True, 'import numpy as np\n'), ((88164, 88203), 'fusilib.misc.date_tuple2number', 'misc.date_tuple2number', (['self.date_tuple'], {}), 
'(self.date_tuple)\n', (88186, 88203), False, 'from fusilib import misc, utils as futils\n'), ((12369, 12429), 'scipy.linalg.svd', 'LA.svd', (['fusi_data[:, outsidebrain_mask]'], {'full_matrices': '(False)'}), '(fusi_data[:, outsidebrain_mask], full_matrices=False)\n', (12375, 12429), True, 'from scipy import linalg as LA\n'), ((18570, 18587), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (18580, 18587), True, 'import numpy as np\n'), ((25174, 25185), 'numpy.isnan', 'np.isnan', (['t'], {}), '(t)\n', (25182, 25185), True, 'import numpy as np\n'), ((37843, 37860), 'numpy.nanargmin', 'np.nanargmin', (['dat'], {}), '(dat)\n', (37855, 37860), True, 'import numpy as np\n'), ((51371, 51468), 'fusilib.resampling.lanczos_resample', 'resampling.lanczos_resample', (['(dt_ms / 1000.0)', 'physio_data[..., None]', 'physio_times', 'fusi_times'], {}), '(dt_ms / 1000.0, physio_data[..., None],\n physio_times, fusi_times)\n', (51398, 51468), False, 'from fusilib import resampling\n'), ((54671, 54697), 'numpy.isscalar', 'np.isscalar', (['freq_cutoffhz'], {}), '(freq_cutoffhz)\n', (54682, 54697), True, 'import numpy as np\n'), ((56419, 56445), 'numpy.isscalar', 'np.isscalar', (['freq_cutoffhz'], {}), '(freq_cutoffhz)\n', (56430, 56445), True, 'import numpy as np\n'), ((66104, 66134), 'numpy.hstack', 'np.hstack', (['[forward, backward]'], {}), '([forward, backward])\n', (66113, 66134), True, 'import numpy as np\n'), ((66414, 66444), 'numpy.hstack', 'np.hstack', (['[forward, backward]'], {}), '([forward, backward])\n', (66423, 66444), True, 'import numpy as np\n'), ((49085, 49104), 'numpy.diff', 'np.diff', (['last_times'], {}), '(last_times)\n', (49092, 49104), True, 'import numpy as np\n'), ((51861, 51880), 'numpy.diff', 'np.diff', (['last_times'], {}), '(last_times)\n', (51868, 51880), True, 'import numpy as np\n'), ((56302, 56321), 'numpy.diff', 'np.diff', (['last_times'], {}), '(last_times)\n', (56309, 56321), True, 'import numpy as np\n'), ((61446, 61465), 'numpy.diff', 'np.diff', (['last_times'], {}), '(last_times)\n', (61453, 61465), True, 'import numpy as np\n'), ((72732, 72770), 'numpy.logical_and', 'np.logical_and', (['fusi_probe_mask', 'pmask'], {}), '(fusi_probe_mask, pmask)\n', (72746, 72770), True, 'import numpy as np\n'), ((82492, 82531), 'fusilib.misc.date_tuple2number', 'misc.date_tuple2number', (['self.date_tuple'], {}), '(self.date_tuple)\n', (82514, 82531), False, 'from fusilib import misc, utils as futils\n'), ((83002, 83041), 'fusilib.misc.date_tuple2number', 'misc.date_tuple2number', (['self.date_tuple'], {}), '(self.date_tuple)\n', (83024, 83041), False, 'from fusilib import misc, utils as futils\n'), ((79340, 79360), 'pathlib.Path', 'pathlib.Path', (['flname'], {}), '(flname)\n', (79352, 79360), False, 'import pathlib\n'), ((83801, 83840), 'fusilib.misc.date_tuple2number', 'misc.date_tuple2number', (['self.date_tuple'], {}), '(self.date_tuple)\n', (83823, 83840), False, 'from fusilib import misc, utils as futils\n'), ((84888, 84927), 'fusilib.misc.date_tuple2number', 'misc.date_tuple2number', (['self.date_tuple'], {}), '(self.date_tuple)\n', (84910, 84927), False, 'from fusilib import misc, utils as futils\n'), ((85598, 85627), 'pathlib.Path', 'pathlib.Path', (['fusi_preproc_fl'], {}), '(fusi_preproc_fl)\n', (85610, 85627), False, 'import pathlib\n'), ((89669, 89698), 'numpy.logical_not', 'np.logical_not', (['outbrain_mask'], {}), '(outbrain_mask)\n', (89683, 89698), True, 'import numpy as np\n')]
|
# coding: utf-8
# # simplified Confident Learning Tutorial
# *Author: <NAME>, <EMAIL>*
#
# In this tutorial, we show how to implement confident learning without using cleanlab (for the most part).
# This tutorial is to confident learning what this tutorial https://pytorch.org/tutorials/beginner/examples_tensor/two_layer_net_numpy.html
# is to deep learning.
#
# The actual implementations in cleanlab are complex because they support parallel processing, numerous type and input checks, lots of hyper-parameter settings, lots of utilities to make things work smoothly for all types of inputs, and ancillary functions.
#
# I ignore all of that here and provide you a bare-bones implementation using mostly for-loops and some numpy.
# Here we'll do two simple things:
# 1. Compute the confident joint which fully characterizes all label noise.
# 2. Find the indices of all label errors, ordered by likelihood of being an error.
#
# ## INPUT (stuff we need beforehand):
# 1. s - The noisy labels: an np.array of shape (n,), one (possibly wrong) label per example
# 2. psx - These are the out-of-sample holdout predicted probabilities for every example in your dataset. This is an np.array (2d) of probabilities, shape (n, m)
#
# ## OUTPUT (what this returns):
# 1. confident_joint - an (m, m) np.array matrix characterizing all the label error counts for every pair of labels.
# 2. label_errors_idx - a numpy array comprised of indices of every label error, ordered by likelihood of being a label error.
#
# In this tutorial we use the handwritten digits dataset as an example.
# In[1]:
from __future__ import print_function, absolute_import, division, with_statement
import cleanlab
import numpy as np
from sklearn.datasets import load_digits
from sklearn.linear_model import LogisticRegression
# To silence convergence warnings caused by using a weak
# logistic regression classifier on image data
import warnings
warnings.simplefilter("ignore")
np.random.seed(477)
# In[2]:
# STEP 0 - Get some real digits data. Add a bunch of label errors. Get probs.
# Get handwritten digits data
X = load_digits()['data']
y = load_digits()['target']
print('Handwritten digits datasets number of classes:', len(np.unique(y)))
print('Handwritten digits datasets number of examples:', len(y))
# Add lots of errors to labels
NUM_ERRORS = 100
s = np.array(y)
error_indices = np.random.choice(len(s), NUM_ERRORS, replace=False)
for i in error_indices:
# Switch to some wrong label thats a different class
wrong_label = np.random.choice(np.delete(range(10), s[i]))
s[i] = wrong_label
# Confirm that we indeed added NUM_ERRORS label errors
assert (len(s) - sum(s == y) == NUM_ERRORS)
actual_label_errors = np.arange(len(y))[s != y]
print('\nIndices of actual label errors:\n', actual_label_errors)
# To keep the tutorial short, we use cleanlab to get the
# out-of-sample predicted probabilities using cross-validation
# with a very simple, non-optimized logistic regression classifier
psx = cleanlab.latent_estimation.estimate_cv_predicted_probabilities(
X, s, clf=LogisticRegression(max_iter=1000, multi_class='auto', solver='lbfgs'))
# Now we have our noisy labels s and predicted probabilities psx.
# That's all we need for confident learning.
# In[3]:
# STEP 1 - Compute confident joint
# Verify inputs
s = np.asarray(s)
psx = np.asarray(psx)
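# (Editor's addition, hedged) Sanity-check the INPUT contract described above:
# s holds one noisy label per example, psx one probability per class per example.
assert s.ndim == 1 and psx.ndim == 2 and len(s) == psx.shape[0]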
# Find the number of unique classes if K is not given
K = len(np.unique(s))
# Estimate the probability thresholds for confident counting
# You can specify these thresholds yourself if you want
# as you may want to optimize them using a validation set.
# By default (and provably so) they are set to the average class prob.
thresholds = [np.mean(psx[:,k][s == k]) for k in range(K)] # P(s^=k|s=k)
thresholds = np.asarray(thresholds)
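# (Editor's addition, hedged) Each threshold is the average self-confidence
# P(s^=k|s=k) for class k, so all K values should be probabilities in (0, 1).
assert thresholds.shape == (K,) and np.all((0 < thresholds) & (thresholds < 1))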
# Compute confident joint
confident_joint = np.zeros((K, K), dtype = int)
for i, row in enumerate(psx):
s_label = s[i]
# Find out how many classes each example is confidently labeled as
confident_bins = row >= thresholds - 1e-6
num_confident_bins = sum(confident_bins)
    # If exactly one confident class, count it; if more than one, count the max-probability class
if num_confident_bins == 1:
confident_joint[s_label][np.argmax(confident_bins)] += 1
elif num_confident_bins > 1:
confident_joint[s_label][np.argmax(row)] += 1
# Normalize confident joint (use cleanlab, trust me on this)
confident_joint = cleanlab.latent_estimation.calibrate_confident_joint(
confident_joint, s)
cleanlab.util.print_joint_matrix(confident_joint)
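# (Editor's addition, hedged) The off-diagonal mass of the calibrated confident
# joint estimates the total number of label errors; with the 100 synthetic
# errors injected above it should land in roughly that ballpark.
print('Estimated label errors (off-diagonal sum):',
      int(confident_joint.sum() - np.trace(confident_joint)))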
# In[4]:
# STEP 2 - Find label errors
# We arbitrarily keep at least 5 examples in every class,
# regardless of whether some of them might be label errors.
MIN_NUM_PER_CLASS = 5
# Leave at least MIN_NUM_PER_CLASS examples per class.
# NOTE prune_count_matrix is transposed (relative to confident_joint)
prune_count_matrix = cleanlab.pruning.keep_at_least_n_per_class(
prune_count_matrix=confident_joint.T,
n=MIN_NUM_PER_CLASS,
)
s_counts = np.bincount(s)
noise_masks_per_class = []
# For each row in the transposed confident joint
for k in range(K):
noise_mask = np.zeros(len(psx), dtype=bool)
psx_k = psx[:, k]
if s_counts[k] > MIN_NUM_PER_CLASS: # Don't prune if not MIN_NUM_PER_CLASS
for j in range(K): # noisy label index (k is the true label index)
if k != j: # Only prune for noise rates, not diagonal entries
num2prune = prune_count_matrix[k][j]
if num2prune > 0:
                    # num2prune-th largest margin p(class k) - p(class j)
                    # among examples x with noisy label j
margin = psx_k - psx[:, j]
s_filter = s == j
threshold = -np.partition(
-margin[s_filter], num2prune - 1
)[num2prune - 1]
noise_mask = noise_mask | (s_filter & (margin >= threshold))
noise_masks_per_class.append(noise_mask)
else:
noise_masks_per_class.append(np.zeros(len(s), dtype=bool))
# Boolean label error mask
label_errors_bool = np.stack(noise_masks_per_class).any(axis=0)
# Remove label errors if given label == model prediction
for i, pred_label in enumerate(psx.argmax(axis=1)):
    # np.all lets this work for both multi-label and single-label cases
if label_errors_bool[i] and np.all(pred_label == s[i]):
label_errors_bool[i] = False
# Convert boolean mask to an ordered list of indices for label errors
label_errors_idx = np.arange(len(s))[label_errors_bool]
# self confidence is the holdout probability that an example
# belongs to its given class label
self_confidence = np.array(
[np.mean(psx[i][s[i]]) for i in label_errors_idx]
)
margin = self_confidence - psx[label_errors_bool].max(axis=1)
label_errors_idx = label_errors_idx[np.argsort(margin)]
print('Indices of label errors found by confident learning:')
print('Note label errors are sorted by likelihood of being an error')
print('but here we just sort them by index for comparison with above.')
print(np.array(sorted(label_errors_idx)))
# In[5]:
score = sum([e in label_errors_idx for e in actual_label_errors]) / NUM_ERRORS
print('% actual errors that confident learning found: {:.0%}'.format(score))
score = sum([e in actual_label_errors for e in label_errors_idx]) / len(label_errors_idx)
print('% confident learning errors that are actual errors: {:.0%}'.format(score))
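# (Editor's addition, hedged cross-check) cleanlab 1.x bundles the two steps
# above into one call; assuming that v1-era API (the same generation already
# used for psx and the calibration), its boolean mask should largely agree
# with the hand-rolled indices found here.
cl_errors_bool = cleanlab.pruning.get_noise_indices(s, psx)
print('Overlap with cleanlab.pruning.get_noise_indices: {} of {}'.format(
    int(np.sum(cl_errors_bool[label_errors_idx])), len(label_errors_idx)))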
|
[
"numpy.stack",
"sklearn.datasets.load_digits",
"numpy.partition",
"cleanlab.util.print_joint_matrix",
"numpy.random.seed",
"warnings.simplefilter",
"numpy.argmax",
"cleanlab.pruning.keep_at_least_n_per_class",
"numpy.asarray",
"numpy.zeros",
"numpy.all",
"numpy.argsort",
"sklearn.linear_model.LogisticRegression",
"numpy.mean",
"numpy.array",
"numpy.bincount",
"cleanlab.latent_estimation.calibrate_confident_joint",
"numpy.unique"
] |
[((1920, 1951), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1941, 1951), False, 'import warnings\n'), ((1952, 1971), 'numpy.random.seed', 'np.random.seed', (['(477)'], {}), '(477)\n', (1966, 1971), True, 'import numpy as np\n'), ((2341, 2352), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2349, 2352), True, 'import numpy as np\n'), ((3327, 3340), 'numpy.asarray', 'np.asarray', (['s'], {}), '(s)\n', (3337, 3340), True, 'import numpy as np\n'), ((3347, 3362), 'numpy.asarray', 'np.asarray', (['psx'], {}), '(psx)\n', (3357, 3362), True, 'import numpy as np\n'), ((3774, 3796), 'numpy.asarray', 'np.asarray', (['thresholds'], {}), '(thresholds)\n', (3784, 3796), True, 'import numpy as np\n'), ((3842, 3869), 'numpy.zeros', 'np.zeros', (['(K, K)'], {'dtype': 'int'}), '((K, K), dtype=int)\n', (3850, 3869), True, 'import numpy as np\n'), ((4418, 4490), 'cleanlab.latent_estimation.calibrate_confident_joint', 'cleanlab.latent_estimation.calibrate_confident_joint', (['confident_joint', 's'], {}), '(confident_joint, s)\n', (4470, 4490), False, 'import cleanlab\n'), ((4497, 4546), 'cleanlab.util.print_joint_matrix', 'cleanlab.util.print_joint_matrix', (['confident_joint'], {}), '(confident_joint)\n', (4529, 4546), False, 'import cleanlab\n'), ((4883, 4989), 'cleanlab.pruning.keep_at_least_n_per_class', 'cleanlab.pruning.keep_at_least_n_per_class', ([], {'prune_count_matrix': 'confident_joint.T', 'n': 'MIN_NUM_PER_CLASS'}), '(prune_count_matrix=\n confident_joint.T, n=MIN_NUM_PER_CLASS)\n', (4925, 4989), False, 'import cleanlab\n'), ((5008, 5022), 'numpy.bincount', 'np.bincount', (['s'], {}), '(s)\n', (5019, 5022), True, 'import numpy as np\n'), ((2098, 2111), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (2109, 2111), False, 'from sklearn.datasets import load_digits\n'), ((2124, 2137), 'sklearn.datasets.load_digits', 'load_digits', ([], {}), '()\n', (2135, 2137), False, 'from sklearn.datasets import load_digits\n'), ((3426, 3438), 'numpy.unique', 'np.unique', (['s'], {}), '(s)\n', (3435, 3438), True, 'import numpy as np\n'), ((3702, 3728), 'numpy.mean', 'np.mean', (['psx[:, k][s == k]'], {}), '(psx[:, k][s == k])\n', (3709, 3728), True, 'import numpy as np\n'), ((6819, 6837), 'numpy.argsort', 'np.argsort', (['margin'], {}), '(margin)\n', (6829, 6837), True, 'import numpy as np\n'), ((2208, 2220), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (2217, 2220), True, 'import numpy as np\n'), ((3075, 3144), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(1000)', 'multi_class': '"""auto"""', 'solver': '"""lbfgs"""'}), "(max_iter=1000, multi_class='auto', solver='lbfgs')\n", (3093, 3144), False, 'from sklearn.linear_model import LogisticRegression\n'), ((6100, 6131), 'numpy.stack', 'np.stack', (['noise_masks_per_class'], {}), '(noise_masks_per_class)\n', (6108, 6131), True, 'import numpy as np\n'), ((6349, 6375), 'numpy.all', 'np.all', (['(pred_label == s[i])'], {}), '(pred_label == s[i])\n', (6355, 6375), True, 'import numpy as np\n'), ((6670, 6691), 'numpy.mean', 'np.mean', (['psx[i][s[i]]'], {}), '(psx[i][s[i]])\n', (6677, 6691), True, 'import numpy as np\n'), ((4219, 4244), 'numpy.argmax', 'np.argmax', (['confident_bins'], {}), '(confident_bins)\n', (4228, 4244), True, 'import numpy as np\n'), ((4317, 4331), 'numpy.argmax', 'np.argmax', (['row'], {}), '(row)\n', (4326, 4331), True, 'import numpy as np\n'), ((5737, 5783), 'numpy.partition', 'np.partition', (['(-margin[s_filter])', '(num2prune - 1)'], 
{}), '(-margin[s_filter], num2prune - 1)\n', (5749, 5783), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 20 11:54:56 2021
@author: dof
"""
import math
from colorspacious import cspace_convert
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from scipy.ndimage import filters
from scipy.signal import savgol_filter
'''
J = lightness
C = chroma
h = hue
'''
# Resolution of colorspace
J_RES = 256
C_RES = 256
# NAME = 'So normal'
# ANGLE = np.pi * 2 * 0.7
# OFFSET = np.pi * 2 * 0.64
# CCW = False
# SMOOTH = 1/3
# NAME = 'Wow unique'
# ANGLE = np.pi * 2 * 1.0
# OFFSET = np.pi * 2 * 0.275
# CCW = True
# SMOOTH = 1/2
# NAME = 'Viridis-like (red bg)'
# ANGLE = np.pi * 2 * 1.0
# OFFSET = np.pi * 2 * 0.1
# CCW = True
# SMOOTH = 1/4
# NAME = 'Viridis-like (purple bg)'
# ANGLE = np.pi * 2 * 0.9
# OFFSET = np.pi * 2 * 0.1
# CCW = True
# SMOOTH = 1/5
NAME = 'Audacity proposal'
ANGLE = np.pi * 2 * 0.875
OFFSET = np.pi * 2 * 0.5
CCW = False
SMOOTH = 1/3
DESATURATE = 0.9
# Generate CAM02-UCS(Jp, ap, bp) colorspace
j_space = np.linspace(0.1, 99, J_RES)
c_space = np.linspace(0, 50, C_RES)
if CCW:
h_ = np.linspace(ANGLE+OFFSET, OFFSET, J_RES)
else:
h_ = np.linspace(OFFSET, ANGLE+OFFSET, J_RES)
jpapbp = np.zeros([C_RES, J_RES, 3])
for jdx, jp in enumerate(j_space):
for cdx, chroma in enumerate(c_space):
ap = np.cos(h_[jdx]) * chroma
bp = np.sin(h_[jdx]) * chroma
jpapbp[cdx, jdx] = (jp, ap, bp)
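# (Editor's addition, hedged) The nested loops above can also be written
# without Python loops; this vectorized cross-check should match exactly.
_jj = np.broadcast_to(j_space, (C_RES, J_RES))
_cc = c_space[:, None]
assert np.allclose(jpapbp, np.stack(
    [_jj, np.cos(h_)[None, :] * _cc, np.sin(h_)[None, :] * _cc], axis=-1))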
# Convert to sRGB
rgb = cspace_convert(jpapbp, "CAM02-UCS", "sRGB1")
# Get chroma limit of sRGB
c_limit = np.zeros_like(j_space)
for jdx in range(J_RES):
max_cdx = 0
for cdx in range(1, C_RES):
if np.any(rgb[cdx, jdx] <= 0) or np.any(1 < rgb[cdx, jdx]):
max_cdx = cdx - 1
break
c_limit[jdx] = max_cdx
# Smooth chroma limit contour
c_smoothed = np.concatenate([-c_limit[::-1][:-1], c_limit, -c_limit[::-1][1:]])
c_smoothed = savgol_filter(c_smoothed, math.ceil(J_RES*SMOOTH*1.5/2)*2 - 1, 3)
c_smoothed = filters.uniform_filter1d(c_smoothed, int(J_RES*SMOOTH*1.5/2)) * DESATURATE
c_smoothed = c_smoothed[J_RES:2*J_RES]
c_selected = c_smoothed.clip(min=0).astype(int)
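# (Editor's addition, hedged) Plot the raw sRGB chroma limit against the
# smoothed, desaturated contour actually used, to eyeball how far inside the
# gamut the final colormap stays.
plt.figure(figsize=[5, 2])
plt.plot(c_limit, label='raw sRGB chroma limit')
plt.plot(c_selected, label='smoothed * DESATURATE')
plt.legend()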
# Generate and plot gamut (out-of-range sRGB channel values are inverted so
# out-of-gamut regions stand out in the image)
gamut_image = np.copy(rgb)
gamut_image[gamut_image <= 0] = 1
gamut_image[1 < gamut_image] = 0
# Mark smoothed contour on image
for jdx, max_c in enumerate(c_selected):
if 0 == jdx % 2:
gamut_image[max_c, jdx] = 1
else:
gamut_image[max_c, jdx] = 0
plt.figure(figsize=[5, 5])
plt.imshow(gamut_image)
# Get colors on contour
cm_jpapbp = []
for jdx, cdx in enumerate(c_smoothed):
chroma = cdx * 50 / C_RES
jp = j_space[jdx]
ap = np.cos(h_[jdx]) * chroma
bp = np.sin(h_[jdx]) * chroma
cm_jpapbp.append([jp, ap, bp])
cm_rgb = cspace_convert(cm_jpapbp, "CAM02-UCS", "sRGB1")
cm_data = np.clip(cm_rgb, 0, 1)
# Display viscm
test_cm = ListedColormap(cm_data, name=NAME)
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto', cmap=test_cm)
# Plot RGB value graph
cm255 = np.asarray(cm_data) * 255
seg_simple = 8
fix, ax = plt.subplots()
plt.plot(cm255[:,0], 'r')
plt.plot(cm255[:,1], 'g')
plt.plot(cm255[:,2], 'b')
plt.plot(np.mean(cm255, axis=1))
ax.set_xticks(np.linspace(0, J_RES, seg_simple+1, endpoint=True))
ax.set_yticks(np.arange(0, 257, 16))
ax.grid(which='both')
plt.show()
# Generate uint8 format colormaps
cm_data_u8 = (cm_data*255 + 0.5).astype('uint8')
cm_selected = cm_rgb*0.8 + 0.3
cm_selected_u8 = (np.clip(cm_selected, 0, 1)*255 + 0.5).astype('uint8')
cm_data_JCh = cspace_convert(cm_rgb, "sRGB1", "JCh")
cm_data_JCh[..., 0] += 20 # Boost lightness
cm_data_JCh[..., 1] += 20 # Boost chroma
cm_data_JCh[..., 2] += 90 # Change hue
cm_sel_freq = cspace_convert(cm_data_JCh, "JCh", "sRGB1")
cm_sel_freq_u8 = (np.clip(cm_sel_freq, 0, 1)*255 + 0.5).astype('uint8')
# Save colormaps to C format
with open('AColorResources.h', 'wt') as ofile:
ofile.write('const unsigned char specColormap[%d][3] = {\n' % J_RES)
for r, g, b in cm_data_u8:
ofile.write(' {%3d, %3d, %3d},\n' % (r, g, b))
ofile.write('};\n\n')
ofile.write('const unsigned char selColormap[%d][3] = {\n' % J_RES)
for r, g, b in cm_selected_u8:
ofile.write(' {%3d, %3d, %3d},\n' % (r, g, b))
ofile.write('};\n\n')
ofile.write('const unsigned char freqSelColormap[%d][3] = {\n' % J_RES)
for r, g, b in cm_sel_freq_u8:
ofile.write(' {%3d, %3d, %3d},\n' % (r, g, b))
ofile.write('};\n\n')
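# (Editor's addition, hedged demo) Quick visual check of the exported colormap
# on a synthetic ramp; purely illustrative, not part of the C export above.
demo = np.linspace(0, 1, J_RES)[None, :] * np.ones((64, 1))
plt.figure(figsize=[6, 2])
plt.imshow(demo, aspect='auto', cmap=test_cm)
plt.show()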
|
[
"numpy.clip",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.arange",
"numpy.sin",
"matplotlib.colors.ListedColormap",
"numpy.zeros_like",
"numpy.copy",
"matplotlib.pyplot.imshow",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show",
"math.ceil",
"numpy.asarray",
"colorspacious.cspace_convert",
"numpy.cos",
"numpy.concatenate",
"matplotlib.pyplot.plot",
"numpy.zeros",
"viscm.viscm",
"numpy.any"
] |
[((1024, 1051), 'numpy.linspace', 'np.linspace', (['(0.1)', '(99)', 'J_RES'], {}), '(0.1, 99, J_RES)\n', (1035, 1051), True, 'import numpy as np\n'), ((1062, 1087), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', 'C_RES'], {}), '(0, 50, C_RES)\n', (1073, 1087), True, 'import numpy as np\n'), ((1213, 1240), 'numpy.zeros', 'np.zeros', (['[C_RES, J_RES, 3]'], {}), '([C_RES, J_RES, 3])\n', (1221, 1240), True, 'import numpy as np\n'), ((1460, 1504), 'colorspacious.cspace_convert', 'cspace_convert', (['jpapbp', '"""CAM02-UCS"""', '"""sRGB1"""'], {}), "(jpapbp, 'CAM02-UCS', 'sRGB1')\n", (1474, 1504), False, 'from colorspacious import cspace_convert\n'), ((1544, 1566), 'numpy.zeros_like', 'np.zeros_like', (['j_space'], {}), '(j_space)\n', (1557, 1566), True, 'import numpy as np\n'), ((1837, 1903), 'numpy.concatenate', 'np.concatenate', (['[-c_limit[::-1][:-1], c_limit, -c_limit[::-1][1:]]'], {}), '([-c_limit[::-1][:-1], c_limit, -c_limit[::-1][1:]])\n', (1851, 1903), True, 'import numpy as np\n'), ((2203, 2215), 'numpy.copy', 'np.copy', (['rgb'], {}), '(rgb)\n', (2210, 2215), True, 'import numpy as np\n'), ((2458, 2484), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '[5, 5]'}), '(figsize=[5, 5])\n', (2468, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2485, 2508), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gamut_image'], {}), '(gamut_image)\n', (2495, 2508), True, 'import matplotlib.pyplot as plt\n'), ((2755, 2802), 'colorspacious.cspace_convert', 'cspace_convert', (['cm_jpapbp', '"""CAM02-UCS"""', '"""sRGB1"""'], {}), "(cm_jpapbp, 'CAM02-UCS', 'sRGB1')\n", (2769, 2802), False, 'from colorspacious import cspace_convert\n'), ((2813, 2834), 'numpy.clip', 'np.clip', (['cm_rgb', '(0)', '(1)'], {}), '(cm_rgb, 0, 1)\n', (2820, 2834), True, 'import numpy as np\n'), ((2863, 2897), 'matplotlib.colors.ListedColormap', 'ListedColormap', (['cm_data'], {'name': 'NAME'}), '(cm_data, name=NAME)\n', (2877, 2897), False, 'from matplotlib.colors import ListedColormap\n'), ((3196, 3210), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3208, 3210), True, 'import matplotlib.pyplot as plt\n'), ((3211, 3237), 'matplotlib.pyplot.plot', 'plt.plot', (['cm255[:, 0]', '"""r"""'], {}), "(cm255[:, 0], 'r')\n", (3219, 3237), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3263), 'matplotlib.pyplot.plot', 'plt.plot', (['cm255[:, 1]', '"""g"""'], {}), "(cm255[:, 1], 'g')\n", (3245, 3263), True, 'import matplotlib.pyplot as plt\n'), ((3263, 3289), 'matplotlib.pyplot.plot', 'plt.plot', (['cm255[:, 2]', '"""b"""'], {}), "(cm255[:, 2], 'b')\n", (3271, 3289), True, 'import matplotlib.pyplot as plt\n'), ((3449, 3459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3457, 3459), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3702), 'colorspacious.cspace_convert', 'cspace_convert', (['cm_rgb', '"""sRGB1"""', '"""JCh"""'], {}), "(cm_rgb, 'sRGB1', 'JCh')\n", (3678, 3702), False, 'from colorspacious import cspace_convert\n'), ((3847, 3890), 'colorspacious.cspace_convert', 'cspace_convert', (['cm_data_JCh', '"""JCh"""', '"""sRGB1"""'], {}), "(cm_data_JCh, 'JCh', 'sRGB1')\n", (3861, 3890), False, 'from colorspacious import cspace_convert\n'), ((1106, 1148), 'numpy.linspace', 'np.linspace', (['(ANGLE + OFFSET)', 'OFFSET', 'J_RES'], {}), '(ANGLE + OFFSET, OFFSET, J_RES)\n', (1117, 1148), True, 'import numpy as np\n'), ((1162, 1204), 'numpy.linspace', 'np.linspace', (['OFFSET', '(ANGLE + OFFSET)', 'J_RES'], {}), '(OFFSET, ANGLE + OFFSET, J_RES)\n', (1173, 1204), True, 'import numpy as 
np\n'), ((2936, 2950), 'viscm.viscm', 'viscm', (['test_cm'], {}), '(test_cm)\n', (2941, 2950), False, 'from viscm import viscm\n'), ((3144, 3163), 'numpy.asarray', 'np.asarray', (['cm_data'], {}), '(cm_data)\n', (3154, 3163), True, 'import numpy as np\n'), ((3298, 3320), 'numpy.mean', 'np.mean', (['cm255'], {'axis': '(1)'}), '(cm255, axis=1)\n', (3305, 3320), True, 'import numpy as np\n'), ((3337, 3389), 'numpy.linspace', 'np.linspace', (['(0)', 'J_RES', '(seg_simple + 1)'], {'endpoint': '(True)'}), '(0, J_RES, seg_simple + 1, endpoint=True)\n', (3348, 3389), True, 'import numpy as np\n'), ((3403, 3424), 'numpy.arange', 'np.arange', (['(0)', '(257)', '(16)'], {}), '(0, 257, 16)\n', (3412, 3424), True, 'import numpy as np\n'), ((2650, 2665), 'numpy.cos', 'np.cos', (['h_[jdx]'], {}), '(h_[jdx])\n', (2656, 2665), True, 'import numpy as np\n'), ((2684, 2699), 'numpy.sin', 'np.sin', (['h_[jdx]'], {}), '(h_[jdx])\n', (2690, 2699), True, 'import numpy as np\n'), ((1332, 1347), 'numpy.cos', 'np.cos', (['h_[jdx]'], {}), '(h_[jdx])\n', (1338, 1347), True, 'import numpy as np\n'), ((1370, 1385), 'numpy.sin', 'np.sin', (['h_[jdx]'], {}), '(h_[jdx])\n', (1376, 1385), True, 'import numpy as np\n'), ((1651, 1677), 'numpy.any', 'np.any', (['(rgb[cdx, jdx] <= 0)'], {}), '(rgb[cdx, jdx] <= 0)\n', (1657, 1677), True, 'import numpy as np\n'), ((1681, 1706), 'numpy.any', 'np.any', (['(1 < rgb[cdx, jdx])'], {}), '(1 < rgb[cdx, jdx])\n', (1687, 1706), True, 'import numpy as np\n'), ((1944, 1979), 'math.ceil', 'math.ceil', (['(J_RES * SMOOTH * 1.5 / 2)'], {}), '(J_RES * SMOOTH * 1.5 / 2)\n', (1953, 1979), False, 'import math\n'), ((3047, 3071), 'numpy.linspace', 'np.linspace', (['(0)', '(100)', '(256)'], {}), '(0, 100, 256)\n', (3058, 3071), True, 'import numpy as np\n'), ((3595, 3621), 'numpy.clip', 'np.clip', (['cm_selected', '(0)', '(1)'], {}), '(cm_selected, 0, 1)\n', (3602, 3621), True, 'import numpy as np\n'), ((3909, 3935), 'numpy.clip', 'np.clip', (['cm_sel_freq', '(0)', '(1)'], {}), '(cm_sel_freq, 0, 1)\n', (3916, 3935), True, 'import numpy as np\n')]
|
from exptools2.core import Session
import numpy as np
import pandas as pd
from psychopy import tools, logging
import scipy.stats as ss
from stimuli import FixationCross, MotorStim, MotorMovie
from trial import MotorTrial, InstructionTrial, DummyWaiterTrial, OutroTrial
import os
opj = os.path.join
opd = os.path.dirname
class MotorSession(Session):
def __init__(self, output_str, output_dir, settings_file):
""" Initializes StroopSession object.
Parameters
----------
output_str : str
Basename for all output-files (like logs), e.g., "sub-01_task-stroop_run-1"
output_dir : str
Path to desired output-directory (default: None, which results in $pwd/logs)
settings_file : str
Path to yaml-file with settings (default: None, which results in the package's
default settings file (in data/default_settings.yml)
"""
super().__init__(output_str, output_dir=output_dir, settings_file=settings_file) # initialize parent class!
self.duration = self.settings['design'].get('stim_duration')
self.n_trials = self.settings['design'].get('n_trials')
self.outro_trial_time = self.settings['design'].get('end_duration')
self.unilateral_hand = self.settings['design'].get('unilateral_hand')
self.stim_height = self.settings['stimuli'].get('text_height')
self.stim_width = self.settings['stimuli'].get('text_width')
self.fixation_width = self.settings['various'].get('fixation_width')
self.fixation_color = self.settings['various'].get('fixation_color')
self.unilateral_movie = "1hand.mp4"
self.bilateral_movie = "2hands.mp4"
self.movie_files = [self.bilateral_movie, self.unilateral_movie]
        # Make sure we have an even number of trials so the two movement conditions can be balanced properly
if (self.n_trials % 2) != 0:
raise ValueError(f"{self.n_trials} is not an even number")
# define crossing fixation lines
self.fixation = FixationCross(win=self.win, lineWidth=self.fixation_width, color=self.fixation_color)
# define button options
self.button_options = self.settings['various'].get('buttons')
# define stim:
self.motorstim = MotorStim(session=self)
self.motormovie = MotorMovie(session=self)
for movie in self.motormovie.movie1, self.motormovie.movie2:
movie.draw()
def create_trials(self):
""" Creates trials (ideally before running your session!) """
# ITI stuff
total_iti_duration = self.n_trials * self.settings['design'].get('mean_iti_duration')
        min_iti_duration = total_iti_duration - self.settings['design'].get('total_iti_duration_leeway')
max_iti_duration = total_iti_duration + self.settings['design'].get('total_iti_duration_leeway')
def return_itis(mean_duration, minimal_duration, maximal_duration, n_trials):
itis = np.random.exponential(scale=mean_duration-minimal_duration, size=n_trials)
itis += minimal_duration
itis[itis>maximal_duration] = maximal_duration
return itis
nits = 0
itis = return_itis(mean_duration=self.settings['design'].get('mean_iti_duration'),
minimal_duration=self.settings['design'].get('minimal_iti_duration'),
maximal_duration=self.settings['design'].get('maximal_iti_duration'),
n_trials=self.n_trials)
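        # Re-draw the ITIs until their sum lies within the allowed leeway around the intended total ITI duration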
while (itis.sum() < min_iti_duration) | (itis.sum() > max_iti_duration):
itis = return_itis(mean_duration=self.settings['design'].get('mean_iti_duration'),
minimal_duration=self.settings['design'].get('minimal_iti_duration'),
maximal_duration=self.settings['design'].get('maximal_iti_duration'),
n_trials=self.n_trials)
nits += 1
print(f'ITIs created with total ITI duration of {itis.sum()} after {nits} iterations')
self.total_experiment_time = itis.sum() + self.settings['design'].get('start_duration') + self.settings['design'].get('end_duration') + (self.n_trials*self.duration)
print(f"Total experiment time: {round(self.total_experiment_time,2)}s")
instruction_trial = InstructionTrial(session=self,
trial_nr=0,
phase_durations=[np.inf],
txt='Please keep fixating at the center.',
keys=['space'])
dummy_trial = DummyWaiterTrial(session=self,
trial_nr=1,
phase_durations=[np.inf, self.settings['design'].get('start_duration')],
txt='Waiting for experiment to start')
outro_trial = OutroTrial(session=self,
trial_nr=self.n_trials+2,
phase_durations=[self.outro_trial_time],
txt='')
# parameters
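        # Balanced conditions: half of the trials unilateral (1), half bilateral (0); order is shuffled below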
self.movement = np.r_[np.ones(self.n_trials//2, dtype=int), np.zeros(self.n_trials//2, dtype=int)]
np.random.shuffle(self.movement)
self.trials = [instruction_trial, dummy_trial]
for i in range(self.n_trials):
# append trial
self.trials.append(MotorTrial(session=self,
trial_nr=2+i,
phase_durations=[itis[i], self.settings['design'].get('stim_duration')],
phase_names=['iti', 'stim'],
parameters={'condition': ['bilateral', 'unilateral'][self.movement[i]],
'fix_color_changetime': np.random.rand()*self.settings['design'].get('mean_iti_duration')},
timing='seconds',
verbose=True))
self.trials.append(outro_trial)
def run(self):
""" Runs experiment. """
self.create_trials() # create them *before* running!
self.start_experiment()
for trial in self.trials:
trial.run()
self.close()
|
[
"trial.OutroTrial",
"stimuli.MotorStim",
"stimuli.MotorMovie",
"numpy.random.exponential",
"stimuli.FixationCross",
"numpy.ones",
"numpy.zeros",
"numpy.random.rand",
"numpy.random.shuffle",
"trial.InstructionTrial"
] |
[((2510, 2600), 'stimuli.FixationCross', 'FixationCross', ([], {'win': 'self.win', 'lineWidth': 'self.fixation_width', 'color': 'self.fixation_color'}), '(win=self.win, lineWidth=self.fixation_width, color=self.\n fixation_color)\n', (2523, 2600), False, 'from stimuli import FixationCross, MotorStim, MotorMovie\n'), ((2749, 2772), 'stimuli.MotorStim', 'MotorStim', ([], {'session': 'self'}), '(session=self)\n', (2758, 2772), False, 'from stimuli import FixationCross, MotorStim, MotorMovie\n'), ((2799, 2823), 'stimuli.MotorMovie', 'MotorMovie', ([], {'session': 'self'}), '(session=self)\n', (2809, 2823), False, 'from stimuli import FixationCross, MotorStim, MotorMovie\n'), ((4865, 4997), 'trial.InstructionTrial', 'InstructionTrial', ([], {'session': 'self', 'trial_nr': '(0)', 'phase_durations': '[np.inf]', 'txt': '"""Please keep fixating at the center."""', 'keys': "['space']"}), "(session=self, trial_nr=0, phase_durations=[np.inf], txt=\n 'Please keep fixating at the center.', keys=['space'])\n", (4881, 4997), False, 'from trial import MotorTrial, InstructionTrial, DummyWaiterTrial, OutroTrial\n'), ((5491, 5597), 'trial.OutroTrial', 'OutroTrial', ([], {'session': 'self', 'trial_nr': '(self.n_trials + 2)', 'phase_durations': '[self.outro_trial_time]', 'txt': '""""""'}), "(session=self, trial_nr=self.n_trials + 2, phase_durations=[self.\n outro_trial_time], txt='')\n", (5501, 5597), False, 'from trial import MotorTrial, InstructionTrial, DummyWaiterTrial, OutroTrial\n'), ((5826, 5858), 'numpy.random.shuffle', 'np.random.shuffle', (['self.movement'], {}), '(self.movement)\n', (5843, 5858), True, 'import numpy as np\n'), ((3482, 3558), 'numpy.random.exponential', 'np.random.exponential', ([], {'scale': '(mean_duration - minimal_duration)', 'size': 'n_trials'}), '(scale=mean_duration - minimal_duration, size=n_trials)\n', (3503, 3558), True, 'import numpy as np\n'), ((5741, 5779), 'numpy.ones', 'np.ones', (['(self.n_trials // 2)'], {'dtype': 'int'}), '(self.n_trials // 2, dtype=int)\n', (5748, 5779), True, 'import numpy as np\n'), ((5779, 5818), 'numpy.zeros', 'np.zeros', (['(self.n_trials // 2)'], {'dtype': 'int'}), '(self.n_trials // 2, dtype=int)\n', (5787, 5818), True, 'import numpy as np\n'), ((6479, 6495), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6493, 6495), True, 'import numpy as np\n')]
|
import csv
from random import sample
from math import sqrt
from numpy import zeros, linspace, zeros_like
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics import confusion_matrix
from scipy.stats import mode
def assign_closest_centroid(data, centroids):
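    # Assign every point to its nearest centroid (Euclidean distance); returns [cluster index, distance, point index] per point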
clusters = []
count = 0
for item in data:
d_min = 1e9
closest = 0
c=0
for centroid in centroids:
d = 0
for i in range(len(centroid)):
d += (item[i] - centroid[i])**2
d = sqrt(d)
if d_min > d:
d_min = d
closest = c
c+=1
clusters.append([closest, d_min, count])
count+=1
return clusters
def compute_d_avg(clusters, k, gamma):
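    # Running mean of the distances of all non-outlier points (cluster != k), scaled by gamma; serves as the outlier threshold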
c = 0
d = 0
for item in clusters:
if item[0] != k:
d = (d / (c+1)) * c + item[1]/(c+1)
c+=1
return d*gamma
def sort_by_second(item):
return item[1]
def sort_by_third(item):
return item[2]
def update_clusters_kmor(data, centroids, n_zero, k, gamma):
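    # KMOR update: assign points to the nearest centroid, then mark up to n_zero of the farthest points as outliers (cluster k) if their distance exceeds the threshold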
clusters = assign_closest_centroid(data, centroids)
d_avg = compute_d_avg(clusters, k, gamma)
clusters.sort(key=sort_by_second, reverse=True)
for i in range(n_zero):
if clusters[i][1] > d_avg:
clusters[i][0] = k
clusters[i][1] = -1
clusters.sort(key=sort_by_third)
return clusters
def update_clusters_odc(data, centroids, k, gamma, old_clusters):
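    # ODC update: a point is marked as an outlier if its distance exceeds the threshold or it was already an outlier in the previous iteration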
clusters = assign_closest_centroid(data, centroids)
d_avg = compute_d_avg(clusters, k, gamma)
for i in range(len(clusters)):
if clusters[i][1] > d_avg or old_clusters[i][0] == k:
clusters[i][0] = k
clusters[i][1] = -1
return clusters
# update centroids given data and their assigned cluster
def update_centroids(data, clusters, k):
centroids = []
for i in range(k):
centroid = []
for w in range(len(data[0])):
centroid.append(0)
c = 0
for j in range(len(clusters)):
if clusters[j][0] == i:
for f in range(len(data[0])):
centroid[f]= ((centroid[f] /(c+1))*c + data[j][f]/(c+1))
c+=1
centroids.append(centroid)
return centroids
def update_distance(data, clusters, centroids):
for item in range(len(data)):
d = 0
if clusters[item][0] == len(centroids):
continue
for i in range(len(data[0])):
d += (data[item][i] - centroids[clusters[item][0]][i])**2
d = sqrt(d)
clusters[item][1] = d
return clusters
def compute_p(clusters, d_avg, k):
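    # Objective value: sum of point-to-centroid distances, with d_avg charged for every outlier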
P = 0
for item in clusters:
if item[0] != k:
P += item[1]
else:
P += d_avg
return P
def get_outliers(clusters, k):
outliers = []
for item in clusters:
if item[0] == k:
outliers.append(item)
return outliers
def permutation(lst):
if len(lst) == 0:
return []
if len(lst) == 1:
return [lst]
l = []
for i in range(len(lst)):
m = lst[i]
remLst = lst[:i] + lst[i+1:]
for p in permutation(remLst):
l.append([m] + p)
return l
def best_ME(predicted, labels):
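    # Misclassification error: try every permutation of the k cluster labels (outliers form their own class) and return the lowest class-weighted error score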
dummy = []
for i in range(1,k+1):
dummy.append(i)
permutations = permutation(dummy)
new_clusters = zeros_like(predicted)
M_best =2
for perm in permutations:
M_total = 0
classes = []
for item in predicted:
f = 1
for i in range(k):
if item == i:
classes.append(perm[i])
f = 0
if f == 1:
classes.append(k+1)
mat = confusion_matrix(labels, classes)
for a in range(k+1):
M = 0
tp = 0
tn = 0
fp = 0
fn = 0
count = 0
for b in range(k+1):
for c in range(k+1):
if a == c:
count += mat[b][c]
if a == b and a == c:
tp += mat[b][c]
elif c == a:
fn += mat[b][c]
elif b == a:
fp += mat[b][c]
else:
tn += mat[b][c]
#print(tp,fp,tn,fn)
M = sqrt((1-(tp/(tp+fp)))**2+(fp/(tn+fp))**2)
M_total += M*count/len(labels)
if M_total < M_best:
M_best= M_total
return M_best
def KMOR(dataset, gamma, k, max_outlier_ratio, number_of_executions):
data = []
labels = []
# Split data and labels
with open(dataset, 'r', newline='') as csvfile:
csvreader = csv.reader(csvfile)
for item in csvreader:
dummylist = [float(i) for i in item]
data.append(dummylist[1:-1])
labels.append(dummylist[-1])
labels = [int(i) for i in labels]
# Set Model variables
n_zero = int(len(data)/max_outlier_ratio)
max_iteration = 100
theta = 1e-6
T = number_of_executions
R_avg = 0
M_avg = 0
O_avg = 0
for s in range(T):
        # Initialize centroids by sampling k random data points
centroids = sample(data,k)
P = 0
t = 0
clusters = assign_closest_centroid(data, centroids)
for i in range(max_iteration):
clusters = update_clusters_kmor(data, centroids, n_zero, k, gamma)
centroids = update_centroids(data, clusters, k)
clusters = update_distance(data, clusters, centroids)
d_avg = compute_d_avg(clusters, k, gamma)
P_new = compute_p(clusters, d_avg, k)
if abs(P_new-P) < theta:
break
P = P_new
t+=1
O = len(get_outliers(clusters, k))
O_avg += O
predicted = []
for i in clusters:
predicted.append(i[0])
R = adjusted_rand_score(predicted, labels)
R_avg += R
M_best = best_ME(predicted,labels)
M_avg += M_best
print(R_avg/T, M_avg/T, O_avg/T)
def kmeansmm(dataset, k, max_outlier_ratio, number_of_executions):
KMOR(dataset, 0, k, max_outlier_ratio, number_of_executions)
def ODC(dataset, gamma, k, number_of_executions):
data = []
labels = []
# Split data and labels
with open(dataset, 'r', newline='') as csvfile:
csvreader = csv.reader(csvfile)
for item in csvreader:
dummylist = [float(i) for i in item]
data.append(dummylist[1:-1])
labels.append(dummylist[-1])
labels = [int(i) for i in labels]
# Set Model variables
max_iteration = 100
theta = 1e-6
T = number_of_executions
R_avg = 0
M_avg = 0
O_avg = 0
for s in range(T):
        # Initialize centroids by sampling k random data points
centroids = sample(data,k)
P = 0
t = 0
clusters = assign_closest_centroid(data, centroids)
for i in range(max_iteration):
clusters = update_clusters_odc(data, centroids, k, gamma, clusters)
centroids = update_centroids(data, clusters, k)
clusters = update_distance(data, clusters, centroids)
d_avg = compute_d_avg(clusters, k, gamma)
P_new = compute_p(clusters, d_avg, k)
if abs(P_new-P) < theta:
break
P = P_new
t+=1
O = len(get_outliers(clusters, k))
O_avg += O
predicted = []
for i in clusters:
predicted.append(i[0])
R = adjusted_rand_score(predicted, labels)
R_avg += R
M_best = best_ME(predicted,labels)
M_avg += M_best
print(R_avg/T, M_avg/T, O_avg/T)
if __name__ == '__main__':
dataset = 0
gamma = 0
k = 0
while True:
inp = input("Enter 1 for BCW and 2 for shuttle: ")
if inp == '1':
dataset = 'datasets/breast-cancer-wisconsin-no-miss.csv'
gamma = 1
k = 1
max_outlier_ratio = 2
number_of_executions = 10
break
elif inp == '2':
dataset = "datasets/shuttle_normal.csv"
gamma = 4.1
k = 3
max_outlier_ratio = 10
number_of_executions = 10
break
while True:
inp = input("Enter 1 for KMOR and 2 for k-means-- and 3 for ODC: ")
if inp == '1':
KMOR(dataset, gamma, k, max_outlier_ratio, number_of_executions)
break
elif inp == '2':
kmeansmm(dataset, k, max_outlier_ratio, number_of_executions)
break
elif inp == '3':
ODC(dataset, gamma, k, number_of_executions)
break
|
[
"numpy.zeros_like",
"csv.reader",
"math.sqrt",
"random.sample",
"sklearn.metrics.cluster.adjusted_rand_score",
"sklearn.metrics.confusion_matrix"
] |
[((3405, 3426), 'numpy.zeros_like', 'zeros_like', (['predicted'], {}), '(predicted)\n', (3415, 3426), False, 'from numpy import zeros, linspace, zeros_like\n'), ((2580, 2587), 'math.sqrt', 'sqrt', (['d'], {}), '(d)\n', (2584, 2587), False, 'from math import sqrt\n'), ((3765, 3798), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['labels', 'classes'], {}), '(labels, classes)\n', (3781, 3798), False, 'from sklearn.metrics import confusion_matrix\n'), ((4791, 4810), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (4801, 4810), False, 'import csv\n'), ((5279, 5294), 'random.sample', 'sample', (['data', 'k'], {}), '(data, k)\n', (5285, 5294), False, 'from random import sample\n'), ((5989, 6027), 'sklearn.metrics.cluster.adjusted_rand_score', 'adjusted_rand_score', (['predicted', 'labels'], {}), '(predicted, labels)\n', (6008, 6027), False, 'from sklearn.metrics.cluster import adjusted_rand_score\n'), ((6465, 6484), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (6475, 6484), False, 'import csv\n'), ((6907, 6922), 'random.sample', 'sample', (['data', 'k'], {}), '(data, k)\n', (6913, 6922), False, 'from random import sample\n'), ((7618, 7656), 'sklearn.metrics.cluster.adjusted_rand_score', 'adjusted_rand_score', (['predicted', 'labels'], {}), '(predicted, labels)\n', (7637, 7656), False, 'from sklearn.metrics.cluster import adjusted_rand_score\n'), ((548, 555), 'math.sqrt', 'sqrt', (['d'], {}), '(d)\n', (552, 555), False, 'from math import sqrt\n'), ((4430, 4485), 'math.sqrt', 'sqrt', (['((1 - tp / (tp + fp)) ** 2 + (fp / (tn + fp)) ** 2)'], {}), '((1 - tp / (tp + fp)) ** 2 + (fp / (tn + fp)) ** 2)\n', (4434, 4485), False, 'from math import sqrt\n')]
|
# -*- coding:utf-8 -*-
# @project: GPT2-NewsTitle
# @filename: train.py
# @author: 刘聪NLP
# @contact: <EMAIL>
# @time: 2020/12/16 16:28
"""
File description:
    Training script for the GPT2 model that generates news titles from news article bodies
"""
import torch
import os
import random
import numpy as np
import argparse
import logging
from transformers.modeling_gpt2 import GPT2Config
from model import GPT2LMHeadModel
from transformers import BertTokenizer
from data_set import GPT2NewsTitleDataSet, collate_func
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import AdamW, get_linear_schedule_with_warmup
from tqdm import tqdm, trange
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def train(model, device, train_data, test_data, args):
"""
    Train the model
    Args:
        model: the model to train
        device: device information
        train_data: training dataset object
        test_data: test dataset object
        args: training configuration arguments
Returns:
"""
tb_write = SummaryWriter()
if args.gradient_accumulation_steps < 1:
raise ValueError("gradient_accumulation_steps参数无效,必须大于等于1")
# 计算真实的训练batch_size大小
train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
train_sampler = RandomSampler(train_data)
train_data_loader = DataLoader(train_data, sampler=train_sampler,
batch_size=train_batch_size, collate_fn=collate_func)
total_steps = int(len(train_data_loader) * args.num_train_epochs / args.gradient_accumulation_steps)
logger.info("总训练步数为:{}".format(total_steps))
model.to(device)
# 获取模型所有参数
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
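    # Apply weight decay to all parameters except biases and LayerNorm weights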
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(
nd in n for nd in no_decay)], 'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
    # Set up the optimizer
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=int(args.warmup_proportion * total_steps),
num_training_steps=total_steps)
    # Clear the CUDA cache
    torch.cuda.empty_cache()
    # Put the model into training mode
model.train()
title_id = train_data.title_id
tr_loss, logging_loss, min_loss = 0.0, 0.0, 0.0
global_step = 0
    # Start training the model
for iepoch in trange(0, int(args.num_train_epochs), desc="Epoch", disable=False):
iter_bar = tqdm(train_data_loader, desc="Iter (loss=X.XXX)", disable=False)
for step, batch in enumerate(iter_bar):
input_ids = batch["input_ids"].to(device)
token_type_ids = batch["token_type_ids"].to(device)
            # Forward pass to get the loss
outputs = model.forward(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids, title_id=title_id)
loss = outputs[0]
tr_loss += loss.item()
            # Show the current loss in the progress bar
iter_bar.set_description("Iter (loss=%5.3f)" % loss.item())
            # If gradient accumulation is used, divide the loss by the number of accumulation steps
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
            # Backpropagate the loss
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
            # Perform an optimizer step once every gradient_accumulation_steps batches
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
                # Every logging_steps global steps, log the learning rate and the training loss
if args.logging_steps > 0 and global_step % args.logging_steps == 0:
tb_write.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_write.add_scalar("train_loss", (tr_loss-logging_loss) /
(args.logging_steps*args.gradient_accumulation_steps), global_step)
logging_loss = tr_loss
                # Every eval_steps global steps, evaluate on the test set and log the test loss
if args.eval_steps > 0 and global_step % args.eval_steps == 0:
eval_loss = evaluate(model, device, test_data, args)
tb_write.add_scalar("test_loss", eval_loss, global_step)
model.train()
        # Save the model after every epoch
output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
        # Clear the CUDA cache
torch.cuda.empty_cache()
def evaluate(model, device, test_data, args):
"""
    Evaluate the model on the test dataset
    Args:
        model: the model to evaluate
        device: device information
        test_data: test dataset object
        args: training configuration arguments
Returns:
"""
    # Build the DataLoader for the test set
test_sampler = SequentialSampler(test_data)
test_data_loader = DataLoader(test_data, sampler=test_sampler,
batch_size=args.test_batch_size, collate_fn=collate_func)
iter_bar = tqdm(test_data_loader, desc="iter", disable=False)
title_id = test_data.title_id
total_loss, total = 0.0, 0.0
    # Run evaluation
    for step, batch in enumerate(iter_bar):
        # Put the model into eval mode
model.eval()
with torch.no_grad():
input_ids = batch["input_ids"].to(device)
token_type_ids = batch["token_type_ids"].to(device)
            # Forward pass to get the loss
outputs = model.forward(input_ids=input_ids, token_type_ids=token_type_ids, labels=input_ids, title_id=title_id)
loss = outputs[0]
loss = loss.item()
            # Accumulate the loss, weighted by the batch size
total_loss += loss*len(batch["input_ids"])
total += len(batch["input_ids"])
    # Compute the final average loss on the test set
test_loss = total_loss / total
return test_loss
def set_args():
"""设置训练模型所需参数"""
parser = argparse.ArgumentParser()
    parser.add_argument('--device', default='0', type=str, help='GPU id to use for training or testing')
    parser.add_argument('--config_path', default='./config/config.json', type=str, help='Model configuration file')
    parser.add_argument('--vocab_path', default='./vocab/vocab.txt', type=str, help='Vocabulary file; a reduced vocabulary with some additional special tokens')
    parser.add_argument('--train_file_path', default='./data_dir/train_data.json', type=str, help='Training data for news title generation')
    parser.add_argument('--test_file_path', default='./data_dir/test_data.json', type=str, help='Test data for news title generation')
    parser.add_argument('--pretrained_model_path', default=None, type=str, help='Path to a pretrained GPT2 model')
    parser.add_argument('--data_dir', default='./data_dir', type=str, help='Directory where cached data is stored')
    parser.add_argument('--num_train_epochs', default=5, type=int, help='Number of training epochs')
    parser.add_argument('--train_batch_size', default=16, type=int, help='Batch size during training')
    parser.add_argument('--test_batch_size', default=8, type=int, help='Batch size during testing')
    parser.add_argument('--learning_rate', default=1e-4, type=float, help='Learning rate for model training')
    parser.add_argument('--warmup_proportion', default=0.1, type=float, help='Warm-up proportion, i.e. the fraction of total training steps used for warm-up')
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon value of the Adam optimizer')
    parser.add_argument('--logging_steps', default=20, type=int, help='Number of steps between training-log entries')
    parser.add_argument('--eval_steps', default=4000, type=int, help='Number of training steps between evaluations')
    parser.add_argument('--gradient_accumulation_steps', default=4, type=int, help='Gradient accumulation steps')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='')
    parser.add_argument('--output_dir', default='output_dir/', type=str, help='Model output directory')
    parser.add_argument('--seed', type=int, default=2020, help='Random seed')
    parser.add_argument('--max_len', type=int, default=512, help='Maximum model input length; must be smaller than n_ctx in the config')
    parser.add_argument('--title_max_len', type=int, default=32, help='Maximum length of the generated title; must be smaller than max_len')
return parser.parse_args()
def main():
    # Parse the training arguments
args = set_args()
    # Configure which GPU is visible
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.device
    # Select the device used for model training
    device = torch.device("cuda" if torch.cuda.is_available() and int(args.device) >= 0 else "cpu")
    # Set random seeds for reproducibility
if args.seed:
torch.manual_seed(args.seed)
random.seed(args.seed)
np.random.seed(args.seed)
    # Load the model config
model_config = GPT2Config.from_json_file(args.config_path)
    # Instantiate the GPT2LMHeadModel. Here we do not load a pretrained model but train from scratch.
    # Why train from scratch? We use a small 6-layer model with a modified vocabulary, and no suitable pretrained model was available. (Also, limited GPU resources.)
    # Check whether a pretrained GPT2 model should be used
if args.pretrained_model_path:
model = GPT2LMHeadModel.from_pretrained(args.pretrained_model_path)
else:
        # If no pretrained model is specified, initialize the model from the config
model = GPT2LMHeadModel(config=model_config)
# model = GPT2LMHeadModel(config=model_config)
    # Instantiate the tokenizer
tokenizer = BertTokenizer.from_pretrained(args.vocab_path, do_lower_case=True)
    # Treat [Space] as a single token. For example, for "我爱[Space]中国。" the original tokenizer yields "['我', '爱', '[', 'Space', ']', '中', '国', '。']";
    # after registering the special token the result is "['我', '爱', '[Space]', '中', '国', '。']"
tokenizer.add_tokens("[Space]", special_tokens=True)
    # Create the model output directory
if not os.path.exists(args.output_dir):
os.mkdir(args.output_dir)
    # Load the training and test data
train_data = GPT2NewsTitleDataSet(tokenizer, args.max_len, args.title_max_len, args.data_dir, "train", args.train_file_path)
test_data = GPT2NewsTitleDataSet(tokenizer, args.max_len, args.title_max_len, args.data_dir, "test", args.test_file_path)
    # Start training
train(model, device, train_data, test_data, args)
if __name__ == '__main__':
main()
|
[
"os.mkdir",
"numpy.random.seed",
"argparse.ArgumentParser",
"torch.utils.data.RandomSampler",
"data_set.GPT2NewsTitleDataSet",
"torch.no_grad",
"torch.utils.data.DataLoader",
"os.path.exists",
"torch.utils.data.SequentialSampler",
"random.seed",
"tqdm.tqdm",
"torch.manual_seed",
"transformers.BertTokenizer.from_pretrained",
"transformers.AdamW",
"torch.cuda.is_available",
"model.GPT2LMHeadModel.from_pretrained",
"model.GPT2LMHeadModel",
"tensorboardX.SummaryWriter",
"logging.basicConfig",
"transformers.modeling_gpt2.GPT2Config.from_json_file",
"torch.cuda.empty_cache",
"logging.getLogger"
] |
[((741, 884), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%m/%d/%Y %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%m/%d/%Y %H:%M:%S', level=logging.INFO)\n", (760, 884), False, 'import logging\n'), ((924, 951), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (941, 951), False, 'import logging\n'), ((1187, 1202), 'tensorboardX.SummaryWriter', 'SummaryWriter', ([], {}), '()\n', (1200, 1202), False, 'from tensorboardX import SummaryWriter\n'), ((1447, 1472), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (1460, 1472), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((1497, 1600), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'train_batch_size', 'collate_fn': 'collate_func'}), '(train_data, sampler=train_sampler, batch_size=train_batch_size,\n collate_fn=collate_func)\n', (1507, 1600), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((2256, 2342), 'transformers.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'args.learning_rate', 'eps': 'args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.\n adam_epsilon)\n', (2261, 2342), False, 'from transformers import AdamW, get_linear_schedule_with_warmup\n'), ((2578, 2602), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (2600, 2602), False, 'import torch\n'), ((5227, 5255), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['test_data'], {}), '(test_data)\n', (5244, 5255), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((5279, 5384), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'sampler': 'test_sampler', 'batch_size': 'args.test_batch_size', 'collate_fn': 'collate_func'}), '(test_data, sampler=test_sampler, batch_size=args.test_batch_size,\n collate_fn=collate_func)\n', (5289, 5384), False, 'from torch.utils.data import DataLoader, RandomSampler, SequentialSampler\n'), ((5430, 5480), 'tqdm.tqdm', 'tqdm', (['test_data_loader'], {'desc': '"""iter"""', 'disable': '(False)'}), "(test_data_loader, desc='iter', disable=False)\n", (5434, 5480), False, 'from tqdm import tqdm, trange\n'), ((6251, 6276), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6274, 6276), False, 'import argparse\n'), ((8715, 8758), 'transformers.modeling_gpt2.GPT2Config.from_json_file', 'GPT2Config.from_json_file', (['args.config_path'], {}), '(args.config_path)\n', (8740, 8758), False, 'from transformers.modeling_gpt2 import GPT2Config\n'), ((9196, 9262), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['args.vocab_path'], {'do_lower_case': '(True)'}), '(args.vocab_path, do_lower_case=True)\n', (9225, 9262), False, 'from transformers import BertTokenizer\n'), ((9612, 9728), 'data_set.GPT2NewsTitleDataSet', 'GPT2NewsTitleDataSet', (['tokenizer', 'args.max_len', 'args.title_max_len', 'args.data_dir', '"""train"""', 'args.train_file_path'], {}), "(tokenizer, args.max_len, args.title_max_len, args.\n data_dir, 'train', args.train_file_path)\n", (9632, 9728), False, 'from data_set import GPT2NewsTitleDataSet, collate_func\n'), ((9740, 9854), 'data_set.GPT2NewsTitleDataSet', 'GPT2NewsTitleDataSet', (['tokenizer', 'args.max_len', 
'args.title_max_len', 'args.data_dir', '"""test"""', 'args.test_file_path'], {}), "(tokenizer, args.max_len, args.title_max_len, args.\n data_dir, 'test', args.test_file_path)\n", (9760, 9854), False, 'from data_set import GPT2NewsTitleDataSet, collate_func\n'), ((2862, 2926), 'tqdm.tqdm', 'tqdm', (['train_data_loader'], {'desc': '"""Iter (loss=X.XXX)"""', 'disable': '(False)'}), "(train_data_loader, desc='Iter (loss=X.XXX)', disable=False)\n", (2866, 2926), False, 'from tqdm import tqdm, trange\n'), ((4967, 4991), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (4989, 4991), False, 'import torch\n'), ((8584, 8612), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (8601, 8612), False, 'import torch\n'), ((8621, 8643), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (8632, 8643), False, 'import random\n'), ((8652, 8677), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (8666, 8677), True, 'import numpy as np\n'), ((8957, 9016), 'model.GPT2LMHeadModel.from_pretrained', 'GPT2LMHeadModel.from_pretrained', (['args.pretrained_model_path'], {}), '(args.pretrained_model_path)\n', (8988, 9016), False, 'from model import GPT2LMHeadModel\n'), ((9073, 9109), 'model.GPT2LMHeadModel', 'GPT2LMHeadModel', ([], {'config': 'model_config'}), '(config=model_config)\n', (9088, 9109), False, 'from model import GPT2LMHeadModel\n'), ((9510, 9541), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (9524, 9541), False, 'import os\n'), ((9551, 9576), 'os.mkdir', 'os.mkdir', (['args.output_dir'], {}), '(args.output_dir)\n', (9559, 9576), False, 'import os\n'), ((5656, 5671), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5669, 5671), False, 'import torch\n'), ((8474, 8499), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8497, 8499), False, 'import torch\n')]
|
import importlib
import copy
import io, time
from io import BytesIO
import chardet
import os
import collections
from itertools import combinations, cycle, product
import math
import numpy as np
import pandas as pd
import pickle
import tarfile
import random
import re
import requests
from nltk.corpus import stopwords
from scipy.sparse import hstack, lil_matrix
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.decomposition import PCA
from sklearn.cluster import AgglomerativeClustering
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
import torch.nn.functional as F
from tqdm import tqdm
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', 1000)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1) # change None to -1
from collections import Counter, defaultdict
import numpy as np
import re
import sys
import sklearn
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.metrics import roc_auc_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.metrics import classification_report, accuracy_score
import torch
from torchvision import datasets, transforms
from torch import nn, optim, autograd
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from transformers import * # here import bert
import warnings
warnings.filterwarnings("ignore")
# from data_structure import Dataset #, get_IMDB, get_kindle
import argparse
import utils
importlib.reload(utils)
from utils import *
from vae import VAE, vae_loss_function, train_vae, test_vae
# randseed = 52744889
randseed = int(time.time()*1e7%1e8)
print("random seed: ", randseed)
sys.stdout.flush()
random.seed(randseed)
np.random.seed(randseed)
torch.manual_seed(randseed)
parser = argparse.ArgumentParser(description='Text Reviews')
parser.add_argument('-d', '--dataset', type=str, default='amazon',choices=['yelp', 'amazon', 'tripadvisor'])
parser.add_argument('--datsubsample', type=int, default=10000)
parser.add_argument('--n_restarts', type=int, default=1)
parser.add_argument('--steps', type=int, default=2001)
parser.add_argument('--hidden_dim', type=int, default=128)
parser.add_argument('--l2_reg', type=float, default=1e-3)
parser.add_argument('--lr', type=float, default=1e-2)
parser.add_argument('--mode', type=str, default="linear", choices=["linear", "logistic"])
parser.add_argument('--z_dim', type=int, default=1000)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--num_features', type=int, default=5)
parser.add_argument('--input_dim', type=int, default=0)
parser.add_argument('--vae_epochs', type=int, default=101)
parser.add_argument('--spurious_corr', type=float, default=0.9)
parser.add_argument('--alter_freq', type=int, default=50)
parser.add_argument('--mode_latent', type=str, default="pcaz", choices=["vaez", "bertz", "bertz_cl", "pcaz"])
parser.add_argument('--mode_train_data', type=str, default="text", choices=["text", "bertz"])
flags, unk = parser.parse_known_args()
res = pd.DataFrame(vars(flags), index=[0])
res['randseed'] = randseed
print(flags)
sys.stdout.flush()
moniker = flags.dataset
out_dir = moniker + '_out'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
dat_file = 'dat/'+ moniker + '/' + moniker + '_meta.csv'
# detect encoding
# rawdata=open(dat_file,'rb').read()
# result = chardet.detect(rawdata)
# charenc = result['encoding']
# print(charenc)
if moniker == 'amazon':
full_dat = pd.read_csv(dat_file)
elif moniker == 'tripadvisor':
full_dat = pd.read_csv(dat_file, encoding='Windows-1252')
elif moniker == 'yelp':
full_dat = pd.read_csv(dat_file, lineterminator='\n')
full_dat = full_dat.rename(columns={'stars_x':'y', 'text':'review_text'})
data = full_dat[full_dat['y']!=3].sample(n=flags.datsubsample)
texts = list(data['review_text'])
labels = (np.array(data['y']) > 3)
split1, split2 = int(0.6*len(texts)), (int(0.6*len(texts)) + int(0.2*len(texts)))
train_text, train_label = texts[:split1], torch.from_numpy(labels[:split1]).float().cuda()
testobs_text, testobs_label = texts[split1:split2], torch.from_numpy(labels[split1:split2]).float().cuda()
testct_text, testct_label = texts[split2:], torch.from_numpy(labels[split2:]).float().cuda()
stop_words = set(stopwords.words('english'))
# vec = CountVectorizer(min_df=5, binary=True, max_df=0.8, ngram_range=(1,3))
vec = TfidfVectorizer(min_df=10, binary=True, max_df=0.8, ngram_range=(1,3))
X_full = vec.fit_transform(train_text)
X_train_full = vec.transform(train_text)
X_testobs_full = vec.transform(testobs_text)
X_testct_full = vec.transform(testct_text)
feats = np.array(vec.get_feature_names())
top_feature_idx, placebo_feature_idx, coef = get_top_terms(vec.transform(train_text), train_label.cpu().numpy(), coef_thresh=0.0, placebo_thresh=0.1) # use coef_threshold=0.0 to take all features, no thresholding happening here.
# top_feature_idx = np.arange(500)
X_train_np = vec.transform(train_text).toarray()
X_testobs_np = vec.transform(testobs_text).toarray()
X_testct_np = vec.transform(testct_text).toarray()
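# Filter out near-duplicate features: among the selected top features, drop those that are almost perfectly correlated (> 0.96) with another one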
fea_corrcoef = np.corrcoef(X_train_np[:,top_feature_idx].T) - np.eye(X_train_np[:,top_feature_idx].shape[1])
colinear_fea = np.where(fea_corrcoef>0.96)[0]
feature_idx = np.array(list(set(top_feature_idx) - set(colinear_fea)))
# only consider words in feature_idx
id2term = collections.OrderedDict({i:v for i,v in enumerate(feats[feature_idx])})
term2id = collections.OrderedDict({v:i for i,v in enumerate(feats[feature_idx])})
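# Label-irrelevant filler words whose counts get overwritten with synthetic, label-correlated values in make_environment below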
spurious_words = np.array([term2id['as'], term2id['also'], term2id['am'], term2id['an']])
final_train_accs = []
final_test_accs = []
final_train_baselineaccs = []
final_test_baselineaccs = []
final_train_baselinevaeaccs = []
final_test_baselinevaeaccs = []
for restart in range(flags.n_restarts):
print("Restart", restart)
def make_environment(texts, labels, e):
def torch_bernoulli(p, size):
return (torch.rand(size) < p).float()
def torch_xor(a, b):
return (a-b).abs() # Assumes both inputs are either 0 or 1
        # Binarize the label, then flip it with probability 0.35 (label noise)
labels = (labels == 1).float()
labels = torch_xor(labels, torch_bernoulli(0.35, len(labels)).cuda())
        # Create spurious features that agree with the (noisy) label, flipped with probability e
spurious_counts = torch.stack([torch_xor(labels, torch_bernoulli(e, len(labels)).cuda()) for i in range(len(spurious_words))], axis=1)
        # Overwrite the counts of the spurious words with these synthetic values
texts[:,spurious_words] = spurious_counts.cpu().numpy()
return {
'texts': torch.from_numpy(texts).float().cuda(),
'labels': labels[:, None].cuda(),
'colors': spurious_counts.cuda()
}
train_data = make_environment(X_train_np[:,feature_idx], train_label, 1-flags.spurious_corr)
X_train, train_label = train_data['texts'], train_data['labels']
testobs_data = make_environment(X_testobs_np[:,feature_idx], testobs_label, 1-flags.spurious_corr)
X_testobs, testobs_label = testobs_data['texts'], testobs_data['labels']
testct_data = make_environment(X_testct_np[:,feature_idx], testct_label, 0.9)
X_testct, testct_label = testct_data['texts'], testct_data['labels']
vocabsize = X_train.shape[1]
flags.input_dim = vocabsize
# calculate pca embedding
pca = PCA(n_components=flags.z_dim)
# pca.fit(np.row_stack([X_train_np, X_testobs_np, X_testct_np]))
pca.fit(np.row_stack([X_train_np[:,feature_idx]]))
train_pca_embedding = torch.from_numpy(pca.transform(X_train_np[:,feature_idx])).float().cuda()
testobs_pca_embedding = torch.from_numpy(pca.transform(X_testobs_np[:,feature_idx])).float().cuda()
testct_pca_embedding = torch.from_numpy(pca.transform(X_testct_np[:,feature_idx])).float().cuda()
print(np.cumsum(pca.explained_variance_ratio_))
print(pca.explained_variance_ratio_ * flags.input_dim)
# take only the top pc dimensions with effective sample size > 100
# flags.z_dim = np.sum(pca.explained_variance_ratio_ * flags.input_dim > 30)
# print(flags.z_dim)
# # calculate pca embedding
# pca = PCA(n_components=flags.z_dim)
# # pca.fit(np.row_stack([X_train_np, X_testobs_np, X_testct_np]))
# pca.fit(np.row_stack([X_train_np[:,feature_idx]]))
# train_pca_embedding = torch.from_numpy(pca.transform(X_train_np[:,feature_idx])).float().cuda()
# testobs_pca_embedding = torch.from_numpy(pca.transform(X_testobs_np[:,feature_idx])).float().cuda()
# testct_pca_embedding = torch.from_numpy(pca.transform(X_testct_np[:,feature_idx])).float().cuda()
# flags.num_features = flags.input_dim - flags.z_dim
subset_nonsing=False
if flags.mode_latent == "vaez":
z_dim = flags.z_dim
elif flags.mode_latent == "bertz":
z_dim = train_embedding.shape[1]
elif flags.mode_latent == "bertz_cl":
z_dim = X_train_cl_embedding.shape[1]
subset_nonsing=True
elif flags.mode_latent == "pcaz":
z_dim = flags.z_dim
# z_dim = flags.z_dim
print(vocabsize, z_dim)
sys.stdout.flush()
def compute_prob(logits, mode="logistic"):
if mode == "linear":
probs = torch.max(torch.stack([logits,torch.zeros_like(logits)],dim=2),dim=2)[0]
probs = torch.min(torch.stack([probs,torch.ones_like(probs)],dim=2),dim=2)[0]
elif mode == "logistic":
probs = nn.Sigmoid()(logits)
return probs
class MLP(nn.Module):
def __init__(self):
super(MLP, self).__init__()
self.input_dim = flags.input_dim
self.z_dim = z_dim
self.num_features = flags.num_features
lin1 = nn.Linear(self.input_dim, self.num_features)
lin4 = nn.Linear(self.z_dim, 1)
for lin in [lin1, lin4]:
nn.init.xavier_uniform_(lin.weight)
nn.init.zeros_(lin.bias)
self._main = nn.Sequential(lin1)
self._tvaez = nn.Sequential(lin4)
self.finallayer = nn.Linear(self.num_features + 1, 1)
def forward(self, inputbow, vaez):
features = torch.matmul(inputbow, F.softmax(self._main[0].weight,dim=1).T)
logits = self.finallayer(torch.cat([features, self._tvaez(vaez)],dim=1))
probs = compute_prob(logits, mode=flags.mode)
features_ctr = features - features.mean(dim=0)
beta_hat = 0.
feature_hats = 0.
logit_hats = logits
prob_hats = probs
return features, logits, probs, beta_hat, logit_hats, prob_hats
def mean_nll(probs, y, mode="logistic"):
if mode == "linear":
mean_nll = nn.MSELoss()(probs, y)
elif mode == "logistic":
mean_nll = nn.BCELoss()(probs, y)
return mean_nll
def mean_accuracy(probs, y):
preds = (probs > 0.5).float()
return ((preds - y).abs() < 1e-2).float().mean()
# the Net component is not used
class Net(nn.Module):
def __init__(self):
super().__init__()
self.fc = nn.Linear(flags.num_features, 1)
def forward(self, x):
x = self.fc(x)
return x
def initNet(layer):
nn.init.xavier_uniform_(layer.weight)
nn.init.zeros_(layer.bias)
envs = [
{'text': X_train, 'pcaz': train_pca_embedding, 'labels': train_label}, \
{'text': X_testct, 'pcaz': testct_pca_embedding, 'labels': testct_label}, \
{'text': X_testobs, 'pcaz': testobs_pca_embedding, 'labels': testobs_label}]
if subset_nonsing == True:
envs[0]['text'] = envs[0]['text'][nonsing_sents]
envs[0]['labels'] = envs[0]['labels'][nonsing_sents]
if flags.mode_train_data == 'text':
flags.input_dim = vocabsize
train_loader = torch.utils.data.DataLoader(dataset=envs[0]['text'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testct_loader = torch.utils.data.DataLoader(dataset=envs[1]['text'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testobs_loader = torch.utils.data.DataLoader(dataset=envs[2]['text'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
elif flags.mode_train_data == 'bertz':
flags.input_dim = train_embedding.shape[1]
train_loader = torch.utils.data.DataLoader(dataset=envs[0]['bertz'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testct_loader = torch.utils.data.DataLoader(dataset=envs[1]['bertz'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
testobs_loader = torch.utils.data.DataLoader(dataset=envs[2]['bertz'].view(-1, flags.input_dim), batch_size=flags.batch_size, shuffle=False)
if flags.mode_latent == 'vae':
trainvaez_name = flags.dataset + 'k' + str(flags.z_dim) + 'trainvae.pt'
testctvaez_name = flags.dataset + 'k' + str(flags.z_dim) + 'testctvae.pt'
testobsvaez_name = flags.dataset + 'k' + str(flags.z_dim) + 'testobsvae.pt'
envs[0]['vaeimage'] = torch.load(trainvaez_name)[0].detach()
envs[1]['vaeimage'] = torch.load(testctvaez_name)[0].detach()
envs[2]['vaeimage'] = torch.load(testobsvaez_name)[0].detach()
envs[0]['vaez'] = torch.load(trainvaez_name)[1].detach()
envs[1]['vaez'] = torch.load(testctvaez_name)[1].detach()
envs[2]['vaez'] = torch.load(testobsvaez_name)[1].detach()
mlp = MLP().cuda()
optimizer_causalrep = optim.Adam(mlp._main.parameters(), lr=flags.lr, weight_decay=1e-8)
for step in range(flags.steps):
for i in range(len(envs)):
env = envs[i]
features, logits, probs, beta_hat, logit_hats, prob_hats = mlp(env[flags.mode_train_data], env[flags.mode_latent])
labels = env['labels']
env['nll'] = mean_nll(probs, env['labels'], mode=flags.mode)
env['nllhat'] = mean_nll(prob_hats, env['labels'], mode=flags.mode)
env['acc'] = mean_accuracy(probs, env['labels'])
env['acchat'] = mean_accuracy(prob_hats, env['labels'])
y = labels - labels.mean()
X = torch.cat([features, env[flags.mode_latent]], dim=1)
X = X - X.mean(dim=0)
X = torch.cat([torch.ones(X.shape[0],1).cuda(), X], dim=1)
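      # Closed-form ridge regression: beta = (l2_reg * I + X^T X)^{-1} X^T y, computed separately for each outcome column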
beta = [torch.matmul(
torch.matmul(
torch.inverse(flags.l2_reg*torch.eye(X.shape[1]).cuda()+
torch.matmul(
torch.transpose(X, 0, 1),
X)),
torch.transpose(X, 0, 1)),
y[:,j]) for j in range(y.shape[1])]
env['covs'] = cov(torch.cat([beta[0][1:flags.num_features+1] *features, torch.unsqueeze((beta[0][-flags.z_dim:] * env[flags.mode_latent]).sum(dim=1),1)], dim=1))[-1][:-1] # extract the last row to have cov(Features, C)
env['causalrep'] = ((features.std(dim=0) * beta[0][1:flags.num_features+1])**2).sum()
# + 2 * env['covs']).sum()
weight_norm = torch.tensor(0.).cuda()
for w in mlp.finallayer.parameters():
weight_norm += w.norm().pow(2)
env['l2penalty'] = flags.l2_reg * weight_norm
if step % 500 == 0:
print("\nnll", env['nll'],
"\nl2", env['l2penalty'],
"\ncausalrep", env['causalrep'])
# "\nfeatureZr2", env['featureZr2'])
sys.stdout.flush()
train_l2penalty = torch.stack([envs[0]['l2penalty']])
train_causalrep = torch.stack([envs[0]['causalrep']])
train_nll = torch.stack([envs[0]['nll']]).mean()
train_acc = torch.stack([envs[0]['acc']]).mean()
testct_nll = torch.stack([envs[1]['nll']]).mean()
testct_acc = torch.stack([envs[1]['acc']]).mean()
testobs_nll = torch.stack([envs[2]['nll']]).mean()
testobs_acc = torch.stack([envs[2]['acc']]).mean()
nll_loss = train_nll.clone()
# + train_l2penalty.clone()
if step % 1 == 0:
l1_penalty = F.softmax(mlp._main[0].weight,dim=1).abs().sum()
train_causalrep_loss = -train_causalrep.clone()
# + 1e-3 * l1_penalty - 1e-2 * torch.log(1 - train_featureZr2)
optimizer_causalrep.zero_grad()
train_causalrep_loss.backward(retain_graph=True)
optimizer_causalrep.step()
if step % 100 == 0:
train_features, train_y = mlp(envs[0][flags.mode_train_data], envs[0][flags.mode_latent])[0].clone().cpu().detach().numpy(), envs[0]['labels'].clone().cpu().detach().numpy()
testct_features, testct_y = mlp(envs[1][flags.mode_train_data], envs[1][flags.mode_latent])[0].clone().cpu().detach().numpy(), envs[1]['labels'].clone().cpu().detach().numpy()
testobs_features, testobs_y = mlp(envs[2][flags.mode_train_data], envs[2][flags.mode_latent])[0].clone().cpu().detach().numpy(), envs[2]['labels'].clone().cpu().detach().numpy()
C_vals = [1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]
causalrep_alphas, causalrep_trainaccs, causalrep_testobsaccs, causalrep_testctaccs = [], [], [], []
for C in C_vals:
alpha = 1./C
print('\ncausal-pred-w-features', 'C', C)
# clf = LinearRegression()
# clf = Ridge(alpha=alpha)
clf = LogisticRegression(C=C, class_weight='auto', solver='lbfgs')
clf.fit(train_features, train_y)
resulttrain = classification_report((train_y > 0), (clf.predict(train_features) > 0), output_dict=True)
resultct = classification_report((testct_y > 0), (clf.predict(testct_features) > 0), output_dict=True)
resultobs = classification_report((testobs_y > 0), (clf.predict(testobs_features)> 0), output_dict=True)
print('train',resulttrain['accuracy'])
print('testobs',resultobs['accuracy'])
print('testct',resultct['accuracy'])
sys.stdout.flush()
causalrep_trainaccs.append(resulttrain['accuracy'])
causalrep_testobsaccs.append(resultobs['accuracy'])
causalrep_testctaccs.append(resultct['accuracy'])
causalrep_alphas.append(alpha)
print("\n\n##### causal rep top words")
feature_weights = torch.topk(F.softmax(mlp._main[0].weight,dim=1),20, axis=1)
top_causal_words = feature_weights[1].detach().cpu().numpy()
top_causal_weights = feature_weights[0].detach().cpu().numpy()
for j in np.argsort(-np.abs(beta[0][1:(1+flags.num_features)].detach().cpu().numpy())):
# for j in range(top_causal_words.shape[0]):
print("feature", j)
print("coefficient", beta[0][j+1])
sort_causal_words = np.argsort(-top_causal_weights[j])[:20]
print("top causal words", [id2term[i] for i in top_causal_words[j][sort_causal_words]], top_causal_weights[j][sort_causal_words]
)
causalrep_res = {}
assert len(causalrep_alphas) == len(causalrep_trainaccs)
assert len(causalrep_alphas) == len(causalrep_testobsaccs)
assert len(causalrep_alphas) == len(causalrep_testctaccs)
for item in ['causalrep_trainaccs', 'causalrep_testobsaccs', 'causalrep_testctaccs']:
for i, alpha in enumerate(causalrep_alphas):
curname = item + '_' + str(alpha)
if item == 'causalrep_trainaccs':
causalrep_res[curname] = causalrep_trainaccs[i]
elif item == 'causalrep_testobsaccs':
causalrep_res[curname] = causalrep_testobsaccs[i]
elif item == 'causalrep_testctaccs':
causalrep_res[curname] = causalrep_testctaccs[i]
res = pd.concat([pd.DataFrame(causalrep_res, index=[0]), res], axis=1)
if step % 10 == 0:
print("itr", np.int32(step),
# "train_causalrephat_loss", train_causalrep_loss.detach().cpu().numpy(),
"train_causalrep", train_causalrep.detach().cpu().numpy(),
# "train_causalrephat", train_causalrephat.detach().cpu().numpy(),
"train_nll", train_nll.detach().cpu().numpy(),
"train_acc", train_acc.detach().cpu().numpy(),
"testct_acc", testct_acc.detach().cpu().numpy(),
"testobs_acc", testobs_acc.detach().cpu().numpy())
sys.stdout.flush()
print("step", step, "add causalrep_res")
res = pd.concat([pd.DataFrame(causalrep_res, index=[0]), res], axis=1)
# compare with naive
naive_alphas, naive_trainaccs, naive_testobsaccs, naive_testctaccs = [], [], [], []
for C in [1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3]:
alpha = 1./C
print('\nnaive-pred', 'C', C)
# clf = LinearRegression()
# clf = Ridge(alpha=alpha)
clf = LogisticRegression(C=C, class_weight='auto', solver='lbfgs')
clf.fit(envs[0][flags.mode_train_data].cpu().detach().numpy(), train_y)
resulttrain = classification_report((train_y > 0), (clf.predict(envs[0][flags.mode_train_data].cpu().detach().numpy()) > 0), output_dict=True)
resultct = classification_report((testct_y > 0), (clf.predict(envs[1][flags.mode_train_data].cpu().detach().numpy()) > 0), output_dict=True)
resultobs = classification_report((testobs_y > 0), (clf.predict(envs[2][flags.mode_train_data].cpu().detach().numpy())> 0), output_dict=True)
print('train',resulttrain['accuracy'])
print('testobs',resultobs['accuracy'])
print('testct',resultct['accuracy'])
sys.stdout.flush()
naive_weights = clf.coef_
top_naive_words = np.argsort(-np.abs(naive_weights))[0,:20]
top_coef = naive_weights[0,top_naive_words]
print("top naive words", [id2term[i] for i in top_naive_words], top_coef)
naive_trainaccs.append(resulttrain['accuracy'])
naive_testobsaccs.append(resultobs['accuracy'])
naive_testctaccs.append(resultct['accuracy'])
naive_alphas.append(alpha)
naive_res = {}
assert len(naive_alphas) == len(naive_trainaccs)
assert len(naive_alphas) == len(naive_testobsaccs)
assert len(naive_alphas) == len(naive_testctaccs)
for item in ['naive_trainaccs', 'naive_testobsaccs', 'naive_testctaccs']:
for i, alpha in enumerate(naive_alphas):
curname = item + '_' + str(alpha)
if item == 'naive_trainaccs':
naive_res[curname] = naive_trainaccs[i]
elif item == 'naive_testobsaccs':
naive_res[curname] = naive_testobsaccs[i]
elif item == 'naive_testctaccs':
naive_res[curname] = naive_testctaccs[i]
res = pd.concat([pd.DataFrame(naive_res, index=[0]), res], axis=1)
res.to_csv(out_dir + '/reviews_text' + str(int(time.time()*1e6)) + '.csv')
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.abs",
"torch.eye",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.read_csv",
"torch.cat",
"numpy.argsort",
"sys.stdout.flush",
"pandas.set_option",
"pandas.DataFrame",
"torch.ones",
"torch.nn.MSELoss",
"torch.nn.BCELoss",
"torch.load",
"os.path.exists",
"numpy.cumsum",
"random.seed",
"numpy.int32",
"torch.nn.Linear",
"torch.zeros_like",
"torch.manual_seed",
"numpy.corrcoef",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.zeros_",
"sklearn.linear_model.LogisticRegression",
"nltk.corpus.stopwords.words",
"torch.rand",
"torch.nn.Sigmoid",
"torch.from_numpy",
"torch.ones_like",
"os.makedirs",
"torch.stack",
"warnings.filterwarnings",
"torch.nn.Sequential",
"time.time",
"torch.nn.functional.softmax",
"importlib.reload",
"numpy.where",
"numpy.array",
"sklearn.decomposition.PCA",
"numpy.row_stack",
"numpy.eye",
"torch.tensor",
"torch.transpose"
] |
[((638, 680), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (651, 680), True, 'import pandas as pd\n'), ((683, 722), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', '(1000)'], {}), "('display.max_rows', 1000)\n", (696, 722), True, 'import pandas as pd\n'), ((723, 772), 'pandas.set_option', 'pd.set_option', (['"""display.expand_frame_repr"""', '(False)'], {}), "('display.expand_frame_repr', False)\n", (736, 772), True, 'import pandas as pd\n'), ((773, 806), 'pandas.set_option', 'pd.set_option', (['"""max_colwidth"""', '(-1)'], {}), "('max_colwidth', -1)\n", (786, 806), True, 'import pandas as pd\n'), ((1581, 1614), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1604, 1614), False, 'import warnings\n'), ((1708, 1731), 'importlib.reload', 'importlib.reload', (['utils'], {}), '(utils)\n', (1724, 1731), False, 'import importlib\n'), ((1904, 1922), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (1920, 1922), False, 'import sys\n'), ((1923, 1944), 'random.seed', 'random.seed', (['randseed'], {}), '(randseed)\n', (1934, 1944), False, 'import random\n'), ((1945, 1969), 'numpy.random.seed', 'np.random.seed', (['randseed'], {}), '(randseed)\n', (1959, 1969), True, 'import numpy as np\n'), ((1970, 1997), 'torch.manual_seed', 'torch.manual_seed', (['randseed'], {}), '(randseed)\n', (1987, 1997), False, 'import torch\n'), ((2008, 2059), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Text Reviews"""'}), "(description='Text Reviews')\n", (2031, 2059), False, 'import argparse\n'), ((3343, 3361), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3359, 3361), False, 'import sys\n'), ((4627, 4698), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'min_df': '(10)', 'binary': '(True)', 'max_df': '(0.8)', 'ngram_range': '(1, 3)'}), '(min_df=10, binary=True, max_df=0.8, ngram_range=(1, 3))\n', (4642, 4698), False, 'from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\n'), ((5779, 5851), 'numpy.array', 'np.array', (["[term2id['as'], term2id['also'], term2id['am'], term2id['an']]"], {}), "([term2id['as'], term2id['also'], term2id['am'], term2id['an']])\n", (5787, 5851), True, 'import numpy as np\n'), ((7644, 7673), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'flags.z_dim'}), '(n_components=flags.z_dim)\n', (7647, 7673), False, 'from sklearn.decomposition import PCA\n'), ((9269, 9287), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (9285, 9287), False, 'import sys\n'), ((3422, 3445), 'os.path.exists', 'os.path.exists', (['out_dir'], {}), '(out_dir)\n', (3436, 3445), False, 'import os\n'), ((3451, 3471), 'os.makedirs', 'os.makedirs', (['out_dir'], {}), '(out_dir)\n', (3462, 3471), False, 'import os\n'), ((3710, 3731), 'pandas.read_csv', 'pd.read_csv', (['dat_file'], {}), '(dat_file)\n', (3721, 3731), True, 'import pandas as pd\n'), ((4095, 4114), 'numpy.array', 'np.array', (["data['y']"], {}), "(data['y'])\n", (4103, 4114), True, 'import numpy as np\n'), ((4513, 4539), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (4528, 4539), False, 'from nltk.corpus import stopwords\n'), ((5347, 5392), 'numpy.corrcoef', 'np.corrcoef', (['X_train_np[:, top_feature_idx].T'], {}), '(X_train_np[:, top_feature_idx].T)\n', (5358, 5392), True, 'import numpy as np\n'), ((5394, 5441), 'numpy.eye', 'np.eye', (['X_train_np[:, 
top_feature_idx].shape[1]'], {}), '(X_train_np[:, top_feature_idx].shape[1])\n', (5400, 5441), True, 'import numpy as np\n'), ((5456, 5485), 'numpy.where', 'np.where', (['(fea_corrcoef > 0.96)'], {}), '(fea_corrcoef > 0.96)\n', (5464, 5485), True, 'import numpy as np\n'), ((7748, 7790), 'numpy.row_stack', 'np.row_stack', (['[X_train_np[:, feature_idx]]'], {}), '([X_train_np[:, feature_idx]])\n', (7760, 7790), True, 'import numpy as np\n'), ((8093, 8133), 'numpy.cumsum', 'np.cumsum', (['pca.explained_variance_ratio_'], {}), '(pca.explained_variance_ratio_)\n', (8102, 8133), True, 'import numpy as np\n'), ((11231, 11268), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['layer.weight'], {}), '(layer.weight)\n', (11254, 11268), False, 'from torch import nn, optim, autograd\n'), ((11273, 11299), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['layer.bias'], {}), '(layer.bias)\n', (11287, 11299), False, 'from torch import nn, optim, autograd\n'), ((15330, 15365), 'torch.stack', 'torch.stack', (["[envs[0]['l2penalty']]"], {}), "([envs[0]['l2penalty']])\n", (15341, 15365), False, 'import torch\n'), ((15388, 15423), 'torch.stack', 'torch.stack', (["[envs[0]['causalrep']]"], {}), "([envs[0]['causalrep']])\n", (15399, 15423), False, 'import torch\n'), ((20473, 20533), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C', 'class_weight': '"""auto"""', 'solver': '"""lbfgs"""'}), "(C=C, class_weight='auto', solver='lbfgs')\n", (20491, 20533), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge\n'), ((21179, 21197), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (21195, 21197), False, 'import sys\n'), ((3778, 3824), 'pandas.read_csv', 'pd.read_csv', (['dat_file'], {'encoding': '"""Windows-1252"""'}), "(dat_file, encoding='Windows-1252')\n", (3789, 3824), True, 'import pandas as pd\n'), ((9829, 9873), 'torch.nn.Linear', 'nn.Linear', (['self.input_dim', 'self.num_features'], {}), '(self.input_dim, self.num_features)\n', (9838, 9873), False, 'from torch import nn, optim, autograd\n'), ((9889, 9913), 'torch.nn.Linear', 'nn.Linear', (['self.z_dim', '(1)'], {}), '(self.z_dim, 1)\n', (9898, 9913), False, 'from torch import nn, optim, autograd\n'), ((10053, 10072), 'torch.nn.Sequential', 'nn.Sequential', (['lin1'], {}), '(lin1)\n', (10066, 10072), False, 'from torch import nn, optim, autograd\n'), ((10095, 10114), 'torch.nn.Sequential', 'nn.Sequential', (['lin4'], {}), '(lin4)\n', (10108, 10114), False, 'from torch import nn, optim, autograd\n'), ((10142, 10177), 'torch.nn.Linear', 'nn.Linear', (['(self.num_features + 1)', '(1)'], {}), '(self.num_features + 1, 1)\n', (10151, 10177), False, 'from torch import nn, optim, autograd\n'), ((11107, 11139), 'torch.nn.Linear', 'nn.Linear', (['flags.num_features', '(1)'], {}), '(flags.num_features, 1)\n', (11116, 11139), False, 'from torch import nn, optim, autograd\n'), ((14026, 14078), 'torch.cat', 'torch.cat', (['[features, env[flags.mode_latent]]'], {'dim': '(1)'}), '([features, env[flags.mode_latent]], dim=1)\n', (14035, 14078), False, 'import torch\n'), ((20061, 20079), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20077, 20079), False, 'import sys\n'), ((20139, 20177), 'pandas.DataFrame', 'pd.DataFrame', (['causalrep_res'], {'index': '[0]'}), '(causalrep_res, index=[0])\n', (20151, 20177), True, 'import pandas as pd\n'), ((22232, 22266), 'pandas.DataFrame', 'pd.DataFrame', (['naive_res'], {'index': '[0]'}), '(naive_res, index=[0])\n', (22244, 22266), True, 'import pandas 
as pd\n'), ((1850, 1861), 'time.time', 'time.time', ([], {}), '()\n', (1859, 1861), False, 'import io, time\n'), ((3864, 3906), 'pandas.read_csv', 'pd.read_csv', (['dat_file'], {'lineterminator': '"""\n"""'}), "(dat_file, lineterminator='\\n')\n", (3875, 3906), True, 'import pandas as pd\n'), ((9959, 9994), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['lin.weight'], {}), '(lin.weight)\n', (9982, 9994), False, 'from torch import nn, optim, autograd\n'), ((10007, 10031), 'torch.nn.init.zeros_', 'nn.init.zeros_', (['lin.bias'], {}), '(lin.bias)\n', (10021, 10031), False, 'from torch import nn, optim, autograd\n'), ((10752, 10764), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (10762, 10764), False, 'from torch import nn, optim, autograd\n'), ((15288, 15306), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15304, 15306), False, 'import sys\n'), ((15440, 15469), 'torch.stack', 'torch.stack', (["[envs[0]['nll']]"], {}), "([envs[0]['nll']])\n", (15451, 15469), False, 'import torch\n'), ((15494, 15523), 'torch.stack', 'torch.stack', (["[envs[0]['acc']]"], {}), "([envs[0]['acc']])\n", (15505, 15523), False, 'import torch\n'), ((15548, 15577), 'torch.stack', 'torch.stack', (["[envs[1]['nll']]"], {}), "([envs[1]['nll']])\n", (15559, 15577), False, 'import torch\n'), ((15602, 15631), 'torch.stack', 'torch.stack', (["[envs[1]['acc']]"], {}), "([envs[1]['acc']])\n", (15613, 15631), False, 'import torch\n'), ((15657, 15686), 'torch.stack', 'torch.stack', (["[envs[2]['nll']]"], {}), "([envs[2]['nll']])\n", (15668, 15686), False, 'import torch\n'), ((15712, 15741), 'torch.stack', 'torch.stack', (["[envs[2]['acc']]"], {}), "([envs[2]['acc']])\n", (15723, 15741), False, 'import torch\n'), ((17123, 17183), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'C', 'class_weight': '"""auto"""', 'solver': '"""lbfgs"""'}), "(C=C, class_weight='auto', solver='lbfgs')\n", (17141, 17183), False, 'from sklearn.linear_model import LogisticRegression, LinearRegression, Ridge\n'), ((17740, 17758), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17756, 17758), False, 'import sys\n'), ((18078, 18115), 'torch.nn.functional.softmax', 'F.softmax', (['mlp._main[0].weight'], {'dim': '(1)'}), '(mlp._main[0].weight, dim=1)\n', (18087, 18115), True, 'import torch.nn.functional as F\n'), ((19485, 19523), 'pandas.DataFrame', 'pd.DataFrame', (['causalrep_res'], {'index': '[0]'}), '(causalrep_res, index=[0])\n', (19497, 19523), True, 'import pandas as pd\n'), ((19585, 19599), 'numpy.int32', 'np.int32', (['step'], {}), '(step)\n', (19593, 19599), True, 'import numpy as np\n'), ((9578, 9590), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (9588, 9590), False, 'from torch import nn, optim, autograd\n'), ((10260, 10298), 'torch.nn.functional.softmax', 'F.softmax', (['self._main[0].weight'], {'dim': '(1)'}), '(self._main[0].weight, dim=1)\n', (10269, 10298), True, 'import torch.nn.functional as F\n'), ((10823, 10835), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (10833, 10835), False, 'from torch import nn, optim, autograd\n'), ((12998, 13024), 'torch.load', 'torch.load', (['trainvaez_name'], {}), '(trainvaez_name)\n', (13008, 13024), False, 'import torch\n'), ((13063, 13090), 'torch.load', 'torch.load', (['testctvaez_name'], {}), '(testctvaez_name)\n', (13073, 13090), False, 'import torch\n'), ((13129, 13157), 'torch.load', 'torch.load', (['testobsvaez_name'], {}), '(testobsvaez_name)\n', (13139, 13157), False, 'import torch\n'), ((13193, 13219), 
'torch.load', 'torch.load', (['trainvaez_name'], {}), '(trainvaez_name)\n', (13203, 13219), False, 'import torch\n'), ((13254, 13281), 'torch.load', 'torch.load', (['testctvaez_name'], {}), '(testctvaez_name)\n', (13264, 13281), False, 'import torch\n'), ((13316, 13344), 'torch.load', 'torch.load', (['testobsvaez_name'], {}), '(testobsvaez_name)\n', (13326, 13344), False, 'import torch\n'), ((14894, 14911), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (14906, 14911), False, 'import torch\n'), ((18527, 18561), 'numpy.argsort', 'np.argsort', (['(-top_causal_weights[j])'], {}), '(-top_causal_weights[j])\n', (18537, 18561), True, 'import numpy as np\n'), ((21263, 21284), 'numpy.abs', 'np.abs', (['naive_weights'], {}), '(naive_weights)\n', (21269, 21284), True, 'import numpy as np\n'), ((4246, 4279), 'torch.from_numpy', 'torch.from_numpy', (['labels[:split1]'], {}), '(labels[:split1])\n', (4262, 4279), False, 'import torch\n'), ((4347, 4386), 'torch.from_numpy', 'torch.from_numpy', (['labels[split1:split2]'], {}), '(labels[split1:split2])\n', (4363, 4386), False, 'import torch\n'), ((4446, 4479), 'torch.from_numpy', 'torch.from_numpy', (['labels[split2:]'], {}), '(labels[split2:])\n', (4462, 4479), False, 'import torch\n'), ((14435, 14459), 'torch.transpose', 'torch.transpose', (['X', '(0)', '(1)'], {}), '(X, 0, 1)\n', (14450, 14459), False, 'import torch\n'), ((6195, 6211), 'torch.rand', 'torch.rand', (['size'], {}), '(size)\n', (6205, 6211), False, 'import torch\n'), ((9404, 9428), 'torch.zeros_like', 'torch.zeros_like', (['logits'], {}), '(logits)\n', (9420, 9428), False, 'import torch\n'), ((9492, 9514), 'torch.ones_like', 'torch.ones_like', (['probs'], {}), '(probs)\n', (9507, 9514), False, 'import torch\n'), ((14132, 14157), 'torch.ones', 'torch.ones', (['X.shape[0]', '(1)'], {}), '(X.shape[0], 1)\n', (14142, 14157), False, 'import torch\n'), ((15861, 15898), 'torch.nn.functional.softmax', 'F.softmax', (['mlp._main[0].weight'], {'dim': '(1)'}), '(mlp._main[0].weight, dim=1)\n', (15870, 15898), True, 'import torch.nn.functional as F\n'), ((22332, 22343), 'time.time', 'time.time', ([], {}), '()\n', (22341, 22343), False, 'import io, time\n'), ((6930, 6953), 'torch.from_numpy', 'torch.from_numpy', (['texts'], {}), '(texts)\n', (6946, 6953), False, 'import torch\n'), ((14364, 14388), 'torch.transpose', 'torch.transpose', (['X', '(0)', '(1)'], {}), '(X, 0, 1)\n', (14379, 14388), False, 'import torch\n'), ((14276, 14297), 'torch.eye', 'torch.eye', (['X.shape[1]'], {}), '(X.shape[1])\n', (14285, 14297), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""
"""
import tensorflow as tf
import numpy as np
import tflearn
from ddpg.ddpg import build_summaries, ActorNetwork, CriticNetwork, OrnsteinUhlenbeckActionNoise, ReplayBuffer, \
getReward
from src.BallTracker import ballTracker
from src.Pepper import Pepper
from src.Pepper.Pepper import readAngle
from src.Settings import *
# ===========================
# Agent Training
# ===========================
def train(sess, session, thread, args, actor, critic, actor_noise, update_model, saver):
# Set up summary Ops
summary_ops, summary_vars = build_summaries()
if update_model == False:
sess.run(tf.global_variables_initializer())
# Initialize target network weights
actor.update_target_network()
critic.update_target_network()
writer = tf.summary.FileWriter(args['summary_dir'], sess.graph)
# Initialize replay memory
replay_buffer = ReplayBuffer(int(args['buffer_size']), int(args['random_seed']))
# Needed to enable BatchNorm.
# This hurts the performance on Pendulum but could be useful
# in other environments.
tflearn.is_training(True)
for i in range(int(args['max_episodes'])):
# s = env.reset()
ep_reward = 0
ep_ave_max_q = 0
for j in range(int(args['max_episode_len'])):
service = session.service("ALMotion")
params = dict()
            # Get the initial state
            delta1 = thread.delta[0]
            winkel1 = readAngle(session)
            s = [winkel1, delta1]
            # Get the action
            a = actor.predict(np.reshape(s, (1, 2))) + (1. / (1. + i))
            # ITERATE THROUGH SAMPLED DATA AND ADD TO REPLAY BUFFER
a = actor.predict(np.reshape(s, (1, actor.s_dim))) + actor_noise()
# a[0] = ((a[0] - (1 - action_bound)) / (action_bound - (1 - action_bound))) * (
# OBERE_GRENZE - UNTERE_GRENZE) + UNTERE_GRENZE
# a[0] = a[0]
rewardTMP = 0
if a[0] < UNTERE_GRENZE:
# print("Winkel zu klein :" + str(a[0]))
a[0] = UNTERE_GRENZE
rewardTMP = -1000
if a[0] > OBERE_GRENZE:
# print("Winkel zu gross :" + str(a[0]))
a[0] = OBERE_GRENZE
rewardTMP = -1000
            # Execute the action
            params[args['motor']] = [a[0], TIME_TO_MOVE]
            Pepper.move(params, service)
            # Get the next state
            delta2 = thread.delta[0]
            winkel2 = readAngle(session)
            s2 = [winkel2, delta2]
            # Get the reward
r = getReward(delta2) + rewardTMP
terminal = False
print(str(a[0]) + "\t" + str(
r))
replay_buffer.add(np.reshape(s, (actor.s_dim,)), np.reshape(a, (actor.a_dim,)), r,
terminal, np.reshape(s2, (actor.s_dim,)))
# export actor Model somewhere
# Keep adding experience to the memory until
# there are at least minibatch size samples
if replay_buffer.size() > int(args['minibatch_size']):
s_batch, a_batch, r_batch, t_batch, s2_batch = \
replay_buffer.sample_batch(int(args['minibatch_size']))
# Calculate targets
target_q = critic.predict_target(
s2_batch, actor.predict_target(s2_batch))
y_i = []
for k in range(int(args['minibatch_size'])):
if t_batch[k]:
y_i.append(r_batch[k])
else:
y_i.append(r_batch[k] + critic.gamma * target_q[k])
# Update the critic given the targets
predicted_q_value, _ = critic.train(
s_batch, a_batch, np.reshape(y_i, (int(args['minibatch_size']), 1)))
ep_ave_max_q += np.amax(predicted_q_value)
# Update the actor policy using the sampled gradient
a_outs = actor.predict(s_batch)
grads = critic.action_gradients(s_batch, a_outs)
actor.train(s_batch, grads[0])
# Update target networks
actor.update_target_network()
critic.update_target_network()
s = s2
ep_reward += r
if terminal:
summary_str = sess.run(summary_ops, feed_dict={
summary_vars[0]: ep_reward,
summary_vars[1]: ep_ave_max_q / float(j)
})
writer.add_summary(summary_str, i)
writer.flush()
print('| Reward: {:d} | Episode: {:d} | Qmax: {:.4f}'.format(int(ep_reward), \
i, (ep_ave_max_q / float(j))))
break
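# Illustrative sketch (not part of the original training loop): the minibatch targets
# y_i built above follow the standard DDPG Bellman backup,
#     y_i = r_i                                           if the transition is terminal
#     y_i = r_i + gamma * Q_target(s2_i, mu_target(s2_i)) otherwise
# The helper below reproduces that computation with plain numpy values and is
# independent of the ActorNetwork/CriticNetwork classes used above.
def bellman_targets_example(rewards, terminals, target_q, gamma=0.99):
    """Return DDPG critic targets for a sampled minibatch (illustration only)."""
    y = [r if done else r + gamma * q
         for r, done, q in zip(rewards, terminals, target_q)]
    return np.reshape(y, (len(y), 1))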
def main():
with tf.Session() as sess:
print("Starte BallTrackerThread")
global delta
thread1 = ballTracker.BallTrackerThread()
thread1.start()
print("Main running...")
session = Pepper.init(ip, port)
Pepper.roboInit(session)
# Ensure action bound is symmetric
# assert (env.action_space.high == -env.action_space.low)
actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
float(args['actor_lr']), float(args['tau']),
int(args['minibatch_size']))
critic = CriticNetwork(sess, state_dim, action_dim,
float(args['critic_lr']), float(args['tau']),
float(args['gamma']),
actor.get_num_trainable_vars())
actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim))
if args['mode'] == 'INIT':
saver = tf.train.Saver()
            train(sess, session, thread1, args, actor, critic, actor_noise, False, saver)
elif args['mode'] == 'TRAIN':
saver = tf.train.Saver()
saver.restore(sess, args['model'] + "/model")
            train(sess, session, thread1, args, actor, critic, actor_noise, True, saver)
elif args['mode'] == 'TEST':
saver = tf.train.Saver()
saver.restore(sess, args['model'] + "/model")
testDDPG(sess, args, actor, critic, actor_noise)
else:
print('No mode defined!')
#train(sess, session, thread1, args, actor, critic, actor_noise)
print('Terminated')
if __name__ == '__main__':
main()
|
[
"src.Pepper.Pepper.init",
"src.BallTracker.ballTracker.BallTrackerThread",
"tensorflow.train.Saver",
"tensorflow.global_variables_initializer",
"src.Pepper.Pepper.move",
"src.Pepper.Pepper.readAngle",
"tensorflow.Session",
"numpy.zeros",
"numpy.amax",
"tensorflow.summary.FileWriter",
"src.Pepper.Pepper.roboInit",
"numpy.reshape",
"tflearn.is_training",
"ddpg.ddpg.build_summaries",
"ddpg.ddpg.getReward"
] |
[((586, 603), 'ddpg.ddpg.build_summaries', 'build_summaries', ([], {}), '()\n', (601, 603), False, 'from ddpg.ddpg import build_summaries, ActorNetwork, CriticNetwork, OrnsteinUhlenbeckActionNoise, ReplayBuffer, getReward\n'), ((823, 877), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (["args['summary_dir']", 'sess.graph'], {}), "(args['summary_dir'], sess.graph)\n", (844, 877), True, 'import tensorflow as tf\n'), ((1128, 1153), 'tflearn.is_training', 'tflearn.is_training', (['(True)'], {}), '(True)\n', (1147, 1153), False, 'import tflearn\n'), ((4960, 4972), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4970, 4972), True, 'import tensorflow as tf\n'), ((5064, 5095), 'src.BallTracker.ballTracker.BallTrackerThread', 'ballTracker.BallTrackerThread', ([], {}), '()\n', (5093, 5095), False, 'from src.BallTracker import ballTracker\n'), ((5172, 5193), 'src.Pepper.Pepper.init', 'Pepper.init', (['ip', 'port'], {}), '(ip, port)\n', (5183, 5193), False, 'from src.Pepper import Pepper\n'), ((5202, 5226), 'src.Pepper.Pepper.roboInit', 'Pepper.roboInit', (['session'], {}), '(session)\n', (5217, 5226), False, 'from src.Pepper import Pepper\n'), ((652, 685), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (683, 685), True, 'import tensorflow as tf\n'), ((1502, 1520), 'src.Pepper.Pepper.readAngle', 'readAngle', (['session'], {}), '(session)\n', (1511, 1520), False, 'from src.Pepper.Pepper import readAngle\n'), ((2449, 2477), 'src.Pepper.Pepper.move', 'Pepper.move', (['params', 'service'], {}), '(params, service)\n', (2460, 2477), False, 'from src.Pepper import Pepper\n'), ((2570, 2588), 'src.Pepper.Pepper.readAngle', 'readAngle', (['session'], {}), '(session)\n', (2579, 2588), False, 'from src.Pepper.Pepper import readAngle\n'), ((5929, 5945), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (5943, 5945), True, 'import tensorflow as tf\n'), ((2667, 2684), 'ddpg.ddpg.getReward', 'getReward', (['delta2'], {}), '(delta2)\n', (2676, 2684), False, 'from ddpg.ddpg import build_summaries, ActorNetwork, CriticNetwork, OrnsteinUhlenbeckActionNoise, ReplayBuffer, getReward\n'), ((2819, 2848), 'numpy.reshape', 'np.reshape', (['s', '(actor.s_dim,)'], {}), '(s, (actor.s_dim,))\n', (2829, 2848), True, 'import numpy as np\n'), ((2850, 2879), 'numpy.reshape', 'np.reshape', (['a', '(actor.a_dim,)'], {}), '(a, (actor.a_dim,))\n', (2860, 2879), True, 'import numpy as np\n'), ((2924, 2954), 'numpy.reshape', 'np.reshape', (['s2', '(actor.s_dim,)'], {}), '(s2, (actor.s_dim,))\n', (2934, 2954), True, 'import numpy as np\n'), ((3971, 3997), 'numpy.amax', 'np.amax', (['predicted_q_value'], {}), '(predicted_q_value)\n', (3978, 3997), True, 'import numpy as np\n'), ((5851, 5871), 'numpy.zeros', 'np.zeros', (['action_dim'], {}), '(action_dim)\n', (5859, 5871), True, 'import numpy as np\n'), ((6076, 6092), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6090, 6092), True, 'import tensorflow as tf\n'), ((1612, 1633), 'numpy.reshape', 'np.reshape', (['s', '(1, 2)'], {}), '(s, (1, 2))\n', (1622, 1633), True, 'import numpy as np\n'), ((1752, 1783), 'numpy.reshape', 'np.reshape', (['s', '(1, actor.s_dim)'], {}), '(s, (1, actor.s_dim))\n', (1762, 1783), True, 'import numpy as np\n'), ((6279, 6295), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (6293, 6295), True, 'import tensorflow as tf\n')]
|
# Author: <NAME>
# Created: 2021-019-01
# Copyright (C) 2018, <NAME>
# License: MIT
import moderngl
import numpy as np
from PIL import Image
from generativepy.color import Color
def make_3dimage(outfile, draw, width, height, background=Color(0), channels=3):
'''
Create a PNG file using moderngl
:param outfile: Name of output file
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param background: background colour
:param channels: 3 for rgb, 4 for rgba
:return:
'''
if outfile.lower().endswith('.png'):
outfile = outfile[:-4]
frame = make_3dimage_frame(draw, width, height, background, channels)
image = Image.fromarray(frame)
image.save(outfile + '.png')
def make_3dimage_frame(draw, width, height, background=Color(0), channels=3):
'''
Create a numpy frame file using moderngl
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param background: background colour
:param channels: 3 for rgb, 4 for rgba
:return:
'''
ctx = moderngl.create_standalone_context()
fbo = ctx.simple_framebuffer((width, height))
fbo.use()
fbo.clear(*background)
draw(ctx, width, height, 0, 1)
    data = fbo.read(components=channels)
    frame = np.frombuffer(data, dtype=np.uint8)
    frame = frame.reshape((height, width, channels))
frame = frame[::-1]
ctx.release()
return frame
def make_3dimage_frames(draw, width, height, count, background=Color(0), channels=3):
'''
Create a sequence of numpy frame file using moderngl
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param count: number of frames to create
:param background: background colour
:param channels: 3 for rgb, 4 for rgba
:return:
'''
for i in range(count):
ctx = moderngl.create_standalone_context()
fbo = ctx.simple_framebuffer((width, height))
fbo.use()
fbo.clear(*background)
draw(ctx, width, height, i, count)
        data = fbo.read(components=channels)
        frame = np.frombuffer(data, dtype=np.uint8)
        frame = frame.reshape((height, width, channels))
frame = frame[::-1]
ctx.release()
yield frame
def make_3dimages(outfile, draw, width, height, count, background=Color(0), channels=3):
'''
Create a sequence of PNG files using moderngl
:param outfile: Name of output file
:param draw: the draw function
:param width: width in pixels, int
:param height: height in pixels, int
:param count: number of frames to create
:param background: background colour
:param channels: 3 for rgb, 4 for rgba
:return:
'''
if outfile.lower().endswith('.png'):
outfile = outfile[:-4]
    frames = make_3dimage_frames(draw, width, height, count, background, channels)
for i, frame in enumerate(frames):
image = Image.fromarray(frame)
image.save(outfile + str(i).zfill(8) + '.png')
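# Usage sketch (illustrative, not part of the library): the draw callback passed to
# make_3dimage / make_3dimage_frames / make_3dimages receives
# (ctx, width, height, frame, count). The simplest callback draws nothing, so the
# output contains only the background colour. Rendering requires an OpenGL-capable
# environment, since a standalone moderngl context is created; the output file name
# below is hypothetical.
def example_draw(ctx, width, height, frame, count):
    """Draw nothing; the image will show only the background colour."""
    pass

if __name__ == '__main__':
    make_3dimage('example.png', example_draw, 400, 300, background=Color(0.5))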
|
[
"PIL.Image.fromarray",
"numpy.frombuffer",
"generativepy.color.Color",
"moderngl.create_standalone_context"
] |
[((240, 248), 'generativepy.color.Color', 'Color', (['(0)'], {}), '(0)\n', (245, 248), False, 'from generativepy.color import Color\n'), ((726, 748), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (741, 748), False, 'from PIL import Image\n'), ((838, 846), 'generativepy.color.Color', 'Color', (['(0)'], {}), '(0)\n', (843, 846), False, 'from generativepy.color import Color\n'), ((1144, 1180), 'moderngl.create_standalone_context', 'moderngl.create_standalone_context', ([], {}), '()\n', (1178, 1180), False, 'import moderngl\n'), ((1343, 1378), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (1356, 1378), True, 'import numpy as np\n'), ((1549, 1557), 'generativepy.color.Color', 'Color', (['(0)'], {}), '(0)\n', (1554, 1557), False, 'from generativepy.color import Color\n'), ((2387, 2395), 'generativepy.color.Color', 'Color', (['(0)'], {}), '(0)\n', (2392, 2395), False, 'from generativepy.color import Color\n'), ((1943, 1979), 'moderngl.create_standalone_context', 'moderngl.create_standalone_context', ([], {}), '()\n', (1977, 1979), False, 'import moderngl\n'), ((2170, 2205), 'numpy.frombuffer', 'np.frombuffer', (['data'], {'dtype': 'np.uint8'}), '(data, dtype=np.uint8)\n', (2183, 2205), True, 'import numpy as np\n'), ((2976, 2998), 'PIL.Image.fromarray', 'Image.fromarray', (['frame'], {}), '(frame)\n', (2991, 2998), False, 'from PIL import Image\n')]
|
#!/usr/bin/env python
import argparse
import numpy as np
import mdtraj as md
from LLC_Membranes.llclib import physical, topology, transform, file_rw
import sys
import tqdm
from scipy.sparse import lil_matrix
import pickle
import matplotlib.pyplot as plt
def initialize():
parser = argparse.ArgumentParser(description='Calculate coordination number')
# Trajectory Control
parser.add_argument('-t', '--traj', default='traj_whole.xtc', type=str, help='Trajectory file. Make sure '
'molecules are whole')
parser.add_argument('-g', '--gro', default='wiggle.gro', type=str, help='Name of coordinate file')
parser.add_argument('-b', '--begin', default=0, type=int, help='Start frame')
parser.add_argument('-e', '--end', default=-1, type=int, help='Last frame')
parser.add_argument('-skip', default=1, type=int, help='Include every skip frames in calculation')
# atom selection
parser.add_argument('-r', '--residue', default=None, help='Residue to calculate coordination number with respect '
'to')
    parser.add_argument('-rc', '--coordinated_residue', default=None, help='Name of residue coordinated to residue')
    parser.add_argument('-ac', '--coordinated_atoms', default=None, nargs='+', help='Name of atoms coordinated to '
                                                                                     'residue')
parser.add_argument('-a', '--atoms', default=None, nargs='+', help='Name of atoms to calculate correlation '
'function with respect to. The center of mass will be used')
parser.add_argument('-ta', '--atype', default=None, help='Element name of atoms of which you want coordination '
'number')
parser.add_argument('-tc', '--coordinated_type', default=None, help='Element name of coordinated atoms')
# coordination calculation parameters
    parser.add_argument('-cut', default=0.25, type=float, help='Maximum distance between pairs where they are considered '
                                                                'coordinated (nm)')
# saving options
parser.add_argument('-s', '--savename', default='coordination.pl', help='Name under which to save distance '
'array')
parser.add_argument('-l', '--load', action="store_true", help='Load distance array saved with name '
'passed to save_distances option.')
    parser.add_argument('-bins', nargs='+', default=100, type=int, help='Integer or array of bin values. If more than '
                                                                         'one value is used, order the inputs according to the order given in args.axis')
    parser.add_argument('-nocom', action='store_true', help='Calculate coordination based on pairwise distance between '
                                                             'all atoms, even those that are a part of the same residue.')
return parser
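# Example invocation (illustrative; the flags are those defined in initialize(), the
# residue/atom names follow the commented examples at the bottom of this script, and
# the file names are the argparse defaults — the script name itself is hypothetical):
#
#   python coordination.py -t traj_whole.xtc -g wiggle.gro \
#       -r HII -a O3 O4 -rc HOH -ac O -cut 0.25 -s coordination.pl
#
# i.e. count, frame by frame, how many water oxygens (atom O of residue HOH) lie
# within 0.25 nm of the O3/O4 oxygens of residue HII.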
class System(object):
def __init__(self, traj, gro, residue=None, coordinated_residue=None, atoms=None, coordinated_atoms=None, type=None,
ctype=None, begin=0, end=-1, skip=1, com=True, t=None):
""" Narrow system down to groups of interest
:param traj: Name of GROMACS trajectory (.xtc or .trr), can be None if t is specified
:param gro: Name of GROMACS coordinate file (.gro)
:param residue: Name of residue to include in calculation
:param coordinated_residue: Name of residue whose coordination we are interested in
:param atoms: Specify names of atoms to include. All else will be excluded
:param coordinated_atoms: Specify names of coordinated atoms to include. All else will be excluded
:param type: Atom types to include
:param ctype: Coordinated atom types to include
:param begin: first frame to analyze
:param end: last frame to analyze
:param skip: number of frames to skip between analysis steps
:param com: Calculate coordination based on the center of mass position of the selected atom group
:param t: mdtraj trajectory object. If this is passed, traj and gro will not be loaded
:type traj: str or NoneType
:type gro: str
:type residue: str
:type coordinated_residue: str
:type atoms: list of str
:type coordinated_atoms: list of str
:type type: list of str
:type ctype: list of str
:type begin: int
:type end: int
:type skip: int
:type com: bool
:type t: object
"""
if t is None:
print("Loading trajectory...", end='', flush=True)
self.t = md.load(traj, top=gro)[begin:end:skip]
print("Done!")
else:
self.t = t
names = topology.fix_names(gro) # rename atoms because mdtraj screws it up in some cases.
for i, a in enumerate(self.t.topology.atoms):
a.name = names[i]
self.time = self.t.time / 1000 # time in nanoseconds
# get locations of atoms of interest and coordinating atoms
# calculate centers of mass for certain groups. Decision-making in this regard is made in self.narrow_atoms()
if atoms is not None:
if atoms[0] == 'all':
atoms = self.all_atoms(res=residue)
if coordinated_atoms is not None:
if coordinated_atoms[0] == 'all':
coordinated_atoms = self.all_atoms(res=coordinated_residue)
self.com, self.com_map = self.narrow_atoms(atoms, residue, type, com=com)
self.com_coordinated, self.com_coordinated_map = self.narrow_atoms(coordinated_atoms, coordinated_residue,
ctype, coordination=True, com=com)
# relate indices to
self.names = [a.name for a in self.t.topology.atoms]
self.residues = [a.residue.name for a in self.t.topology.atoms]
self.distances = None
self.ncoord = None
def all_atoms(self, res=None):
if res is not None:
r = topology.Residue(res)
return [x for x in r.names.values()]
def narrow_atoms(self, atoms, residue, type, coordination=False, com=True):
if atoms is not None or type is not None:
if residue is not None:
if type is not None:
# get names of all atoms with the appropriate type
atoms = set([a.name for a in self.t.topology.atoms if a.element.symbol == type])
# get indices of all atoms in the system that make up the atoms list
atom_indices = [a.index for a in self.t.topology.atoms if a.residue.name == residue and
a.name in atoms]
if type is not None:
# if a residue is specified with an atom type, assume you want the locations of each atom of that
# type within each residue.
return self.t.xyz[:, atom_indices, :], topology.map_atoms(atom_indices)
else:
# If a residue is specified with atoms, assume that the user wants to isolate calculations to the
# center of mass of a group of atoms within a particular residue
if com:
residue = topology.Residue(residue)
atom_mass = [residue.mass[v] for v in residue.mass.keys() if
v in atoms] # mass of atoms of interest
if len(atom_mass) > 1:
return physical.center_of_mass(self.t.xyz[:, atom_indices, :], atom_mass), \
topology.map_atoms(atom_indices, len(atom_mass))
else:
                        return self.t.xyz[:, atom_indices, :], topology.map_atoms(atom_indices)
else:
return self.t.xyz[:, atom_indices, :], topology.map_atoms(atom_indices)
else:
if type is not None:
# get indices of any atoms whose element = type
atom_indices = [a.index for a in self.t.topology.atoms if a.element.symbol == type]
else:
# get indices of any atoms in the "atoms" list
atom_indices = [a.index for a in self.t.topology.atoms if a.name in atoms]
return self.t.xyz[:, atom_indices, :], topology.map_atoms(atom_indices)
else:
if residue is not None:
# calculate the center of mass of the residue based on all atoms
# First get indices of all atoms in the residue
atom_indices = [a.index for a in self.t.topology.atoms if a.residue.name == residue]
res = topology.Residue(residue)
atom_mass = [v for v in res.mass.values()] # mass of each atom in an individual residue
return physical.center_of_mass(self.t.xyz[:, atom_indices, :], atom_mass), \
topology.map_atoms(atom_indices, len(atom_mass))
else:
# if you forget a flag, exit the program with a descriptive error
if coordination:
                sys.exit('You must supply at least a residue (-rc / --coordinated_residue) or an atom '
                         '(-ac / --coordinated_atoms)')
else:
sys.exit('You must supply at least a residue (-r / --residue) or an atom name (-a / --atoms)')
def distance_search(self, cut=0.31):
""" Find all minimum image pairwise distances
:param cut: maximum distance
:type cut: float
:return:
"""
# initialize array to hold all pairwise distances
self.distances = np.zeros([self.t.n_frames], dtype=object)
print('Calculating minimum image distances!')
for t in tqdm.tqdm(range(self.distances.shape[0])):
self.distances[t] = lil_matrix((self.com.shape[1], self.com_coordinated.shape[1])) # sparse matrices are 2D
for i in range(self.com.shape[1]):
xyz_distances = self.com_coordinated[t, ...] - self.com[t, i, :]
min_distances = physical.minimum_image_distance(xyz_distances, self.t.unitcell_vectors[t, ...])
euclidean_dist = np.linalg.norm(min_distances, axis=1)
under_cut = np.where(euclidean_dist < cut)[0]
self.distances[t][i, under_cut] = euclidean_dist[under_cut]
def n_coordinated(self, res=None, atom_groups=None, plot=True):
""" Plot number of atoms coordinated to residue vs. time
:param res: a list of residue names. This function will plot coordinated atoms belonging to specific residues
separately.
:param atom_groups: a list of lists of atom names. Can be used to distinguish between coordination with
different groups of atoms within the same residue. If atom names are the same between residues, also pass
the appropriate residue name for each group of atoms to res
:type res: list
:type atom_groups: list
:return:
"""
if atom_groups is not None:
self.ncoord = np.zeros([len(atom_groups), self.t.n_frames, self.com.shape[1]])
if res is not None:
print('Organizing atoms based on atom type and residue name...')
for t in tqdm.tqdm(range(self.t.n_frames)):
for i in range(self.ncoord.shape[2]):
atom_types = [self.names[self.com_coordinated_map[j][0]] for j in
np.nonzero(self.distances[t][i, :])[1]] # residue name
residues = [self.residues[self.com_coordinated_map[j][0]] for j in
np.nonzero(self.distances[t][i, :])[1]]
grp = []
for ndx, k in enumerate(atom_types):
atoms = np.array([a.count(k) for a in atom_groups])
resi = np.array([a.count(residues[ndx]) for a in res])
try:
grp.append(np.where(atoms + resi == 2)[0][0])
except IndexError:
pass
for g in grp:
self.ncoord[g, t, i] += 1
else:
print('Organizing atoms based on atom type...')
for t in tqdm.tqdm(range(self.t.n_frames)):
for i in range(self.ncoord.shape[2]):
atom_types = [self.names[self.com_coordinated_map[j][0]] for j in
np.nonzero(self.distances[t][i, :])[1]] # residue name
grp = []
for ndx, k in enumerate(atom_types):
atoms = [a.count(k) for a in atom_groups]
try:
grp.append(atoms.index(1))
except IndexError:
pass
for g in grp:
self.ncoord[g, t, i] += 1
# for i in range(ncoord.shape[0]):
#
# plt.plot(self.time, ncoord[i, ...].mean(axis=1), label='%s of %s' % (atom_groups[i], res[i]),
# linewidth=2)
colors = ['blue', 'green', 'orange']
for j in np.random.randint(self.ncoord.shape[2], size=1):
for i in range(self.ncoord.shape[0]):
plt.plot(self.time, self.ncoord[i, :, j], label='%s of %s' % (atom_groups[i], res[i]),
linewidth=2, color=colors[i])
#plt.legend()
else:
self.ncoord = np.zeros([self.t.n_frames, self.com.shape[1]])
for i in tqdm.tqdm(range(self.ncoord.shape[1])):
self.ncoord[:, i] = [len(np.nonzero(self.distances[t][i, :])[1]) for t in range(self.t.n_frames)]
# print('Average coordinated molecules per frame: %.2f +/- %.2f' % (self.ncoord.mean(), self.ncoord.std()))
if plot:
plt.plot(self.time, self.ncoord.sum(axis=1))
if plot:
plt.xlabel('Time (ns)', fontsize=14)
plt.ylabel('Number of coordinated molecules', fontsize=14)
plt.gcf().get_axes()[0].tick_params(labelsize=14)
plt.tight_layout()
plt.show()
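# Illustrative sketch (independent of physical.minimum_image_distance, which handles
# general triclinic unit cells): for a rectangular box the minimum image convention
# simply wraps each displacement component back into [-L/2, L/2).
def minimum_image_rectangular_example(displacements, box_lengths):
    """Apply the minimum image convention in a rectangular box (illustration only).

    displacements: (n, 3) array of xyz displacement vectors
    box_lengths: (3,) array of box edge lengths
    """
    displacements = np.asarray(displacements, dtype=float)
    box_lengths = np.asarray(box_lengths, dtype=float)
    return displacements - box_lengths * np.round(displacements / box_lengths)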
if __name__ == "__main__":
args = initialize().parse_args()
if not args.load:
        com = not args.nocom
system = System(args.traj, args.gro, atoms=args.atoms, coordinated_atoms=args.coordinated_atoms,
residue=args.residue, coordinated_residue=args.coordinated_residue, type=args.atype,
ctype=args.coordinated_type, begin=args.begin, end=args.end, skip=args.skip, com=com)
system.distance_search(cut=args.cut) # calculate pairwise distance between all points in self.com and self.com_coordinated
system.n_coordinated(plot=True)
file_rw.save_object(system, args.savename)
else:
print('Loading pickled object!...', end='', flush=True)
system = pickle.load(open(args.savename, "rb"))
print('Done!')
plt.plot(system.time, system.ncoord.sum(axis=1))
plt.show()
exit()
print((system.ncoord.flatten() > 0).sum() / (system.ncoord.shape[0] * system.ncoord.shape[1]))
# system.n_coordinated(plot=True)
#system.plot(res=['HII', 'HII', 'HOH'], atom_groups=[['O3', 'O4'], ['O', 'O1', 'O2'], ['O']])
# system.n_coordinated(plot=True)
#
# for i in np.nonzero(system.distances[-1][0, :])[1]:
# print('%s -- %s' % (system.com_map[0], system.com_coordinated_map[i]))
|
[
"argparse.ArgumentParser",
"LLC_Membranes.llclib.file_rw.save_object",
"mdtraj.load",
"scipy.sparse.lil_matrix",
"numpy.random.randint",
"numpy.linalg.norm",
"matplotlib.pyplot.tight_layout",
"LLC_Membranes.llclib.topology.map_atoms",
"LLC_Membranes.llclib.topology.Residue",
"matplotlib.pyplot.show",
"LLC_Membranes.llclib.physical.center_of_mass",
"LLC_Membranes.llclib.topology.fix_names",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"sys.exit",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.nonzero",
"numpy.where",
"LLC_Membranes.llclib.physical.minimum_image_distance",
"matplotlib.pyplot.xlabel"
] |
[((289, 357), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate coordination number"""'}), "(description='Calculate coordination number')\n", (312, 357), False, 'import argparse\n'), ((4920, 4943), 'LLC_Membranes.llclib.topology.fix_names', 'topology.fix_names', (['gro'], {}), '(gro)\n', (4938, 4943), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((10011, 10052), 'numpy.zeros', 'np.zeros', (['[self.t.n_frames]'], {'dtype': 'object'}), '([self.t.n_frames], dtype=object)\n', (10019, 10052), True, 'import numpy as np\n'), ((15460, 15502), 'LLC_Membranes.llclib.file_rw.save_object', 'file_rw.save_object', (['system', 'args.savename'], {}), '(system, args.savename)\n', (15479, 15502), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((15723, 15733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15731, 15733), True, 'import matplotlib.pyplot as plt\n'), ((6229, 6250), 'LLC_Membranes.llclib.topology.Residue', 'topology.Residue', (['res'], {}), '(res)\n', (6245, 6250), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((10200, 10262), 'scipy.sparse.lil_matrix', 'lil_matrix', (['(self.com.shape[1], self.com_coordinated.shape[1])'], {}), '((self.com.shape[1], self.com_coordinated.shape[1]))\n', (10210, 10262), False, 'from scipy.sparse import lil_matrix\n'), ((13756, 13803), 'numpy.random.randint', 'np.random.randint', (['self.ncoord.shape[2]'], {'size': '(1)'}), '(self.ncoord.shape[2], size=1)\n', (13773, 13803), True, 'import numpy as np\n'), ((14094, 14140), 'numpy.zeros', 'np.zeros', (['[self.t.n_frames, self.com.shape[1]]'], {}), '([self.t.n_frames, self.com.shape[1]])\n', (14102, 14140), True, 'import numpy as np\n'), ((14552, 14588), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (ns)"""'], {'fontsize': '(14)'}), "('Time (ns)', fontsize=14)\n", (14562, 14588), True, 'import matplotlib.pyplot as plt\n'), ((14601, 14659), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of coordinated molecules"""'], {'fontsize': '(14)'}), "('Number of coordinated molecules', fontsize=14)\n", (14611, 14659), True, 'import matplotlib.pyplot as plt\n'), ((14734, 14752), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14750, 14752), True, 'import matplotlib.pyplot as plt\n'), ((14766, 14776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14774, 14776), True, 'import matplotlib.pyplot as plt\n'), ((4800, 4822), 'mdtraj.load', 'md.load', (['traj'], {'top': 'gro'}), '(traj, top=gro)\n', (4807, 4822), True, 'import mdtraj as md\n'), ((8648, 8680), 'LLC_Membranes.llclib.topology.map_atoms', 'topology.map_atoms', (['atom_indices'], {}), '(atom_indices)\n', (8666, 8680), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((9001, 9026), 'LLC_Membranes.llclib.topology.Residue', 'topology.Residue', (['residue'], {}), '(residue)\n', (9017, 9026), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((10449, 10528), 'LLC_Membranes.llclib.physical.minimum_image_distance', 'physical.minimum_image_distance', (['xyz_distances', 'self.t.unitcell_vectors[t, ...]'], {}), '(xyz_distances, self.t.unitcell_vectors[t, ...])\n', (10480, 10528), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((10562, 10599), 'numpy.linalg.norm', 'np.linalg.norm', (['min_distances'], {'axis': '(1)'}), '(min_distances, axis=1)\n', (10576, 10599), 
True, 'import numpy as np\n'), ((9156, 9222), 'LLC_Membranes.llclib.physical.center_of_mass', 'physical.center_of_mass', (['self.t.xyz[:, atom_indices, :]', 'atom_mass'], {}), '(self.t.xyz[:, atom_indices, :], atom_mass)\n', (9179, 9222), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((9452, 9577), 'sys.exit', 'sys.exit', (['"""You must supply at least a residue (-cr / --coordinated_residue) or an atom (-ca / --coordinated_atoms)"""'], {}), "(\n 'You must supply at least a residue (-cr / --coordinated_residue) or an atom (-ca / --coordinated_atoms)'\n )\n", (9460, 9577), False, 'import sys\n'), ((9642, 9746), 'sys.exit', 'sys.exit', (['"""You must supply at least a residue (-r / --residue) or an atom name (-a / --atoms)"""'], {}), "(\n 'You must supply at least a residue (-r / --residue) or an atom name (-a / --atoms)'\n )\n", (9650, 9746), False, 'import sys\n'), ((10628, 10658), 'numpy.where', 'np.where', (['(euclidean_dist < cut)'], {}), '(euclidean_dist < cut)\n', (10636, 10658), True, 'import numpy as np\n'), ((13880, 14001), 'matplotlib.pyplot.plot', 'plt.plot', (['self.time', 'self.ncoord[i, :, j]'], {'label': "('%s of %s' % (atom_groups[i], res[i]))", 'linewidth': '(2)', 'color': 'colors[i]'}), "(self.time, self.ncoord[i, :, j], label='%s of %s' % (atom_groups[i\n ], res[i]), linewidth=2, color=colors[i])\n", (13888, 14001), True, 'import matplotlib.pyplot as plt\n'), ((7183, 7215), 'LLC_Membranes.llclib.topology.map_atoms', 'topology.map_atoms', (['atom_indices'], {}), '(atom_indices)\n', (7201, 7215), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((7506, 7531), 'LLC_Membranes.llclib.topology.Residue', 'topology.Residue', (['residue'], {}), '(residue)\n', (7522, 7531), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((8149, 8181), 'LLC_Membranes.llclib.topology.map_atoms', 'topology.map_atoms', (['atom_indices'], {}), '(atom_indices)\n', (8167, 8181), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((14244, 14279), 'numpy.nonzero', 'np.nonzero', (['self.distances[t][i, :]'], {}), '(self.distances[t][i, :])\n', (14254, 14279), True, 'import numpy as np\n'), ((7779, 7845), 'LLC_Membranes.llclib.physical.center_of_mass', 'physical.center_of_mass', (['self.t.xyz[:, atom_indices, :]', 'atom_mass'], {}), '(self.t.xyz[:, atom_indices, :], atom_mass)\n', (7802, 7845), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((8025, 8057), 'LLC_Membranes.llclib.topology.map_atoms', 'topology.map_atoms', (['atom_indices'], {}), '(atom_indices)\n', (8043, 8057), False, 'from LLC_Membranes.llclib import physical, topology, transform, file_rw\n'), ((14672, 14681), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (14679, 14681), True, 'import matplotlib.pyplot as plt\n'), ((11882, 11917), 'numpy.nonzero', 'np.nonzero', (['self.distances[t][i, :]'], {}), '(self.distances[t][i, :])\n', (11892, 11917), True, 'import numpy as np\n'), ((12065, 12100), 'numpy.nonzero', 'np.nonzero', (['self.distances[t][i, :]'], {}), '(self.distances[t][i, :])\n', (12075, 12100), True, 'import numpy as np\n'), ((12981, 13016), 'numpy.nonzero', 'np.nonzero', (['self.distances[t][i, :]'], {}), '(self.distances[t][i, :])\n', (12991, 13016), True, 'import numpy as np\n'), ((12439, 12466), 'numpy.where', 'np.where', (['(atoms + resi == 2)'], {}), '(atoms + resi == 2)\n', (12447, 12466), True, 'import numpy as np\n')]
|
"""
Utilities for labelling 3D objects from a mask
"""
import os
import xarray as xr
import numpy as np
import cloud_identification
OUT_FILENAME_FORMAT = "{base_name}.objects.{objects_name}.nc"
def make_objects_name(mask_name, splitting_var):
return "{mask_name}.split_on.{splitting_var}".format(**locals())
def _label_objects_wrapper(mask, splitting_scalar, remove_at_edge=False):
if remove_at_edge:
raise NotImplementedError
def _remove_at_edge(object_labels):
mask_edge = np.zeros_like(mask)
mask_edge[
:, :, 1
] = True # has to be k==1 because object identification codes treats actual edges as ghost cells
mask_edge[:, :, -2] = True
cloud_identification.remove_intersecting(object_labels, mask_edge)
if mask.shape != splitting_scalar.shape:
raise Exception(
"Incompatible shapes of splitting scalar ({}) and "
"mask ({})".format(splitting_scalar.shape, mask.shape)
)
assert mask.dims == splitting_scalar.dims
for d in mask.dims:
assert np.all(mask[d].values == splitting_scalar[d].values)
object_labels = cloud_identification.number_objects(
splitting_scalar.values, mask=mask.values
)
# if remove_at_edge:
# _remove_at_edge(object_labels)
return object_labels
def label_objects(mask, splitting_scalar, remove_at_edge=False):
    if splitting_scalar is not None:
        mask = mask.sel(zt=splitting_scalar.zt).squeeze()
    object_labels = _label_objects_wrapper(
        mask=mask, splitting_scalar=splitting_scalar, remove_at_edge=remove_at_edge
    )
da = xr.DataArray(
data=object_labels, coords=mask.coords, dims=mask.dims, name="object_labels"
)
da.name = make_objects_name(
mask_name=mask.name, splitting_var=splitting_scalar.name
)
da.attrs["mask_name"] = mask.name
da.attrs["splitting_scalar"] = splitting_scalar.name
return da
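# Usage sketch (illustrative): label_objects expects a boolean mask and a splitting
# scalar defined on the same grid. The synthetic DataArrays below only demonstrate the
# call signature; running this still requires the compiled cloud_identification module.
def example_label_objects():
    shape = (4, 4, 4)
    coords = dict(xt=np.arange(4), yt=np.arange(4), zt=np.arange(4))
    dims = ("xt", "yt", "zt")
    mask = xr.DataArray(np.ones(shape, dtype=bool), coords=coords, dims=dims, name="demo_mask")
    scalar = xr.DataArray(np.random.random(shape), coords=coords, dims=dims, name="w")
    return label_objects(mask=mask, splitting_scalar=scalar)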
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser(__doc__)
argparser.add_argument("base_name", type=str)
argparser.add_argument("mask_name", type=str)
argparser.add_argument("splitting_scalar")
argparser.add_argument("--z_max", type=float, default=np.inf)
argparser.add_argument("--remove-edge-objects", default=False, action="store_true")
args = argparser.parse_args()
input_name = args.base_name
fn_mask = "{}.mask_3d.{}.nc".format(input_name, args.mask_name)
fn_mask_2d = "{}.mask.{}.nc".format(input_name, args.mask_name)
if os.path.exists(fn_mask):
pass
elif os.path.exists(fn_mask_2d):
fn_mask = fn_mask_2d
print("Using 2D xy mask")
else:
raise Exception(
"Couldn't find mask file `{}` or `{}`" "".format(fn_mask, fn_mask_2d)
)
mask = xr.open_dataarray(fn_mask, decode_times=False)
if args.splitting_scalar is not None:
fn_ss = "{}.{}.nc".format(input_name, args.splitting_scalar)
if not os.path.exists(fn_ss):
raise Exception("Couldn't find splitting scalar file `{}`" "".format(fn_ss))
splitting_scalar = xr.open_dataarray(fn_ss, decode_times=False).squeeze()
else:
splitting_scalar = None
if args.z_max is not np.inf:
mask = mask.sel(zt=slice(0.0, args.z_max)).squeeze()
if splitting_scalar is not None:
splitting_scalar = splitting_scalar.sel(zt=slice(0.0, args.z_max)).squeeze()
ds = label_objects(
mask=mask,
splitting_scalar=splitting_scalar,
remove_at_edge=args.remove_edge_objects,
)
ds.attrs["input_name"] = input_name
ds.attrs["mask_name"] = args.mask_name
ds.attrs["z_max"] = args.z_max
ds.attrs["splitting_scalar"] = args.splitting_scalar
out_filename = OUT_FILENAME_FORMAT.format(
base_name=input_name.replace("/", "__"),
objects_name=make_objects_name(
mask_name=args.mask_name, splitting_var=args.splitting_scalar
),
).replace("__masks", "")
ds.to_netcdf(out_filename)
print("Wrote output to `{}`".format(out_filename))
|
[
"numpy.zeros_like",
"argparse.ArgumentParser",
"os.path.exists",
"cloud_identification.number_objects",
"xarray.DataArray",
"cloud_identification.remove_intersecting",
"xarray.open_dataarray",
"numpy.all"
] |
[((1185, 1263), 'cloud_identification.number_objects', 'cloud_identification.number_objects', (['splitting_scalar.values'], {'mask': 'mask.values'}), '(splitting_scalar.values, mask=mask.values)\n', (1220, 1263), False, 'import cloud_identification\n'), ((1607, 1702), 'xarray.DataArray', 'xr.DataArray', ([], {'data': 'object_labels', 'coords': 'mask.coords', 'dims': 'mask.dims', 'name': '"""object_labels"""'}), "(data=object_labels, coords=mask.coords, dims=mask.dims, name=\n 'object_labels')\n", (1619, 1702), True, 'import xarray as xr\n'), ((1993, 2025), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['__doc__'], {}), '(__doc__)\n', (2016, 2025), False, 'import argparse\n'), ((2540, 2563), 'os.path.exists', 'os.path.exists', (['fn_mask'], {}), '(fn_mask)\n', (2554, 2563), False, 'import os\n'), ((2816, 2862), 'xarray.open_dataarray', 'xr.open_dataarray', (['fn_mask'], {'decode_times': '(False)'}), '(fn_mask, decode_times=False)\n', (2833, 2862), True, 'import xarray as xr\n'), ((1111, 1163), 'numpy.all', 'np.all', (['(mask[d].values == splitting_scalar[d].values)'], {}), '(mask[d].values == splitting_scalar[d].values)\n', (1117, 1163), True, 'import numpy as np\n'), ((2587, 2613), 'os.path.exists', 'os.path.exists', (['fn_mask_2d'], {}), '(fn_mask_2d)\n', (2601, 2613), False, 'import os\n'), ((519, 538), 'numpy.zeros_like', 'np.zeros_like', (['mask'], {}), '(mask)\n', (532, 538), True, 'import numpy as np\n'), ((747, 813), 'cloud_identification.remove_intersecting', 'cloud_identification.remove_intersecting', (['object_labels', 'mask_edge'], {}), '(object_labels, mask_edge)\n', (787, 813), False, 'import cloud_identification\n'), ((2990, 3011), 'os.path.exists', 'os.path.exists', (['fn_ss'], {}), '(fn_ss)\n', (3004, 3011), False, 'import os\n'), ((3129, 3173), 'xarray.open_dataarray', 'xr.open_dataarray', (['fn_ss'], {'decode_times': '(False)'}), '(fn_ss, decode_times=False)\n', (3146, 3173), True, 'import xarray as xr\n')]
|
import numpy as np
import pytest
import unittest
from desc.equilibrium import Equilibrium, EquilibriaFamily
from desc.profiles import PowerSeriesProfile, SplineProfile
from desc.geometry import (
FourierRZCurve,
FourierRZToroidalSurface,
ZernikeRZToroidalSection,
)
class TestConstructor(unittest.TestCase):
def test_defaults(self):
eq = Equilibrium()
self.assertEqual(eq.spectral_indexing, "ansi")
self.assertEqual(eq.NFP, 1)
self.assertEqual(eq.L, 1)
self.assertEqual(eq.M, 1)
self.assertEqual(eq.N, 0)
self.assertEqual(eq.sym, False)
self.assertTrue(eq.surface.eq(FourierRZToroidalSurface()))
self.assertIsInstance(eq.pressure, PowerSeriesProfile)
np.testing.assert_allclose(eq.p_l, [0])
self.assertIsInstance(eq.iota, PowerSeriesProfile)
np.testing.assert_allclose(eq.i_l, [0])
def test_supplied_objects(self):
pressure = SplineProfile([1, 2, 3])
iota = SplineProfile([2, 3, 4])
surface = ZernikeRZToroidalSection(spectral_indexing="ansi")
axis = FourierRZCurve([-1, 10, 1], [1, 0, -1], NFP=2)
eq = Equilibrium(pressure=pressure, iota=iota, surface=surface, axis=axis)
self.assertTrue(eq.pressure.eq(pressure))
self.assertTrue(eq.iota.eq(iota))
self.assertTrue(eq.surface.eq(surface))
self.assertTrue(eq.axis.eq(axis))
self.assertEqual(eq.spectral_indexing, "ansi")
self.assertEqual(eq.NFP, 2)
surface2 = FourierRZToroidalSurface(NFP=3)
eq2 = Equilibrium(surface=surface2)
self.assertEqual(eq2.NFP, 3)
self.assertEqual(eq2.axis.NFP, 3)
eq3 = Equilibrium(surface=surface, axis=None)
np.testing.assert_allclose(eq3.axis.R_n, [10])
def test_dict(self):
inputs = {
"L": 4,
"M": 2,
"N": 2,
"NFP": 3,
"sym": False,
"spectral_indexing": "ansi",
"surface": np.array(
[[0, 0, 0, 10, 0], [0, 1, 0, 1, 1], [0, -1, 1, 0.1, 0.1]]
),
"axis": np.array([[0, 10, 0]]),
"pressure": np.array([[0, 10], [2, 5]]),
"iota": np.array([[0, 1], [2, 3]]),
}
eq = Equilibrium(**inputs)
self.assertEqual(eq.L, 4)
self.assertEqual(eq.M, 2)
self.assertEqual(eq.N, 2)
self.assertEqual(eq.NFP, 3)
self.assertEqual(eq.spectral_indexing, "ansi")
np.testing.assert_allclose(eq.p_l, [10, 0, 5])
np.testing.assert_allclose(eq.i_l, [1, 0, 3])
self.assertIsInstance(eq.surface, FourierRZToroidalSurface)
np.testing.assert_allclose(eq.Rb_lmn, [0, 0, 0, 0, 10, 1, 0.1, 0, 0])
np.testing.assert_allclose(eq.Zb_lmn, [0, 0, 0, 0, 0, 1, 0.1, 0, 0])
inputs["surface"] = np.array([[0, 0, 0, 10, 0], [1, 1, 0, 1, 1]])
eq = Equilibrium(**inputs)
self.assertEqual(eq.bdry_mode, "poincare")
np.testing.assert_allclose(eq.Rb_lmn, [10, 0, 1])
def test_asserts(self):
with pytest.raises(AssertionError):
eq = Equilibrium(L=3.4)
with pytest.raises(AssertionError):
eq = Equilibrium(M=3.4)
with pytest.raises(AssertionError):
eq = Equilibrium(N=3.4)
with pytest.raises(AssertionError):
eq = Equilibrium(NFP=3.4j)
with pytest.raises(ValueError):
eq = Equilibrium(surface=np.array([[1, 1, 1, 10, 2]]))
with pytest.raises(TypeError):
eq = Equilibrium(surface=FourierRZCurve())
with pytest.raises(TypeError):
eq = Equilibrium(axis=2)
with pytest.raises(ValueError):
eq = Equilibrium(surface=FourierRZToroidalSurface(NFP=1), NFP=2)
with pytest.raises(TypeError):
eq = Equilibrium(pressure="abc")
with pytest.raises(TypeError):
eq = Equilibrium(iota="def")
def test_supplied_coeffs(self):
R_lmn = np.random.random(3)
Z_lmn = np.random.random(3)
L_lmn = np.random.random(3)
eq = Equilibrium(R_lmn=R_lmn, Z_lmn=Z_lmn, L_lmn=L_lmn)
np.testing.assert_allclose(R_lmn, eq.R_lmn)
np.testing.assert_allclose(Z_lmn, eq.Z_lmn)
np.testing.assert_allclose(L_lmn, eq.L_lmn)
with pytest.raises(ValueError):
eq = Equilibrium(L=4, R_lmn=R_lmn)
class TestInitialGuess(unittest.TestCase):
def test_errors(self):
eq = Equilibrium()
with pytest.raises(ValueError):
eq.set_initial_guess(1, "a", 4)
with pytest.raises(ValueError):
eq.set_initial_guess(1, 2)
with pytest.raises(ValueError):
eq.set_initial_guess(eq, eq.surface)
del eq._surface
with pytest.raises(ValueError):
eq.set_initial_guess()
with pytest.raises(ValueError):
eq.set_initial_guess("path", 3)
with pytest.raises(ValueError):
eq.set_initial_guess("path", "hdf5")
def test_guess_from_other(self):
eq1 = Equilibrium(L=4, M=2)
eq2 = Equilibrium(L=2, M=1)
eq2.set_initial_guess(eq1)
eq2.change_resolution(L=4, M=2)
np.testing.assert_allclose(eq1.R_lmn, eq2.R_lmn)
np.testing.assert_allclose(eq1.Z_lmn, eq2.Z_lmn)
def test_guess_from_file(self):
eq1 = Equilibrium(L=24, M=12, sym=True, spectral_indexing="fringe")
path = "./tests/inputs/SOLOVEV_output.h5"
eq1.set_initial_guess(path)
eq2 = EquilibriaFamily.load(path)
np.testing.assert_allclose(eq1.R_lmn, eq2[-1].R_lmn)
np.testing.assert_allclose(eq1.Z_lmn, eq2[-1].Z_lmn)
def test_guess_from_surface(self):
eq = Equilibrium()
surface = FourierRZToroidalSurface()
# turn the circular cross section into an elipse w AR=2
surface.set_coeffs(m=-1, n=0, R=None, Z=2)
# move z axis up to 0.5 for no good reason
axis = FourierRZCurve([0, 10, 0], [0, 0.5, 0])
eq.set_initial_guess(surface, axis)
np.testing.assert_allclose(eq.compute_volume(), 2 * 10 * np.pi * np.pi * 2 * 1)
|
[
"desc.geometry.FourierRZToroidalSurface",
"desc.equilibrium.Equilibrium",
"desc.profiles.SplineProfile",
"numpy.array",
"numpy.random.random",
"pytest.raises",
"desc.geometry.ZernikeRZToroidalSection",
"numpy.testing.assert_allclose",
"desc.equilibrium.EquilibriaFamily.load",
"desc.geometry.FourierRZCurve"
] |
[((365, 378), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {}), '()\n', (376, 378), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((751, 790), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.p_l', '[0]'], {}), '(eq.p_l, [0])\n', (777, 790), True, 'import numpy as np\n'), ((858, 897), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.i_l', '[0]'], {}), '(eq.i_l, [0])\n', (884, 897), True, 'import numpy as np\n'), ((956, 980), 'desc.profiles.SplineProfile', 'SplineProfile', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (969, 980), False, 'from desc.profiles import PowerSeriesProfile, SplineProfile\n'), ((996, 1020), 'desc.profiles.SplineProfile', 'SplineProfile', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (1009, 1020), False, 'from desc.profiles import PowerSeriesProfile, SplineProfile\n'), ((1039, 1089), 'desc.geometry.ZernikeRZToroidalSection', 'ZernikeRZToroidalSection', ([], {'spectral_indexing': '"""ansi"""'}), "(spectral_indexing='ansi')\n", (1063, 1089), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((1105, 1151), 'desc.geometry.FourierRZCurve', 'FourierRZCurve', (['[-1, 10, 1]', '[1, 0, -1]'], {'NFP': '(2)'}), '([-1, 10, 1], [1, 0, -1], NFP=2)\n', (1119, 1151), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((1166, 1235), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'pressure': 'pressure', 'iota': 'iota', 'surface': 'surface', 'axis': 'axis'}), '(pressure=pressure, iota=iota, surface=surface, axis=axis)\n', (1177, 1235), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((1530, 1561), 'desc.geometry.FourierRZToroidalSurface', 'FourierRZToroidalSurface', ([], {'NFP': '(3)'}), '(NFP=3)\n', (1554, 1561), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((1576, 1605), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'surface': 'surface2'}), '(surface=surface2)\n', (1587, 1605), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((1700, 1739), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'surface': 'surface', 'axis': 'None'}), '(surface=surface, axis=None)\n', (1711, 1739), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((1748, 1794), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq3.axis.R_n', '[10]'], {}), '(eq3.axis.R_n, [10])\n', (1774, 1794), True, 'import numpy as np\n'), ((2280, 2301), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {}), '(**inputs)\n', (2291, 2301), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((2504, 2550), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.p_l', '[10, 0, 5]'], {}), '(eq.p_l, [10, 0, 5])\n', (2530, 2550), True, 'import numpy as np\n'), ((2559, 2604), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.i_l', '[1, 0, 3]'], {}), '(eq.i_l, [1, 0, 3])\n', (2585, 2604), True, 'import numpy as np\n'), ((2681, 2750), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.Rb_lmn', '[0, 0, 0, 0, 10, 1, 0.1, 0, 0]'], {}), '(eq.Rb_lmn, [0, 0, 0, 0, 10, 1, 0.1, 0, 0])\n', (2707, 2750), True, 'import numpy as np\n'), ((2759, 2827), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.Zb_lmn', '[0, 0, 0, 0, 0, 1, 0.1, 0, 0]'], {}), '(eq.Zb_lmn, [0, 0, 0, 0, 0, 1, 0.1, 0, 0])\n', (2785, 2827), True, 'import numpy as np\n'), 
((2857, 2902), 'numpy.array', 'np.array', (['[[0, 0, 0, 10, 0], [1, 1, 0, 1, 1]]'], {}), '([[0, 0, 0, 10, 0], [1, 1, 0, 1, 1]])\n', (2865, 2902), True, 'import numpy as np\n'), ((2916, 2937), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {}), '(**inputs)\n', (2927, 2937), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((2997, 3046), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq.Rb_lmn', '[10, 0, 1]'], {}), '(eq.Rb_lmn, [10, 0, 1])\n', (3023, 3046), True, 'import numpy as np\n'), ((4012, 4031), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (4028, 4031), True, 'import numpy as np\n'), ((4048, 4067), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (4064, 4067), True, 'import numpy as np\n'), ((4084, 4103), 'numpy.random.random', 'np.random.random', (['(3)'], {}), '(3)\n', (4100, 4103), True, 'import numpy as np\n'), ((4117, 4167), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'R_lmn': 'R_lmn', 'Z_lmn': 'Z_lmn', 'L_lmn': 'L_lmn'}), '(R_lmn=R_lmn, Z_lmn=Z_lmn, L_lmn=L_lmn)\n', (4128, 4167), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((4176, 4219), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['R_lmn', 'eq.R_lmn'], {}), '(R_lmn, eq.R_lmn)\n', (4202, 4219), True, 'import numpy as np\n'), ((4228, 4271), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['Z_lmn', 'eq.Z_lmn'], {}), '(Z_lmn, eq.Z_lmn)\n', (4254, 4271), True, 'import numpy as np\n'), ((4280, 4323), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['L_lmn', 'eq.L_lmn'], {}), '(L_lmn, eq.L_lmn)\n', (4306, 4323), True, 'import numpy as np\n'), ((4498, 4511), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {}), '()\n', (4509, 4511), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((5090, 5111), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'L': '(4)', 'M': '(2)'}), '(L=4, M=2)\n', (5101, 5111), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((5126, 5147), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'L': '(2)', 'M': '(1)'}), '(L=2, M=1)\n', (5137, 5147), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((5232, 5280), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq1.R_lmn', 'eq2.R_lmn'], {}), '(eq1.R_lmn, eq2.R_lmn)\n', (5258, 5280), True, 'import numpy as np\n'), ((5289, 5337), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq1.Z_lmn', 'eq2.Z_lmn'], {}), '(eq1.Z_lmn, eq2.Z_lmn)\n', (5315, 5337), True, 'import numpy as np\n'), ((5390, 5451), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'L': '(24)', 'M': '(12)', 'sym': '(True)', 'spectral_indexing': '"""fringe"""'}), "(L=24, M=12, sym=True, spectral_indexing='fringe')\n", (5401, 5451), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((5552, 5579), 'desc.equilibrium.EquilibriaFamily.load', 'EquilibriaFamily.load', (['path'], {}), '(path)\n', (5573, 5579), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((5589, 5641), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq1.R_lmn', 'eq2[-1].R_lmn'], {}), '(eq1.R_lmn, eq2[-1].R_lmn)\n', (5615, 5641), True, 'import numpy as np\n'), ((5650, 5702), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['eq1.Z_lmn', 'eq2[-1].Z_lmn'], {}), '(eq1.Z_lmn, eq2[-1].Z_lmn)\n', (5676, 5702), True, 'import numpy as np\n'), ((5757, 5770), 'desc.equilibrium.Equilibrium', 
'Equilibrium', ([], {}), '()\n', (5768, 5770), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((5789, 5815), 'desc.geometry.FourierRZToroidalSurface', 'FourierRZToroidalSurface', ([], {}), '()\n', (5813, 5815), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((5997, 6036), 'desc.geometry.FourierRZCurve', 'FourierRZCurve', (['[0, 10, 0]', '[0, 0.5, 0]'], {}), '([0, 10, 0], [0, 0.5, 0])\n', (6011, 6036), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((2013, 2080), 'numpy.array', 'np.array', (['[[0, 0, 0, 10, 0], [0, 1, 0, 1, 1], [0, -1, 1, 0.1, 0.1]]'], {}), '([[0, 0, 0, 10, 0], [0, 1, 0, 1, 1], [0, -1, 1, 0.1, 0.1]])\n', (2021, 2080), True, 'import numpy as np\n'), ((2132, 2154), 'numpy.array', 'np.array', (['[[0, 10, 0]]'], {}), '([[0, 10, 0]])\n', (2140, 2154), True, 'import numpy as np\n'), ((2180, 2207), 'numpy.array', 'np.array', (['[[0, 10], [2, 5]]'], {}), '([[0, 10], [2, 5]])\n', (2188, 2207), True, 'import numpy as np\n'), ((2229, 2255), 'numpy.array', 'np.array', (['[[0, 1], [2, 3]]'], {}), '([[0, 1], [2, 3]])\n', (2237, 2255), True, 'import numpy as np\n'), ((3090, 3119), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3103, 3119), False, 'import pytest\n'), ((3138, 3156), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'L': '(3.4)'}), '(L=3.4)\n', (3149, 3156), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((3170, 3199), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3183, 3199), False, 'import pytest\n'), ((3218, 3236), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'M': '(3.4)'}), '(M=3.4)\n', (3229, 3236), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((3250, 3279), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3263, 3279), False, 'import pytest\n'), ((3298, 3316), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'N': '(3.4)'}), '(N=3.4)\n', (3309, 3316), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((3330, 3359), 'pytest.raises', 'pytest.raises', (['AssertionError'], {}), '(AssertionError)\n', (3343, 3359), False, 'import pytest\n'), ((3378, 3399), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'NFP': '(3.4j)'}), '(NFP=3.4j)\n', (3389, 3399), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((3413, 3438), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3426, 3438), False, 'import pytest\n'), ((3520, 3544), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3533, 3544), False, 'import pytest\n'), ((3614, 3638), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3627, 3638), False, 'import pytest\n'), ((3657, 3676), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'axis': '(2)'}), '(axis=2)\n', (3668, 3676), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((3690, 3715), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3703, 3715), False, 'import pytest\n'), ((3807, 3831), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3820, 3831), False, 'import pytest\n'), ((3850, 3877), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'pressure': '"""abc"""'}), "(pressure='abc')\n", (3861, 3877), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), 
((3891, 3915), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3904, 3915), False, 'import pytest\n'), ((3934, 3957), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'iota': '"""def"""'}), "(iota='def')\n", (3945, 3957), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((4338, 4363), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4351, 4363), False, 'import pytest\n'), ((4382, 4411), 'desc.equilibrium.Equilibrium', 'Equilibrium', ([], {'L': '(4)', 'R_lmn': 'R_lmn'}), '(L=4, R_lmn=R_lmn)\n', (4393, 4411), False, 'from desc.equilibrium import Equilibrium, EquilibriaFamily\n'), ((4525, 4550), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4538, 4550), False, 'import pytest\n'), ((4609, 4634), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4622, 4634), False, 'import pytest\n'), ((4688, 4713), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4701, 4713), False, 'import pytest\n'), ((4801, 4826), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4814, 4826), False, 'import pytest\n'), ((4877, 4902), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4890, 4902), False, 'import pytest\n'), ((4961, 4986), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4974, 4986), False, 'import pytest\n'), ((651, 677), 'desc.geometry.FourierRZToroidalSurface', 'FourierRZToroidalSurface', ([], {}), '()\n', (675, 677), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((3477, 3505), 'numpy.array', 'np.array', (['[[1, 1, 1, 10, 2]]'], {}), '([[1, 1, 1, 10, 2]])\n', (3485, 3505), True, 'import numpy as np\n'), ((3583, 3599), 'desc.geometry.FourierRZCurve', 'FourierRZCurve', ([], {}), '()\n', (3597, 3599), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n'), ((3754, 3785), 'desc.geometry.FourierRZToroidalSurface', 'FourierRZToroidalSurface', ([], {'NFP': '(1)'}), '(NFP=1)\n', (3778, 3785), False, 'from desc.geometry import FourierRZCurve, FourierRZToroidalSurface, ZernikeRZToroidalSection\n')]
|
from sklearn.datasets import make_moons
import sys
sys.path.append('./Data_Process')
path_result = "./Latent_representation/"
from Models import *
from Metrics import *
import scipy.io as scio
from Data_Process import *
from sklearn.cluster import KMeans
import numpy as np
import torch
import time
import warnings
warnings.filterwarnings('ignore')
# Features: X (n × d); adjacency: similarity matrix; labels: Y
# Parameters that need to be entered manually
######################################################### Setting #####################################################
Dataset = 'cora'
Classification = False
Clustering = False
Link_Prediction = True
t_SNE = False
scale = 0
########################################## hyper-parameters##############################################################
Epoch_Num = 200
Learning_Rate = 1e-4
Hidden_Layer_1 = 1024
Hidden_Layer_2 = 128
################################### Load dataset ######################################################################
if (Dataset is "cora") or (Dataset is "citeseer"):
load_data = Load_Data(Dataset)
Features, Labels, Adjacency_Matrix_raw = load_data.Graph()
Features = torch.Tensor(Features)
else:
load_data = Load_Data(Dataset)
Features, Labels = load_data.CPU()
################################### Calculate the adjacency matrix #########################################################
if('Adjacency_Matrix_raw' in vars()):
print('Adjacency matrix is raw')
pass
else:
    print('Adjacency matrix_raw is calculated by KNN')
graph = Graph_Construction(Features)
Adjacency_Matrix_raw = graph.KNN()
################################### Link Prediction ##################################################################
if Link_Prediction:
adj_train, val_edges, val_edges_false, test_edges, test_edges_false = mask_test_edges(sp.coo_matrix(Adjacency_Matrix_raw))
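    # Note: mask_test_edges (from the local Data_Process module) presumably follows the usual
    # VGAE-style preprocessing: it hides a random subset of edges for validation/test link
    # prediction and returns the remaining edges as the training adjacency (adj_train).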
Adjacency_Matrix = adj_train.todense()
Adjacency_Matrix = torch.Tensor(Adjacency_Matrix)
Features = torch.Tensor(Features)
else:
Features = torch.Tensor(Features)
Adjacency_Matrix = torch.Tensor(Adjacency_Matrix_raw)
################################################ adjacency convolution ##################################################
convolution_kernel = Convolution_Kernel(Adjacency_Matrix)
Adjacency_Convolution = convolution_kernel.Adjacency_Convolution()
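# Note: Adjacency_Convolution() presumably returns the renormalized propagation matrix used by
# GCN-style layers, e.g. D^(-1/2) (A + I) D^(-1/2); see Convolution_Kernel in the local modules.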
############################################ Results Initialization ###################################################
ACC_GAE_total = []
NMI_GAE_total = []
PUR_GAE_total = []
ACC_GAE_total_STD = []
NMI_GAE_total_STD = []
PUR_GAE_total_STD = []
F1_score = []
roc_score = []
ap_score = []
####################################### Model #########################################################################
mse_loss = torch.nn.MSELoss(size_average=False)
bce_loss = torch.nn.BCELoss(size_average=False)
model_GAE = myGAE(Features.shape[1], Hidden_Layer_1, Hidden_Layer_2)
optimzer = torch.optim.Adam(model_GAE.parameters(), lr=Learning_Rate)
start_time = time.time()
####################################### Train and result ################################################################
for epoch in range(Epoch_Num):
Graph_Reconstruction, Latent_Representation = model_GAE(Adjacency_Convolution, Features)
loss = bce_loss(Graph_Reconstruction.view(-1), Adjacency_Matrix.view(-1))
optimzer.zero_grad()
loss.backward()
optimzer.step()
Latent_Representation = Latent_Representation.cpu().detach().numpy()
##################################################### Results ####################################################
if Classification and (epoch + 1) % 5 == 0:
print("Epoch:{},Loss:{:.4f}".format(epoch + 1, loss.item()))
score = mySVM(Latent_Representation, Labels, scale=0.3)
print("Epoch[{}/{}], F1-score = {}".format(epoch + 1, Epoch_Num, score))
np.save(path_result + "{}.npy".format(epoch + 1), Latent_Representation)
F1_score.append(score)
elif Clustering and (epoch + 1) % 5 == 0:
print("Epoch:{},Loss:{:.4f}".format(epoch + 1, loss.item()))
ACC_H2 = []
NMI_H2 = []
PUR_H2 = []
kmeans = KMeans(n_clusters=max(np.int_(Labels).flatten()))
for i in range(10):
Y_pred_OK = kmeans.fit_predict(Latent_Representation)
Labels_K = np.array(Labels).flatten()
AM = clustering_metrics(Y_pred_OK, Labels_K)
ACC, NMI, PUR = AM.evaluationClusterModelFromLabel(print_msg=False)
ACC_H2.append(ACC)
NMI_H2.append(NMI)
PUR_H2.append(PUR)
ACC_GAE_total.append(100 * np.mean(ACC_H2))
NMI_GAE_total.append(100 * np.mean(NMI_H2))
PUR_GAE_total.append(100 * np.mean(PUR_H2))
ACC_GAE_total_STD.append(100 * np.std(ACC_H2))
NMI_GAE_total_STD.append(100 * np.std(NMI_H2))
PUR_GAE_total_STD.append(100 * np.std(PUR_H2))
print('ACC_H2=', 100 * np.mean(ACC_H2), '\n', 'NMI_H2=', 100 * np.mean(NMI_H2), '\n', 'PUR_H2=',
100 * np.mean(PUR_H2))
np.save(path_result + "{}.npy".format(epoch + 1), Latent_Representation)
elif Link_Prediction and (epoch + 1) % 5 == 0:
roc_score_temp, ap_score_temp = get_roc_score(test_edges, test_edges_false, Latent_Representation)
roc_score.append(roc_score_temp)
ap_score.append(ap_score_temp)
print("Epoch: [{}]/[{}]".format(epoch + 1, Epoch_Num))
print("AUC = {}".format(roc_score_temp))
print("AP = {}".format(ap_score_temp))
###############################################################Clustering Result ##############################
if Clustering:
Index_MAX = np.argmax(ACC_GAE_total)
ACC_GAE_max = np.float(ACC_GAE_total[Index_MAX])
NMI_GAE_max = np.float(NMI_GAE_total[Index_MAX])
PUR_GAE_max = np.float(PUR_GAE_total[Index_MAX])
ACC_STD = np.float(ACC_GAE_total_STD[Index_MAX])
NMI_STD = np.float(NMI_GAE_total_STD[Index_MAX])
PUR_STD = np.float(PUR_GAE_total_STD[Index_MAX])
print('ACC_GAE_max={:.2f} +- {:.2f}'.format(ACC_GAE_max, ACC_STD))
print('NMI_GAE_max={:.2f} +- {:.2f}'.format(NMI_GAE_max, NMI_STD))
print('PUR_GAE_max={:.2f} +- {:.2f}'.format(PUR_GAE_max, PUR_STD))
print("The incompleteness of the adjacency matrix is {}%".format(scale * 100))
elif Classification:
print("GAE: F1-score_max is {:.2f}".format(100*np.max(F1_score)))
elif Link_Prediction:
print("VGAE: AUC_max is {:.2f}".format(100 * np.max(roc_score)))
print("VGAE: AP_max is {:.2f}".format(100 * np.max(ap_score)))
########################################################### t- SNE #################################################
if t_SNE:
print("dataset is {}".format(Dataset))
print("Index_Max = {}".format(Index_MAX))
Latent_Representation_max = np.load(path_result + "{}.npy".format((Index_MAX+1) * 5))
Features = np.array(Features)
plot_embeddings(Latent_Representation_max, Features, Labels)
########################################################################################################################
end_time = time.time()
print("Running time is {}".format(end_time - start_time))
|
[
"sys.path.append",
"torch.nn.MSELoss",
"numpy.int_",
"torch.nn.BCELoss",
"warnings.filterwarnings",
"numpy.argmax",
"numpy.std",
"numpy.float",
"time.time",
"numpy.max",
"torch.Tensor",
"numpy.array",
"numpy.mean"
] |
[((53, 86), 'sys.path.append', 'sys.path.append', (['"""./Data_Process"""'], {}), "('./Data_Process')\n", (68, 86), False, 'import sys\n'), ((330, 363), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (353, 363), False, 'import warnings\n'), ((2885, 2921), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (2901, 2921), False, 'import torch\n'), ((2934, 2970), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {'size_average': '(False)'}), '(size_average=False)\n', (2950, 2970), False, 'import torch\n'), ((3126, 3137), 'time.time', 'time.time', ([], {}), '()\n', (3135, 3137), False, 'import time\n'), ((7325, 7336), 'time.time', 'time.time', ([], {}), '()\n', (7334, 7336), False, 'import time\n'), ((1212, 1234), 'torch.Tensor', 'torch.Tensor', (['Features'], {}), '(Features)\n', (1224, 1234), False, 'import torch\n'), ((2018, 2048), 'torch.Tensor', 'torch.Tensor', (['Adjacency_Matrix'], {}), '(Adjacency_Matrix)\n', (2030, 2048), False, 'import torch\n'), ((2065, 2087), 'torch.Tensor', 'torch.Tensor', (['Features'], {}), '(Features)\n', (2077, 2087), False, 'import torch\n'), ((2111, 2133), 'torch.Tensor', 'torch.Tensor', (['Features'], {}), '(Features)\n', (2123, 2133), False, 'import torch\n'), ((2158, 2192), 'torch.Tensor', 'torch.Tensor', (['Adjacency_Matrix_raw'], {}), '(Adjacency_Matrix_raw)\n', (2170, 2192), False, 'import torch\n'), ((5866, 5890), 'numpy.argmax', 'np.argmax', (['ACC_GAE_total'], {}), '(ACC_GAE_total)\n', (5875, 5890), True, 'import numpy as np\n'), ((5910, 5944), 'numpy.float', 'np.float', (['ACC_GAE_total[Index_MAX]'], {}), '(ACC_GAE_total[Index_MAX])\n', (5918, 5944), True, 'import numpy as np\n'), ((5964, 5998), 'numpy.float', 'np.float', (['NMI_GAE_total[Index_MAX]'], {}), '(NMI_GAE_total[Index_MAX])\n', (5972, 5998), True, 'import numpy as np\n'), ((6018, 6052), 'numpy.float', 'np.float', (['PUR_GAE_total[Index_MAX]'], {}), '(PUR_GAE_total[Index_MAX])\n', (6026, 6052), True, 'import numpy as np\n'), ((6070, 6108), 'numpy.float', 'np.float', (['ACC_GAE_total_STD[Index_MAX]'], {}), '(ACC_GAE_total_STD[Index_MAX])\n', (6078, 6108), True, 'import numpy as np\n'), ((6124, 6162), 'numpy.float', 'np.float', (['NMI_GAE_total_STD[Index_MAX]'], {}), '(NMI_GAE_total_STD[Index_MAX])\n', (6132, 6162), True, 'import numpy as np\n'), ((6178, 6216), 'numpy.float', 'np.float', (['PUR_GAE_total_STD[Index_MAX]'], {}), '(PUR_GAE_total_STD[Index_MAX])\n', (6186, 6216), True, 'import numpy as np\n'), ((7104, 7122), 'numpy.array', 'np.array', (['Features'], {}), '(Features)\n', (7112, 7122), True, 'import numpy as np\n'), ((4786, 4801), 'numpy.mean', 'np.mean', (['ACC_H2'], {}), '(ACC_H2)\n', (4793, 4801), True, 'import numpy as np\n'), ((4839, 4854), 'numpy.mean', 'np.mean', (['NMI_H2'], {}), '(NMI_H2)\n', (4846, 4854), True, 'import numpy as np\n'), ((4892, 4907), 'numpy.mean', 'np.mean', (['PUR_H2'], {}), '(PUR_H2)\n', (4899, 4907), True, 'import numpy as np\n'), ((4949, 4963), 'numpy.std', 'np.std', (['ACC_H2'], {}), '(ACC_H2)\n', (4955, 4963), True, 'import numpy as np\n'), ((5005, 5019), 'numpy.std', 'np.std', (['NMI_H2'], {}), '(NMI_H2)\n', (5011, 5019), True, 'import numpy as np\n'), ((5061, 5075), 'numpy.std', 'np.std', (['PUR_H2'], {}), '(PUR_H2)\n', (5067, 5075), True, 'import numpy as np\n'), ((5111, 5126), 'numpy.mean', 'np.mean', (['ACC_H2'], {}), '(ACC_H2)\n', (5118, 5126), True, 'import numpy as np\n'), ((5151, 5166), 'numpy.mean', 'np.mean', (['NMI_H2'], {}), '(NMI_H2)\n', (5158, 
5166), True, 'import numpy as np\n'), ((5206, 5221), 'numpy.mean', 'np.mean', (['PUR_H2'], {}), '(PUR_H2)\n', (5213, 5221), True, 'import numpy as np\n'), ((6595, 6611), 'numpy.max', 'np.max', (['F1_score'], {}), '(F1_score)\n', (6601, 6611), True, 'import numpy as np\n'), ((4486, 4502), 'numpy.array', 'np.array', (['Labels'], {}), '(Labels)\n', (4494, 4502), True, 'import numpy as np\n'), ((6689, 6706), 'numpy.max', 'np.max', (['roc_score'], {}), '(roc_score)\n', (6695, 6706), True, 'import numpy as np\n'), ((6758, 6774), 'numpy.max', 'np.max', (['ap_score'], {}), '(ap_score)\n', (6764, 6774), True, 'import numpy as np\n'), ((4338, 4353), 'numpy.int_', 'np.int_', (['Labels'], {}), '(Labels)\n', (4345, 4353), True, 'import numpy as np\n')]
|
import random
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense, Activation,Dropout
from keras.optimizers import Adam
from keras import backend as K
import matplotlib.pyplot as plt
import pygame
#setup/initialize the environment
black = (20,20,20)
white = (230,230,230)
red = (230,0,0)
green = (0,230,0)
blue = (0,0,230)
display_width = 320
display_height = 320
clock = pygame.time.Clock()
fps = 30
EPISODES = 20
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.999539589
self.learning_rate = 0.001
self.model = self._build_model()
#self.target_model = self._build_model()
#self.update_target_model()
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(128, input_dim=self.state_size))
model.add(Activation('relu'))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss="mean_squared_error",
optimizer=Adam(lr=self.learning_rate))
return model
#def update_target_model(self):
# copy weights from model to target_model
# self.target_model.set_weights(self.model.get_weights())
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action
def replay(self, batch_size):
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = self.model.predict(state)
if done:
target[0][action] = reward
else:
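                # Bellman target for a non-terminal transition: Q(s, a) <- r + gamma * max_a' Q(s', a')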
Q_future = self.model.predict(next_state)[0]
target[0][action] = reward + self.gamma * np.amax(Q_future)
self.model.fit(state, target, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self,name):
self.model.load_weights(name)
def save(self,name):
self.model.save_weights(name)
pygame.init()
font = pygame.font.SysFont("Arial.ttf",30)
pygame.display.set_caption("snake environment for data fetch")
gameDisplay = pygame.display.set_mode((display_width,display_height))
#define snake class
class Snake():
def __init__(self):
self.length_counter = 1
self.body_list = []
self.body_thickness = 20
self.head_x = round(display_width / 2 / self.body_thickness) * self.body_thickness
self.head_y = round(display_height / 2 / self.body_thickness) * self.body_thickness
self.head_x_change = 0
self.head_y_change = 0
def draw(self,act):
if act == 0 and self.head_x_change == 0:
self.head_x_change = -self.body_thickness
self.head_y_change = 0
if act == 1 and self.head_x_change == 0:
self.head_x_change = self.body_thickness
self.head_y_change = 0
if act == 2 and self.head_y_change == 0:
self.head_y_change = -self.body_thickness
self.head_x_change = 0
if act == 3 and self.head_y_change == 0:
self.head_y_change = self.body_thickness
self.head_x_change = 0
self.head_x += self.head_x_change
self.head_y += self.head_y_change
self.body_list.append([self.head_x,self.head_y])
if len(self.body_list) > self.length_counter:
del self.body_list[0]
for XnY in self.body_list[:-1]:
pygame.draw.rect(gameDisplay,white,[XnY[0],XnY[1],self.body_thickness,self.body_thickness])
pygame.draw.rect(gameDisplay,red,[self.body_list[-1][0],self.body_list[-1][1],self.body_thickness,self.body_thickness])
pygame.display.update()
#define apple class
class Apple():
def __init__(self):
self.thickness = 20
self.x_pos = round(random.randrange(0,display_width-self.thickness)/self.thickness)*self.thickness
self.y_pos = round(random.randrange(0,display_height-self.thickness)/self.thickness)*self.thickness
def draw(self):
pygame.draw.rect(gameDisplay,blue,[self.x_pos,self.y_pos,self.thickness,self.thickness])
pygame.display.update()
#define apple eaten function
def apple_eaten(snake_obj,apple_obj):
x = False
if apple_obj.x_pos == snake_obj.head_x and apple_obj.y_pos == snake_obj.head_y:
x = True
snake_obj.length_counter += 1
apple_obj.x_pos = round(random.randrange(0,display_width-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness
apple_obj.y_pos = round(random.randrange(0,display_height-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness
while True:
            if [apple_obj.x_pos,apple_obj.y_pos] in snake_obj.body_list:
apple_obj.x_pos = round(random.randrange(0,display_width-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness
apple_obj.y_pos = round(random.randrange(0,display_height-apple_obj.thickness)/apple_obj.thickness)*apple_obj.thickness
else:
break
return x
#define game over function
def show_game_over_screen():
gameOver = True
gameExit = False
text = "game over press p to play again or q to quit"
text_to_screen = font.render(text,True,blue)
text_rect = text_to_screen.get_rect()
text_rect.center = display_width/2 , display_height/2
while gameOver:
gameDisplay.blit(text_to_screen,text_rect)
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = False
gameExit = True
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_p:
GameLoop()
gameOver = False
gameExit = True
elif event.key == pygame.K_q:
gameOver = False
gameExit = True
return gameOver,gameExit
#define collision function
def collision(snake_obj):
gameOver = False
if snake_obj.head_x >= display_width or snake_obj.head_x < 0 or snake_obj.head_y >= display_height or snake_obj.head_y < 0:
gameOver = True
else:
for XnY in snake_obj.body_list[:-1]:
if XnY == snake_obj.body_list[-1]:
gameOver = True
break
return gameOver
#define show score function
def show_score(snake_obj):
text = str(snake_obj.length_counter-1)
text_to_screen = font.render(text,True,green)
gameDisplay.blit(text_to_screen,[0,0])
pygame.display.update()
def mod(x):
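    # Helper: absolute value (equivalent to the built-in abs(x)).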
if x>=0:
return x
return -x
#defining get_state function
def get_state(snake_obj,apple_obj):
data_u = snake_obj.head_y / snake_obj.body_thickness
data_l = snake_obj.head_x / snake_obj.body_thickness
data_r = (display_width - snake_obj.head_x - snake_obj.body_thickness)/snake_obj.body_thickness
data_d = (display_height - snake_obj.head_y - snake_obj.body_thickness)/snake_obj.body_thickness
data_ul = 0
data_ur = 0
data_dl = 0
data_dr = 0
if data_u > data_l:
data_ul = data_l * 2 ** .5
else:
data_ul = data_u * 2 ** .5
if data_u > data_r:
data_ur = data_r * 2 ** .5
else:
data_ur = data_u * 2 ** .5
if data_d > data_l:
data_dl = data_l * 2 ** .5
else:
data_dl = data_d * 2 ** .5
if data_d > data_r:
data_dr = data_r * 2 ** .5 - (apple_obj.thickness/snake_obj.body_thickness) * 2 ** .5
else:
data_dr = data_d * 2 ** .5 - (apple_obj.thickness/snake_obj.body_thickness) * 2 ** .5
data_ul = round(data_ul,2)
data_ur = round(data_ur,2)
data_dl = round(data_dl,2)
data_dr = round(data_dr,2)
bin_app_u = 0
bin_app_d = 0
bin_app_r = 0
bin_app_l = 0
bin_app_ul = 0
bin_app_ur = 0
bin_app_dl = 0
bin_app_dr = 0
if mod(apple_obj.x_pos - snake_obj.head_x) == mod(apple_obj.y_pos - snake_obj.head_y):
if apple_obj.x_pos > snake_obj.head_x and apple_obj.y_pos > snake_obj.head_y:
bin_app_dr = 1
elif apple_obj.x_pos < snake_obj.head_x and apple_obj.y_pos < snake_obj.head_y:
bin_app_ul = 1
elif apple_obj.x_pos > snake_obj.head_x and apple_obj.y_pos < snake_obj.head_y:
bin_app_ur = 1
elif apple_obj.x_pos < snake_obj.head_x and apple_obj.y_pos > snake_obj.head_y:
bin_app_dl = 1
elif apple_obj.x_pos == snake_obj.head_x:
if apple_obj.y_pos > snake_obj.head_y:
bin_app_d = 1
else:
bin_app_u = 1
elif apple_obj.y_pos == snake_obj.head_y:
if apple_obj.x_pos > snake_obj.head_x:
bin_app_r = 1
else:
bin_app_l = 1
bin_bod_u = 0
bin_bod_d = 0
bin_bod_r = 0
bin_bod_l = 0
bin_bod_ul = 0
bin_bod_ur = 0
bin_bod_dl = 0
bin_bod_dr = 0
for XnY in snake_obj.body_list[:-1]:
if mod(XnY[0] - snake_obj.head_x) == mod(XnY[1] - snake_obj.head_y):
if XnY[0] > snake_obj.head_x and XnY[1] > snake_obj.head_y:
bin_bod_dr = 1
elif XnY[0] < snake_obj.head_x and XnY[1] < snake_obj.head_y:
bin_bod_ul = 1
elif XnY[0] > snake_obj.head_x and XnY[1] < snake_obj.head_y:
bin_bod_ur = 1
elif XnY[0] < snake_obj.head_x and XnY[1] > snake_obj.head_y:
bin_bod_dl = 1
elif XnY[0] == snake_obj.head_x:
if XnY[1] > snake_obj.head_y:
bin_bod_d = 1
else:
bin_bod_u = 1
elif XnY[1] == snake_obj.head_y:
if XnY[0] > snake_obj.head_x:
bin_bod_r = 1
else:
bin_bod_l = 1
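    # 24-dimensional state: for each of the 8 directions (L, UL, U, UR, R, DR, D, DL) the
    # distance to the wall, a flag for the apple lying in that direction, and a flag for the
    # snake's own body lying in that direction.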
state = [data_l,bin_app_l,bin_bod_l,data_ul,bin_app_ul,bin_bod_ul,data_u,bin_app_u,bin_bod_u,data_ur,bin_app_ur,bin_bod_ur,data_r,bin_app_r,bin_bod_r,data_dr,bin_app_dr,bin_bod_dr,data_d,bin_app_d,bin_bod_d,data_dl,bin_app_dl,bin_bod_dl]
#state = [data_u,data_l,data_r,data_d,data_ul,data_ur,data_dl,data_dr,bin_app_u,bin_app_l,bin_app_r,bin_app_d,bin_app_ul,bin_app_ur,bin_app_dl,bin_app_dr,bin_bod_u,bin_bod_l,bin_bod_r,bin_bod_d,bin_bod_ul,bin_bod_ur,bin_bod_dl,bin_bod_dr]
return state
def GameLoop():
state_size = 24
action_size = 4
count = 0
agent = DQNAgent(state_size, action_size)
agent.load("C:/Users/subha/Desktop/python_codes/project4/9000.hdf5")
for e in range(EPISODES):
count = 0
gameOver = False
action_performed = 0
snake_obj = Snake()
apple_obj = Apple()
C_state = get_state(snake_obj,apple_obj)
C_state = np.array(C_state)
C_state = np.reshape(C_state,[1,state_size])
while not gameOver:
count += 1
gameDisplay.fill(black)
pygame.display.update()
action_performed = np.argmax(agent.model.predict(C_state)[0])
apple_obj.draw()
snake_obj.draw(action_performed)
temp = apple_eaten(snake_obj,apple_obj)
gameOver = collision(snake_obj)
show_score(snake_obj)
N_state = get_state(snake_obj,apple_obj)
N_state = np.array(N_state)
N_state = np.reshape(N_state,[1,state_size])
C_state = N_state
if temp == True:
count = 0
elif count == 400:
break
clock.tick(fps)
pygame.quit()
GameLoop()
quit()
|
[
"numpy.argmax",
"random.sample",
"pygame.event.get",
"pygame.display.update",
"collections.deque",
"pygame.font.SysFont",
"pygame.display.set_mode",
"numpy.reshape",
"pygame.display.set_caption",
"pygame.quit",
"pygame.draw.rect",
"keras.optimizers.Adam",
"pygame.init",
"pygame.time.Clock",
"keras.layers.Activation",
"numpy.amax",
"keras.layers.Dense",
"numpy.array",
"random.randrange",
"numpy.random.rand",
"keras.models.Sequential"
] |
[((470, 489), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (487, 489), False, 'import pygame\n'), ((2788, 2801), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2799, 2801), False, 'import pygame\n'), ((2812, 2848), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""Arial.ttf"""', '(30)'], {}), "('Arial.ttf', 30)\n", (2831, 2848), False, 'import pygame\n'), ((2851, 2913), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""snake environment for data fetch"""'], {}), "('snake environment for data fetch')\n", (2877, 2913), False, 'import pygame\n'), ((2931, 2987), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(display_width, display_height)'], {}), '((display_width, display_height))\n', (2954, 2987), False, 'import pygame\n'), ((7458, 7481), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7479, 7481), False, 'import pygame\n'), ((12538, 12551), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (12549, 12551), False, 'import pygame\n'), ((689, 707), 'collections.deque', 'deque', ([], {'maxlen': '(2000)'}), '(maxlen=2000)\n', (694, 707), False, 'from collections import deque\n'), ((1138, 1150), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1148, 1150), False, 'from keras.models import Sequential\n'), ((2010, 2034), 'numpy.argmax', 'np.argmax', (['act_values[0]'], {}), '(act_values[0])\n', (2019, 2034), True, 'import numpy as np\n'), ((2111, 2149), 'random.sample', 'random.sample', (['self.memory', 'batch_size'], {}), '(self.memory, batch_size)\n', (2124, 2149), False, 'import random\n'), ((4378, 4507), 'pygame.draw.rect', 'pygame.draw.rect', (['gameDisplay', 'red', '[self.body_list[-1][0], self.body_list[-1][1], self.body_thickness, self.\n body_thickness]'], {}), '(gameDisplay, red, [self.body_list[-1][0], self.body_list[-\n 1][1], self.body_thickness, self.body_thickness])\n', (4394, 4507), False, 'import pygame\n'), ((4507, 4530), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4528, 4530), False, 'import pygame\n'), ((4871, 4968), 'pygame.draw.rect', 'pygame.draw.rect', (['gameDisplay', 'blue', '[self.x_pos, self.y_pos, self.thickness, self.thickness]'], {}), '(gameDisplay, blue, [self.x_pos, self.y_pos, self.thickness,\n self.thickness])\n', (4887, 4968), False, 'import pygame\n'), ((4969, 4992), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4990, 4992), False, 'import pygame\n'), ((6304, 6327), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6325, 6327), False, 'import pygame\n'), ((6350, 6368), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (6366, 6368), False, 'import pygame\n'), ((11721, 11738), 'numpy.array', 'np.array', (['C_state'], {}), '(C_state)\n', (11729, 11738), True, 'import numpy as np\n'), ((11758, 11794), 'numpy.reshape', 'np.reshape', (['C_state', '[1, state_size]'], {}), '(C_state, [1, state_size])\n', (11768, 11794), True, 'import numpy as np\n'), ((1170, 1207), 'keras.layers.Dense', 'Dense', (['(128)'], {'input_dim': 'self.state_size'}), '(128, input_dim=self.state_size)\n', (1175, 1207), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1228, 1246), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1238, 1246), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1269, 1279), 'keras.layers.Dense', 'Dense', (['(128)'], {}), '(128)\n', (1274, 1279), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1300, 1318), 'keras.layers.Activation', 
'Activation', (['"""relu"""'], {}), "('relu')\n", (1310, 1318), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1341, 1385), 'keras.layers.Dense', 'Dense', (['self.action_size'], {'activation': '"""linear"""'}), "(self.action_size, activation='linear')\n", (1346, 1385), False, 'from keras.layers import Dense, Activation, Dropout\n'), ((1857, 1873), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1871, 1873), True, 'import numpy as np\n'), ((1911, 1945), 'random.randrange', 'random.randrange', (['self.action_size'], {}), '(self.action_size)\n', (1927, 1945), False, 'import random\n'), ((4277, 4377), 'pygame.draw.rect', 'pygame.draw.rect', (['gameDisplay', 'white', '[XnY[0], XnY[1], self.body_thickness, self.body_thickness]'], {}), '(gameDisplay, white, [XnY[0], XnY[1], self.body_thickness,\n self.body_thickness])\n', (4293, 4377), False, 'import pygame\n'), ((11896, 11919), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (11917, 11919), False, 'import pygame\n'), ((12281, 12298), 'numpy.array', 'np.array', (['N_state'], {}), '(N_state)\n', (12289, 12298), True, 'import numpy as np\n'), ((12322, 12358), 'numpy.reshape', 'np.reshape', (['N_state', '[1, state_size]'], {}), '(N_state, [1, state_size])\n', (12332, 12358), True, 'import numpy as np\n'), ((1470, 1497), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'self.learning_rate'}), '(lr=self.learning_rate)\n', (1474, 1497), False, 'from keras.optimizers import Adam\n'), ((4652, 4703), 'random.randrange', 'random.randrange', (['(0)', '(display_width - self.thickness)'], {}), '(0, display_width - self.thickness)\n', (4668, 4703), False, 'import random\n'), ((4760, 4812), 'random.randrange', 'random.randrange', (['(0)', '(display_height - self.thickness)'], {}), '(0, display_height - self.thickness)\n', (4776, 4812), False, 'import random\n'), ((5256, 5312), 'random.randrange', 'random.randrange', (['(0)', '(display_width - apple_obj.thickness)'], {}), '(0, display_width - apple_obj.thickness)\n', (5272, 5312), False, 'import random\n'), ((5384, 5441), 'random.randrange', 'random.randrange', (['(0)', '(display_height - apple_obj.thickness)'], {}), '(0, display_height - apple_obj.thickness)\n', (5400, 5441), False, 'import random\n'), ((2472, 2489), 'numpy.amax', 'np.amax', (['Q_future'], {}), '(Q_future)\n', (2479, 2489), True, 'import numpy as np\n'), ((5616, 5672), 'random.randrange', 'random.randrange', (['(0)', '(display_width - apple_obj.thickness)'], {}), '(0, display_width - apple_obj.thickness)\n', (5632, 5672), False, 'import random\n'), ((5752, 5809), 'random.randrange', 'random.randrange', (['(0)', '(display_height - apple_obj.thickness)'], {}), '(0, display_height - apple_obj.thickness)\n', (5768, 5809), False, 'import random\n')]
|
"""
desi_specs.py
Author: <NAME>
References:
- https://github.com/desihub/desitarget/blob/master/py/desitarget/sv3/data/sv3_targetmask.yaml
- https://desidatamodel.readthedocs.io/en/latest/DESI_SPECTRO_REDUX/SPECPROD/tiles/TILEID/NIGHT/coadd-SPECTRO-TILEID-NIGHT.html
"""
import os
import numpy as np
from astropy.io import fits
from astropy.table import Table, join, vstack, unique
from easyquery import Query, QueryMaker
from SAGA.utils import join_str_arr
__all__ = ["load_fits", "get_all_tiles_and_nights", "find_redshifts_and_specs", "pack_for_marz", "is_bright_time", "is_bgs_target", "is_lowz_target"]
__author__ = "<NAME>"
BASE_DIR = "/global/cfs/cdirs/desi/spectro/redux/daily/tiles"
def load_fits(filename, hdu=1):
"""
Usage:
t = load_fits("/path/to/fits", hdu=1)
t1, t2 = load_fits("/path/to/fits", hdu=[1, 2])
"""
if isinstance(hdu, int):
needed_hdu = [hdu]
return_tuple = False
else:
needed_hdu = hdu
return_tuple = True
if not all((isinstance(h, int) for h in needed_hdu)):
raise ValueError
hdu_list = fits.open(filename, cache=False, memmap=True)
try:
t = [hdu_list[hdu].data if hdu_list[hdu].is_image else Table(hdu_list[hdu].data, masked=False) for hdu in needed_hdu]
finally:
try:
for hdu in needed_hdu:
del hdu_list[hdu].data
hdu_list.close()
del hdu_list
except: # pylint: disable=bare-except # noqa: E722
pass
if return_tuple:
return tuple(t)
return t.pop()
def _get_redshifts(filepath, target_ids=None):
z1, z2 = load_fits(filepath, [1, 2])
if "SV3_DESI_TARGET" not in z2.colnames:
return
z2.sort(["TARGETID", "NUM_ITER"])
z2 = unique(z2, "TARGETID", keep="last")
q = Query() if target_ids is None else QueryMaker.isin("TARGETID", target_ids)
z1 = q.filter(z1, ["TARGETID", "Z", "ZERR", "ZWARN", "CHI2", "SPECTYPE", "DELTACHI2"])
z2 = q.filter(z2, ["TARGETID", "TARGET_RA", "TARGET_DEC", "FLUX_R", "FLUX_G", "SHAPE_R", "OBSCONDITIONS", "NIGHT", "TILEID", "SV3_DESI_TARGET", "SV3_BGS_TARGET", "SV3_SCND_TARGET"])
if len(z1) and len(z2):
z = join(z1, z2, "TARGETID")
if len(z):
return z
def _get_specs(filepath, sorted_target_ids):
target_ids_here = load_fits(filepath)["TARGETID"]
idx = np.searchsorted(sorted_target_ids, target_ids_here)
idx[idx >= len(sorted_target_ids)] = -1
matched = target_ids_here == sorted_target_ids[idx]
if not matched.any():
return
return (
np.asarray(target_ids_here[matched]),
np.concatenate(load_fits(filepath, [2, 7, 12])), # B_WAVELENGTH, R_WAVELENGTH, Z_WAVELENGTH
np.hstack([d[matched] for d in load_fits(filepath, [3, 8, 13])]), # B_FLUX, R_FLUX, Z_FLUX
np.hstack([d[matched] for d in load_fits(filepath, [4, 9, 14])]), # B_IVAR, R_IVAR, Z_IVAR
)
def get_all_tiles_and_nights(nights_since=20210405):
"""
Returns a table of TILEID and NIGHT for all nights since `nights_since`
"""
out = {"TILEID":[], "NIGHT":[]}
for tileid in os.listdir(BASE_DIR):
try:
tileid = int(tileid)
except ValueError:
continue
dirpath = "{}/{}".format(BASE_DIR, tileid)
try:
nights = os.listdir(dirpath)
except (OSError, IOError):
continue
for night in nights:
try:
night = int(night)
except ValueError:
continue
if night >= nights_since:
out["TILEID"].append(tileid)
out["NIGHT"].append(night)
return Table(out)
def _loop_over_files(tileid, night):
dirpath = "{}/{}/{}".format(BASE_DIR, tileid, night)
for filename in os.listdir(dirpath):
if filename.endswith(".fits"):
yield filename.partition("-")[0], os.path.join(dirpath, filename)
def _filename_to_path(filename):
tileid, night = map(int, filename.partition('.')[0].split('-')[2:4])
return "{}/{}/{}/{}".format(BASE_DIR, tileid, night, filename)
is_bright_time = Query("(OBSCONDITIONS >> 9) % 2 > 0")
is_bgs_target = Query("SV3_BGS_TARGET > 0")
is_lowz_target = Query("(SV3_SCND_TARGET >> 15) % 8 > 0")
is_lowz = Query("Z < 0.05")
is_galaxy = QueryMaker.equals("SPECTYPE", "GALAXY")
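# Note on the bit masks above: OBSCONDITIONS bit 9 flags bright-time observations,
# SV3_BGS_TARGET > 0 flags BGS targets, and bits 15-17 of SV3_SCND_TARGET are assumed to be
# the LOWZ secondary-target bits per the sv3_targetmask.yaml referenced in the module docstring.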
def find_redshifts_and_specs(t=None, retrieve_specs=False, skip_redshifts=False, selection=is_lowz_target, exclude_bgs=False, all_lowz=False, **kwargs):
"""
    Takes a table `t` with columns "TILEID" and "NIGHT", and finds all redshifts for LOWZ targets.
Set `exclude_bgs` to True to exclude targets that overlap with BGS.
Alternatively, the input table can have a "TARGETID" column,
in which case the function will find corresponding redshifts.
Set `retrieve_specs` to True to also obtain the spectra.
    In this case, the returned variables are:
redshifts, specs_flux, specs_ivar, specs_wl, specs_targetid
Note that the function will not verify if all requested targets are found.
It will also not verify if the redshifts table is consistent with specs.
"""
if t is None:
t = Table(kwargs)
filenames_known = "FILENAME" in t.colnames
targets_known = "TARGETID" in t.colnames
group_keys = ["FILENAME"] if filenames_known else ["TILEID", "NIGHT"]
assert all(c in t.colnames for c in group_keys)
if targets_known:
t.sort(group_keys + ["TARGETID"])
redshifts = []
specs = []
if skip_redshifts:
if not (filenames_known and targets_known):
raise ValueError("Must have FILENAME and TARGETID in the input table to skip redshift collection.")
if not retrieve_specs:
raise ValueError("Nothing to do!!")
redshifts = t
else:
q = Query(selection)
if all_lowz:
q = q | (is_lowz & is_galaxy)
if exclude_bgs:
q = q & (~is_bgs_target)
for t1 in t.group_by(group_keys).groups:
if filenames_known:
file_iter = [("zbest", _filename_to_path(t1["FILENAME"][0]))]
else:
file_iter = _loop_over_files(t1["TILEID"][0], t1["NIGHT"][0])
for filetype, filepath in file_iter:
if filetype == "zbest":
data_this = _get_redshifts(filepath, t1["TARGETID"] if targets_known else None)
if data_this is not None and not targets_known:
data_this = q.filter(data_this)
if data_this is not None and len(data_this):
data_this["FILENAME"] = os.path.basename(filepath)
redshifts.append(data_this)
redshifts = vstack(redshifts)
print("Found {} redshifts".format(len(redshifts)))
redshifts.sort(["FILENAME", "TARGETID"])
if not retrieve_specs:
return redshifts
for redshifts_this in redshifts.group_by(["FILENAME"]).groups:
filepath = _filename_to_path(redshifts_this["FILENAME"][0].replace("zbest-", "coadd-"))
data_this = _get_specs(filepath, redshifts_this["TARGETID"])
if data_this is not None:
specs.append(data_this)
specs_id = np.concatenate([t[0] for t in specs])
specs_flux = np.vstack([t[2] for t in specs])
specs_ivar = np.vstack([t[3] for t in specs])
sorter = specs_id.argsort()
assert len(specs_id) == len(specs_flux)
assert len(specs_id) == len(specs_ivar)
specs_wl = specs[0][1]
assert all((t[1] == specs_wl).all() for t in specs)
print("Found {} specs".format(len(specs_id)))
if len(redshifts) == len(specs_id) and not (redshifts["TARGETID"] == specs_id).all():
print("WARNING: TARGETID in redshifts does not match those in specs")
return redshifts, specs_flux, specs_ivar, specs_wl, specs_id
def pack_for_marz(output_path, redshifts, specs_flux, specs_ivar, specs_wl, *args):
"""
Pack redshift table and specs into marz format.
Example usage:
data = find_redshifts_and_specs(t, retrieve_specs=True)
pack_for_marz("/path/to/output.fits", *data)
"""
if len(redshifts) != len(specs_flux):
raise ValueError
with np.errstate(divide="ignore"):
mag = 22.5 - 2.5*np.log10(redshifts["FLUX_R"])
t = Table(
{
"TYPE": np.where(specs_flux.any(axis=1), "P", ""),
"COMMENT": redshifts["TARGETID"].astype(str),
"RA": np.deg2rad(redshifts["TARGET_RA"]),
"DEC": np.deg2rad(redshifts["TARGET_DEC"]),
"MAGNITUDE": mag,
}
)
t["NAME"] = join_str_arr(
"Z=", redshifts["Z"].astype(np.float32).astype(str),
",ZW=", redshifts["ZWARN"].astype(str),
",T=", (np.log2(redshifts["SV3_SCND_TARGET"])-14).astype(np.int16).astype(str),
)
    # VACUUM WAVELENGTH TO AIR WAVELENGTH
dwave = specs_wl / (1.0 + 2.735182e-4 + 131.4182 / specs_wl**2 + 2.76249e8 / specs_wl**4)
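    # (The conversion above appears to be the standard SDSS-style vacuum-to-air refraction
    # formula, with wavelengths in Angstroms.)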
with np.errstate(divide="ignore"):
specs_var = 1.0 / specs_ivar
fits.HDUList([
fits.PrimaryHDU(specs_flux, do_not_scale_image_data=True),
fits.ImageHDU(specs_var, name="variance", do_not_scale_image_data=True),
fits.ImageHDU(dwave, name="wavelength", do_not_scale_image_data=True),
fits.BinTableHDU(t, name="fibres"),
]).writeto(output_path, overwrite=True)
|
[
"astropy.io.fits.PrimaryHDU",
"os.path.join",
"astropy.io.fits.ImageHDU",
"astropy.io.fits.BinTableHDU",
"numpy.log10",
"astropy.table.unique",
"os.path.basename",
"easyquery.Query",
"numpy.log2",
"numpy.asarray",
"astropy.table.join",
"easyquery.QueryMaker.equals",
"astropy.io.fits.open",
"easyquery.QueryMaker.isin",
"os.listdir",
"numpy.vstack",
"numpy.concatenate",
"astropy.table.Table",
"numpy.deg2rad",
"numpy.searchsorted",
"numpy.errstate",
"astropy.table.vstack"
] |
[((4192, 4229), 'easyquery.Query', 'Query', (['"""(OBSCONDITIONS >> 9) % 2 > 0"""'], {}), "('(OBSCONDITIONS >> 9) % 2 > 0')\n", (4197, 4229), False, 'from easyquery import Query, QueryMaker\n'), ((4246, 4273), 'easyquery.Query', 'Query', (['"""SV3_BGS_TARGET > 0"""'], {}), "('SV3_BGS_TARGET > 0')\n", (4251, 4273), False, 'from easyquery import Query, QueryMaker\n'), ((4291, 4331), 'easyquery.Query', 'Query', (['"""(SV3_SCND_TARGET >> 15) % 8 > 0"""'], {}), "('(SV3_SCND_TARGET >> 15) % 8 > 0')\n", (4296, 4331), False, 'from easyquery import Query, QueryMaker\n'), ((4342, 4359), 'easyquery.Query', 'Query', (['"""Z < 0.05"""'], {}), "('Z < 0.05')\n", (4347, 4359), False, 'from easyquery import Query, QueryMaker\n'), ((4372, 4411), 'easyquery.QueryMaker.equals', 'QueryMaker.equals', (['"""SPECTYPE"""', '"""GALAXY"""'], {}), "('SPECTYPE', 'GALAXY')\n", (4389, 4411), False, 'from easyquery import Query, QueryMaker\n'), ((1113, 1158), 'astropy.io.fits.open', 'fits.open', (['filename'], {'cache': '(False)', 'memmap': '(True)'}), '(filename, cache=False, memmap=True)\n', (1122, 1158), False, 'from astropy.io import fits\n'), ((1792, 1827), 'astropy.table.unique', 'unique', (['z2', '"""TARGETID"""'], {'keep': '"""last"""'}), "(z2, 'TARGETID', keep='last')\n", (1798, 1827), False, 'from astropy.table import Table, join, vstack, unique\n'), ((2404, 2455), 'numpy.searchsorted', 'np.searchsorted', (['sorted_target_ids', 'target_ids_here'], {}), '(sorted_target_ids, target_ids_here)\n', (2419, 2455), True, 'import numpy as np\n'), ((3166, 3186), 'os.listdir', 'os.listdir', (['BASE_DIR'], {}), '(BASE_DIR)\n', (3176, 3186), False, 'import os\n'), ((3717, 3727), 'astropy.table.Table', 'Table', (['out'], {}), '(out)\n', (3722, 3727), False, 'from astropy.table import Table, join, vstack, unique\n'), ((3844, 3863), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (3854, 3863), False, 'import os\n'), ((7325, 7362), 'numpy.concatenate', 'np.concatenate', (['[t[0] for t in specs]'], {}), '([t[0] for t in specs])\n', (7339, 7362), True, 'import numpy as np\n'), ((7380, 7412), 'numpy.vstack', 'np.vstack', (['[t[2] for t in specs]'], {}), '([t[2] for t in specs])\n', (7389, 7412), True, 'import numpy as np\n'), ((7430, 7462), 'numpy.vstack', 'np.vstack', (['[t[3] for t in specs]'], {}), '([t[3] for t in specs])\n', (7439, 7462), True, 'import numpy as np\n'), ((1836, 1843), 'easyquery.Query', 'Query', ([], {}), '()\n', (1841, 1843), False, 'from easyquery import Query, QueryMaker\n'), ((1871, 1910), 'easyquery.QueryMaker.isin', 'QueryMaker.isin', (['"""TARGETID"""', 'target_ids'], {}), "('TARGETID', target_ids)\n", (1886, 1910), False, 'from easyquery import Query, QueryMaker\n'), ((2228, 2252), 'astropy.table.join', 'join', (['z1', 'z2', '"""TARGETID"""'], {}), "(z1, z2, 'TARGETID')\n", (2232, 2252), False, 'from astropy.table import Table, join, vstack, unique\n'), ((2619, 2655), 'numpy.asarray', 'np.asarray', (['target_ids_here[matched]'], {}), '(target_ids_here[matched])\n', (2629, 2655), True, 'import numpy as np\n'), ((5258, 5271), 'astropy.table.Table', 'Table', (['kwargs'], {}), '(kwargs)\n', (5263, 5271), False, 'from astropy.table import Table, join, vstack, unique\n'), ((5906, 5922), 'easyquery.Query', 'Query', (['selection'], {}), '(selection)\n', (5911, 5922), False, 'from easyquery import Query, QueryMaker\n'), ((6830, 6847), 'astropy.table.vstack', 'vstack', (['redshifts'], {}), '(redshifts)\n', (6836, 6847), False, 'from astropy.table import Table, join, vstack, unique\n'), ((8324, 
8352), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (8335, 8352), True, 'import numpy as np\n'), ((9106, 9134), 'numpy.errstate', 'np.errstate', ([], {'divide': '"""ignore"""'}), "(divide='ignore')\n", (9117, 9134), True, 'import numpy as np\n'), ((3367, 3386), 'os.listdir', 'os.listdir', (['dirpath'], {}), '(dirpath)\n', (3377, 3386), False, 'import os\n'), ((8582, 8616), 'numpy.deg2rad', 'np.deg2rad', (["redshifts['TARGET_RA']"], {}), "(redshifts['TARGET_RA'])\n", (8592, 8616), True, 'import numpy as np\n'), ((8637, 8672), 'numpy.deg2rad', 'np.deg2rad', (["redshifts['TARGET_DEC']"], {}), "(redshifts['TARGET_DEC'])\n", (8647, 8672), True, 'import numpy as np\n'), ((1236, 1275), 'astropy.table.Table', 'Table', (['hdu_list[hdu].data'], {'masked': '(False)'}), '(hdu_list[hdu].data, masked=False)\n', (1241, 1275), False, 'from astropy.table import Table, join, vstack, unique\n'), ((8379, 8408), 'numpy.log10', 'np.log10', (["redshifts['FLUX_R']"], {}), "(redshifts['FLUX_R'])\n", (8387, 8408), True, 'import numpy as np\n'), ((3950, 3981), 'os.path.join', 'os.path.join', (['dirpath', 'filename'], {}), '(dirpath, filename)\n', (3962, 3981), False, 'import os\n'), ((9205, 9262), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['specs_flux'], {'do_not_scale_image_data': '(True)'}), '(specs_flux, do_not_scale_image_data=True)\n', (9220, 9262), False, 'from astropy.io import fits\n'), ((9272, 9343), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['specs_var'], {'name': '"""variance"""', 'do_not_scale_image_data': '(True)'}), "(specs_var, name='variance', do_not_scale_image_data=True)\n", (9285, 9343), False, 'from astropy.io import fits\n'), ((9353, 9422), 'astropy.io.fits.ImageHDU', 'fits.ImageHDU', (['dwave'], {'name': '"""wavelength"""', 'do_not_scale_image_data': '(True)'}), "(dwave, name='wavelength', do_not_scale_image_data=True)\n", (9366, 9422), False, 'from astropy.io import fits\n'), ((9432, 9466), 'astropy.io.fits.BinTableHDU', 'fits.BinTableHDU', (['t'], {'name': '"""fibres"""'}), "(t, name='fibres')\n", (9448, 9466), False, 'from astropy.io import fits\n'), ((6730, 6756), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (6746, 6756), False, 'import os\n'), ((8877, 8914), 'numpy.log2', 'np.log2', (["redshifts['SV3_SCND_TARGET']"], {}), "(redshifts['SV3_SCND_TARGET'])\n", (8884, 8914), True, 'import numpy as np\n')]
|
import logging
import typing
import numpy
import torch
import torch.nn as nn
import torch.nn.functional as functional
LOGGER = logging.getLogger(__name__)
class ModelAverage(nn.Module):
"""
This class works by averaging the outputs of existing models. The models are expected to have linear outputs (i.e.,
*before* converting to probabilities via softmax) of two dimensions, where the first dimension is the batch index
and the second dimension is the class index. The output of this model is the log of the average of the softmax of
each model's output.
"""
def __init__(self, *modules: nn.Module):
super().__init__()
self.base_modules = nn.ModuleList(modules=modules)
def forward(self, x):
linear_outputs: typing.List[torch.Tensor] = []
for module in self.base_modules:
linear_outputs.append(functional.softmax(module(x), dim=1))
        # Make a len(self.base_modules)-by-(batch size)-by-(num classes) Tensor from the set of outputs.
concatenated_outputs = torch.cat([torch.unsqueeze(linear_output, dim=0) for linear_output in linear_outputs])
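        # Average the per-model class probabilities, then take the log so the result behaves like
        # log-probabilities (e.g. suitable for nn.NLLLoss).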
return torch.log(torch.mean(concatenated_outputs, 0))
class TestNet(nn.Module):
"""
This class is a class used for testing the ModelAverage class. It is a linear network with fixed outputs that are
set to `torch.log(probabilities)`, via setting the weights to 0 and the biases to the log probabilities. The number
of classes in the output of this network is 2, and the input to the network is a 3-dimensional vector.
"""
def __init__(self, *probabilities: float):
super().__init__()
self.fixed_linear = nn.Linear(3, 2)
self.fixed_linear.weight.data.fill_(0)
self.fixed_linear.bias.data = torch.log(torch.tensor(probabilities))
def forward(self, x):
return self.fixed_linear(x)
def test_model_average():
model1 = TestNet(0.25, 0.75)
model2 = TestNet(0.75, 0.25)
model_average = ModelAverage(model1, model2)
model_average.eval()
tensor_input = torch.randn(2, 3)
expected_output = torch.log(torch.tensor([[0.5, 0.5], [0.5, 0.5]]))
actual_output = model_average(tensor_input).detach()
assert (
numpy.nextafter(1.0, 2.0, dtype=numpy.float32) - 1.0
>=
torch.abs(1.0 - actual_output / expected_output)
).all()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
test_model_average()
|
[
"torch.mean",
"logging.basicConfig",
"torch.nn.ModuleList",
"torch.randn",
"torch.nn.Linear",
"torch.unsqueeze",
"numpy.nextafter",
"torch.tensor",
"torch.abs",
"logging.getLogger"
] |
[((131, 158), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (148, 158), False, 'import logging\n'), ((2078, 2095), 'torch.randn', 'torch.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (2089, 2095), False, 'import torch\n'), ((2412, 2452), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2431, 2452), False, 'import logging\n'), ((690, 720), 'torch.nn.ModuleList', 'nn.ModuleList', ([], {'modules': 'modules'}), '(modules=modules)\n', (703, 720), True, 'import torch.nn as nn\n'), ((1688, 1703), 'torch.nn.Linear', 'nn.Linear', (['(3)', '(2)'], {}), '(3, 2)\n', (1697, 1703), True, 'import torch.nn as nn\n'), ((2128, 2166), 'torch.tensor', 'torch.tensor', (['[[0.5, 0.5], [0.5, 0.5]]'], {}), '([[0.5, 0.5], [0.5, 0.5]])\n', (2140, 2166), False, 'import torch\n'), ((1159, 1194), 'torch.mean', 'torch.mean', (['concatenated_outputs', '(0)'], {}), '(concatenated_outputs, 0)\n', (1169, 1194), False, 'import torch\n'), ((1799, 1826), 'torch.tensor', 'torch.tensor', (['probabilities'], {}), '(probabilities)\n', (1811, 1826), False, 'import torch\n'), ((1058, 1095), 'torch.unsqueeze', 'torch.unsqueeze', (['linear_output'], {'dim': '(0)'}), '(linear_output, dim=0)\n', (1073, 1095), False, 'import torch\n'), ((2318, 2366), 'torch.abs', 'torch.abs', (['(1.0 - actual_output / expected_output)'], {}), '(1.0 - actual_output / expected_output)\n', (2327, 2366), False, 'import torch\n'), ((2246, 2292), 'numpy.nextafter', 'numpy.nextafter', (['(1.0)', '(2.0)'], {'dtype': 'numpy.float32'}), '(1.0, 2.0, dtype=numpy.float32)\n', (2261, 2292), False, 'import numpy\n')]
|
import numpy as np
def softmax(arr):
expL = np.exp(arr) # Broadcasting
sumExpL = sum(expL)
result = []
for i in expL:
result.append(i * 1.0/sumExpL)
return result
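# Added sketch (not from the original file): np.exp can overflow for large inputs; a numerically
# stable variant subtracts the maximum first, which leaves the softmax result unchanged.
def softmax_stable(arr):
    shifted = np.exp(arr - np.max(arr))  # shifting by max(arr) avoids overflow without changing the result
    return shifted / shifted.sum()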
|
[
"numpy.exp"
] |
[((49, 60), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (55, 60), True, 'import numpy as np\n')]
|
import numpy as np
import scipy.sparse as sp
import pickle as pkl
import os
import h5py
import pandas as pd
import pdb
def map_data(data):
"""
    Map data to proper indices in case they are not in a continuous [0, N) range
Parameters
----------
data : np.int32 arrays
Returns
-------
mapped_data : np.int32 arrays
n : length of mapped_data
"""
uniq = list(set(data))
id_dict = {old: new for new, old in enumerate(sorted(uniq))}
data = np.array([id_dict[x] for x in data])
n = len(uniq)
return data, id_dict, n
def load_matlab_file(path_file, name_field):
"""
load '.mat' files
inputs:
path_file, string containing the file path
        name_field, string containing the field name (default='shape')
warning:
'.mat' files should be saved in the '-v7.3' format
"""
db = h5py.File(path_file, 'r')
ds = db[name_field]
try:
if 'ir' in ds.keys():
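            # 'data', 'ir' and 'jc' are MATLAB's sparse-matrix storage fields
            # (nonzero values, row indices, column pointers), i.e. CSC format.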
data = np.asarray(ds['data'])
ir = np.asarray(ds['ir'])
jc = np.asarray(ds['jc'])
out = sp.csc_matrix((data, ir, jc)).astype(np.float32)
except AttributeError:
        # Transpose in case it is a dense matrix, because of the row- vs column-major ordering between Python and MATLAB
out = np.asarray(ds).astype(np.float32).T
db.close()
return out
def load_data_from_database(dataset, mode='transductive', testing=False, rating_map=None, post_rating_map=None, ratio=1.0):
"""
    Loads the official train/test split and uses 20% of the training samples for validation
    For each split, computes 1-of-num_classes labels. Also computes the training
adjacency matrix. Assumes flattening happens everywhere in row-major fashion.
"""
dtypes = {
        'u_nodes': str, 'v_nodes': np.int32,
'ratings': np.float32}
filename_train = 'data/' + dataset + '/' + mode + '/train.csv'
filename_test = 'data/' + dataset + '/' + mode + '/test.csv'
data_train = pd.read_csv(
filename_train, header=None,
names=['u_nodes', 'v_nodes', 'ratings'], dtype=dtypes)
data_test = pd.read_csv(
filename_test, header=None,
names=['u_nodes', 'v_nodes', 'ratings'], dtype=dtypes)
if mode == 'inductive':
filename_test_init = 'data/' + dataset + '/' + mode + '/test_init.csv'
data_test_init = pd.read_csv(
filename_test_init, header=None,
names=['u_nodes', 'v_nodes', 'ratings'], dtype=dtypes)
data_array_train = data_train.values.tolist()
data_array_train = np.array(data_array_train)
data_array_test = data_test.values.tolist()
data_array_test = np.array(data_array_test)
if mode == 'inductive':
data_array_test_init = data_test_init.values.tolist()
data_array_test_init = np.array(data_array_test_init)
if ratio < 1.0:
data_array_train = data_array_train[data_array_train[:, -1].argsort()[:int(ratio*len(data_array_train))]]
if mode == 'inductive':
data_array = np.concatenate([data_array_train, data_array_test, data_array_test_init], axis=0)
else:
data_array = np.concatenate([data_array_train, data_array_test], axis=0)
u_nodes_ratings = data_array[:, 0].astype(dtypes['u_nodes'])
v_nodes_ratings = data_array[:, 1].astype(dtypes['v_nodes'])
ratings = data_array[:, 2].astype(dtypes['ratings'])
if rating_map is not None:
for i, x in enumerate(ratings):
ratings[i] = rating_map[x]
u_nodes_ratings, u_dict, num_users = map_data(u_nodes_ratings)
v_nodes_ratings, v_dict, num_items = map_data(v_nodes_ratings)
u_nodes_ratings, v_nodes_ratings = u_nodes_ratings.astype(np.int64), v_nodes_ratings.astype(np.int32)
ratings = ratings.astype(np.float64)
u_nodes = u_nodes_ratings
v_nodes = v_nodes_ratings
neutral_rating = -1 # int(np.ceil(np.float(num_classes)/2.)) - 1
# assumes that ratings_train contains at least one example of every rating type
rating_dict = {r: i for i, r in enumerate(np.sort(np.unique(ratings)).tolist())}
labels = np.full((num_users, num_items), neutral_rating, dtype=np.int32)
labels[u_nodes, v_nodes] = np.array([rating_dict[r] for r in ratings])
for i in range(len(u_nodes)):
assert(labels[u_nodes[i], v_nodes[i]] == rating_dict[ratings[i]])
labels = labels.reshape([-1])
# number of test and validation edges, see cf-nade code
num_train = data_array_train.shape[0]
num_test = data_array_test.shape[0]
num_val = int(np.ceil(num_train * 0.2))
num_train = num_train - num_val
if mode == 'inductive':
num_test_init = data_array_test_init.shape[0]
pairs_nonzero = np.array([[u, v] for u, v in zip(u_nodes, v_nodes)])
idx_nonzero = np.array([u * num_items + v for u, v in pairs_nonzero])
for i in range(len(ratings)):
assert(labels[idx_nonzero[i]] == rating_dict[ratings[i]])
if mode == 'inductive':
idx_nonzero_train = idx_nonzero[0:num_train+num_val]
idx_nonzero_test = idx_nonzero[num_train+num_val:num_train+num_val+num_test]
idx_nonzero_test_init = idx_nonzero[num_train+num_val+num_test:]
pairs_nonzero_train = pairs_nonzero[0:num_train+num_val]
pairs_nonzero_test = pairs_nonzero[num_train+num_val:num_train+num_val+num_test]
pairs_nonzero_test_init = pairs_nonzero[num_train+num_val+num_test:]
else:
idx_nonzero_train = idx_nonzero[0:num_train+num_val]
idx_nonzero_test = idx_nonzero[num_train+num_val:]
pairs_nonzero_train = pairs_nonzero[0:num_train+num_val]
pairs_nonzero_test = pairs_nonzero[num_train+num_val:]
# Internally shuffle training set (before splitting off validation set)
rand_idx = list(range(len(idx_nonzero_train)))
np.random.seed(42)
np.random.shuffle(rand_idx)
idx_nonzero_train = idx_nonzero_train[rand_idx]
pairs_nonzero_train = pairs_nonzero_train[rand_idx]
if mode == 'inductive':
idx_nonzero = np.concatenate([idx_nonzero_train, idx_nonzero_test, idx_nonzero_test_init], axis=0)
pairs_nonzero = np.concatenate([pairs_nonzero_train, pairs_nonzero_test, pairs_nonzero_test_init], axis=0)
val_idx = idx_nonzero[0:num_val]
train_idx = idx_nonzero[num_val:num_train + num_val]
test_idx = idx_nonzero[num_train + num_val:num_train + num_val + num_test]
test_init_idx = idx_nonzero[num_train + num_val + num_test:]
else:
idx_nonzero = np.concatenate([idx_nonzero_train, idx_nonzero_test], axis=0)
pairs_nonzero = np.concatenate([pairs_nonzero_train, pairs_nonzero_test], axis=0)
val_idx = idx_nonzero[0:num_val]
train_idx = idx_nonzero[num_val:num_train + num_val]
test_idx = idx_nonzero[num_train + num_val:]
assert(len(test_idx) == num_test)
val_pairs_idx = pairs_nonzero[0:num_val]
train_pairs_idx = pairs_nonzero[num_val:num_train + num_val]
test_pairs_idx = pairs_nonzero[num_train + num_val:num_train + num_val + num_test]
u_test_idx, v_test_idx = test_pairs_idx.transpose()
u_val_idx, v_val_idx = val_pairs_idx.transpose()
u_train_idx, v_train_idx = train_pairs_idx.transpose()
# create labels
train_labels = labels[train_idx]
val_labels = labels[val_idx]
test_labels = labels[test_idx]
if testing:
u_train_idx = np.hstack([u_train_idx, u_val_idx])
v_train_idx = np.hstack([v_train_idx, v_val_idx])
train_labels = np.hstack([train_labels, val_labels])
# for adjacency matrix construction
if mode == 'inductive':
train_idx = np.hstack([train_idx, val_idx, test_init_idx])
else:
train_idx = np.hstack([train_idx, val_idx])
class_values = np.sort(np.unique(ratings))
# make training adjacency matrix
rating_mx_train = np.zeros(num_users * num_items, dtype=np.float32)
if post_rating_map is None:
rating_mx_train[train_idx] = labels[train_idx].astype(np.float32) + 1.
else:
rating_mx_train[train_idx] = np.array([post_rating_map[r] for r in class_values[labels[train_idx]]]) + 1.
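    # Ratings are stored shifted by +1 so that 0 can encode "no observed rating" in the sparse matrix.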
rating_mx_train = sp.csr_matrix(rating_mx_train.reshape(num_users, num_items))
# Features
# drug features
drugs_file = 'data/' + dataset + '/drugs_features.csv'
genes_file = 'data/' + dataset + '/genes_features.csv'
if os.path.exists(drugs_file) and os.path.exists(genes_file):
drugs_df = pd.read_csv(drugs_file)
drugs_headers = drugs_df.columns.values[:-1]
num_drug_features = drugs_headers.shape[0]
u_features = np.zeros((num_users, num_drug_features), dtype=np.float32)
for drugbank_id, d_vec in zip(drugs_df['drugbank_id'].values.tolist(), drugs_df[drugs_headers].values.tolist()):
# check if drugbank_id was listed in ratings file and therefore in mapping dictionary
if drugbank_id in u_dict.keys():
u_features[u_dict[drugbank_id], :] = d_vec
# gene features
genes_df = pd.read_csv(genes_file)
genes_headers = genes_df.columns.values[:-1]
num_gene_features = genes_headers.shape[0]
v_features = np.zeros((num_items, num_gene_features), dtype=np.float32)
for gene_id, g_vec in zip(genes_df['gene_id'].values.tolist(), genes_df[genes_headers].values.tolist()):
# check if gene_id was listed in ratings file and therefore in mapping dictionary
if gene_id in v_dict.keys():
v_features[v_dict[gene_id], :] = g_vec
u_features = sp.csr_matrix(u_features)
v_features = sp.csr_matrix(v_features)
print("Drug features shape: "+str(u_features.shape))
print("Gene features shape: "+str(v_features.shape))
else:
u_features = None
v_features = None
    # Invert the id mappings: internal index -> original identifier
    drug_dict = {idx: drug_id for drug_id, idx in u_dict.items()}
    gene_dict = {idx: gene_id for gene_id, idx in v_dict.items()}
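    # u_features / v_features are scipy CSR matrices (or None when the feature
    # CSV files are absent), rating_mx_train is the sparse training adjacency
    # matrix (observed labels shifted by +1), and drug_dict / gene_dict map
    # internal indices back to the original drugbank_id / gene_id values.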
return u_features, v_features, rating_mx_train, train_labels, u_train_idx, v_train_idx, \
val_labels, u_val_idx, v_val_idx, test_labels, u_test_idx, v_test_idx, class_values, drug_dict, gene_dict
|
[
"numpy.full",
"h5py.File",
"numpy.random.seed",
"numpy.concatenate",
"numpy.ceil",
"pandas.read_csv",
"numpy.asarray",
"numpy.unique",
"numpy.zeros",
"os.path.exists",
"numpy.hstack",
"scipy.sparse.csc_matrix",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.random.shuffle"
] |
[((486, 522), 'numpy.array', 'np.array', (['[id_dict[x] for x in data]'], {}), '([id_dict[x] for x in data])\n', (494, 522), True, 'import numpy as np\n'), ((868, 893), 'h5py.File', 'h5py.File', (['path_file', '"""r"""'], {}), "(path_file, 'r')\n", (877, 893), False, 'import h5py\n'), ((1998, 2097), 'pandas.read_csv', 'pd.read_csv', (['filename_train'], {'header': 'None', 'names': "['u_nodes', 'v_nodes', 'ratings']", 'dtype': 'dtypes'}), "(filename_train, header=None, names=['u_nodes', 'v_nodes',\n 'ratings'], dtype=dtypes)\n", (2009, 2097), True, 'import pandas as pd\n'), ((2128, 2226), 'pandas.read_csv', 'pd.read_csv', (['filename_test'], {'header': 'None', 'names': "['u_nodes', 'v_nodes', 'ratings']", 'dtype': 'dtypes'}), "(filename_test, header=None, names=['u_nodes', 'v_nodes',\n 'ratings'], dtype=dtypes)\n", (2139, 2226), True, 'import pandas as pd\n'), ((2573, 2599), 'numpy.array', 'np.array', (['data_array_train'], {}), '(data_array_train)\n', (2581, 2599), True, 'import numpy as np\n'), ((2670, 2695), 'numpy.array', 'np.array', (['data_array_test'], {}), '(data_array_test)\n', (2678, 2695), True, 'import numpy as np\n'), ((4104, 4167), 'numpy.full', 'np.full', (['(num_users, num_items)', 'neutral_rating'], {'dtype': 'np.int32'}), '((num_users, num_items), neutral_rating, dtype=np.int32)\n', (4111, 4167), True, 'import numpy as np\n'), ((4199, 4242), 'numpy.array', 'np.array', (['[rating_dict[r] for r in ratings]'], {}), '([rating_dict[r] for r in ratings])\n', (4207, 4242), True, 'import numpy as np\n'), ((4789, 4846), 'numpy.array', 'np.array', (['[(u * num_items + v) for u, v in pairs_nonzero]'], {}), '([(u * num_items + v) for u, v in pairs_nonzero])\n', (4797, 4846), True, 'import numpy as np\n'), ((5822, 5840), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (5836, 5840), True, 'import numpy as np\n'), ((5845, 5872), 'numpy.random.shuffle', 'np.random.shuffle', (['rand_idx'], {}), '(rand_idx)\n', (5862, 5872), True, 'import numpy as np\n'), ((7883, 7932), 'numpy.zeros', 'np.zeros', (['(num_users * num_items)'], {'dtype': 'np.float32'}), '(num_users * num_items, dtype=np.float32)\n', (7891, 7932), True, 'import numpy as np\n'), ((2374, 2477), 'pandas.read_csv', 'pd.read_csv', (['filename_test_init'], {'header': 'None', 'names': "['u_nodes', 'v_nodes', 'ratings']", 'dtype': 'dtypes'}), "(filename_test_init, header=None, names=['u_nodes', 'v_nodes',\n 'ratings'], dtype=dtypes)\n", (2385, 2477), True, 'import pandas as pd\n'), ((2818, 2848), 'numpy.array', 'np.array', (['data_array_test_init'], {}), '(data_array_test_init)\n', (2826, 2848), True, 'import numpy as np\n'), ((3034, 3119), 'numpy.concatenate', 'np.concatenate', (['[data_array_train, data_array_test, data_array_test_init]'], {'axis': '(0)'}), '([data_array_train, data_array_test, data_array_test_init],\n axis=0)\n', (3048, 3119), True, 'import numpy as np\n'), ((3147, 3206), 'numpy.concatenate', 'np.concatenate', (['[data_array_train, data_array_test]'], {'axis': '(0)'}), '([data_array_train, data_array_test], axis=0)\n', (3161, 3206), True, 'import numpy as np\n'), ((4552, 4576), 'numpy.ceil', 'np.ceil', (['(num_train * 0.2)'], {}), '(num_train * 0.2)\n', (4559, 4576), True, 'import numpy as np\n'), ((6032, 6120), 'numpy.concatenate', 'np.concatenate', (['[idx_nonzero_train, idx_nonzero_test, idx_nonzero_test_init]'], {'axis': '(0)'}), '([idx_nonzero_train, idx_nonzero_test, idx_nonzero_test_init],\n axis=0)\n', (6046, 6120), True, 'import numpy as np\n'), ((6141, 6235), 'numpy.concatenate', 
'np.concatenate', (['[pairs_nonzero_train, pairs_nonzero_test, pairs_nonzero_test_init]'], {'axis': '(0)'}), '([pairs_nonzero_train, pairs_nonzero_test,\n pairs_nonzero_test_init], axis=0)\n', (6155, 6235), True, 'import numpy as np\n'), ((6520, 6581), 'numpy.concatenate', 'np.concatenate', (['[idx_nonzero_train, idx_nonzero_test]'], {'axis': '(0)'}), '([idx_nonzero_train, idx_nonzero_test], axis=0)\n', (6534, 6581), True, 'import numpy as np\n'), ((6606, 6671), 'numpy.concatenate', 'np.concatenate', (['[pairs_nonzero_train, pairs_nonzero_test]'], {'axis': '(0)'}), '([pairs_nonzero_train, pairs_nonzero_test], axis=0)\n', (6620, 6671), True, 'import numpy as np\n'), ((7399, 7434), 'numpy.hstack', 'np.hstack', (['[u_train_idx, u_val_idx]'], {}), '([u_train_idx, u_val_idx])\n', (7408, 7434), True, 'import numpy as np\n'), ((7457, 7492), 'numpy.hstack', 'np.hstack', (['[v_train_idx, v_val_idx]'], {}), '([v_train_idx, v_val_idx])\n', (7466, 7492), True, 'import numpy as np\n'), ((7516, 7553), 'numpy.hstack', 'np.hstack', (['[train_labels, val_labels]'], {}), '([train_labels, val_labels])\n', (7525, 7553), True, 'import numpy as np\n'), ((7803, 7821), 'numpy.unique', 'np.unique', (['ratings'], {}), '(ratings)\n', (7812, 7821), True, 'import numpy as np\n'), ((8413, 8439), 'os.path.exists', 'os.path.exists', (['drugs_file'], {}), '(drugs_file)\n', (8427, 8439), False, 'import os\n'), ((8444, 8470), 'os.path.exists', 'os.path.exists', (['genes_file'], {}), '(genes_file)\n', (8458, 8470), False, 'import os\n'), ((8491, 8514), 'pandas.read_csv', 'pd.read_csv', (['drugs_file'], {}), '(drugs_file)\n', (8502, 8514), True, 'import pandas as pd\n'), ((8642, 8700), 'numpy.zeros', 'np.zeros', (['(num_users, num_drug_features)'], {'dtype': 'np.float32'}), '((num_users, num_drug_features), dtype=np.float32)\n', (8650, 8700), True, 'import numpy as np\n'), ((9068, 9091), 'pandas.read_csv', 'pd.read_csv', (['genes_file'], {}), '(genes_file)\n', (9079, 9091), True, 'import pandas as pd\n'), ((9219, 9277), 'numpy.zeros', 'np.zeros', (['(num_items, num_gene_features)'], {'dtype': 'np.float32'}), '((num_items, num_gene_features), dtype=np.float32)\n', (9227, 9277), True, 'import numpy as np\n'), ((9603, 9628), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['u_features'], {}), '(u_features)\n', (9616, 9628), True, 'import scipy.sparse as sp\n'), ((9650, 9675), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['v_features'], {}), '(v_features)\n', (9663, 9675), True, 'import scipy.sparse as sp\n'), ((976, 998), 'numpy.asarray', 'np.asarray', (["ds['data']"], {}), "(ds['data'])\n", (986, 998), True, 'import numpy as np\n'), ((1016, 1036), 'numpy.asarray', 'np.asarray', (["ds['ir']"], {}), "(ds['ir'])\n", (1026, 1036), True, 'import numpy as np\n'), ((1054, 1074), 'numpy.asarray', 'np.asarray', (["ds['jc']"], {}), "(ds['jc'])\n", (1064, 1074), True, 'import numpy as np\n'), ((7654, 7700), 'numpy.hstack', 'np.hstack', (['[train_idx, val_idx, test_init_idx]'], {}), '([train_idx, val_idx, test_init_idx])\n', (7663, 7700), True, 'import numpy as np\n'), ((7739, 7770), 'numpy.hstack', 'np.hstack', (['[train_idx, val_idx]'], {}), '([train_idx, val_idx])\n', (7748, 7770), True, 'import numpy as np\n'), ((8091, 8162), 'numpy.array', 'np.array', (['[post_rating_map[r] for r in class_values[labels[train_idx]]]'], {}), '([post_rating_map[r] for r in class_values[labels[train_idx]]])\n', (8099, 8162), True, 'import numpy as np\n'), ((1093, 1122), 'scipy.sparse.csc_matrix', 'sp.csc_matrix', (['(data, ir, jc)'], {}), '((data, ir, 
jc))\n', (1106, 1122), True, 'import scipy.sparse as sp\n'), ((1301, 1315), 'numpy.asarray', 'np.asarray', (['ds'], {}), '(ds)\n', (1311, 1315), True, 'import numpy as np\n'), ((4059, 4077), 'numpy.unique', 'np.unique', (['ratings'], {}), '(ratings)\n', (4068, 4077), True, 'import numpy as np\n')]
|
import numpy as np
def perform_thresholding(f, M, type):
    """
    Threshold the array f. Only 3 types of thresholding are currently
    implemented:
      - "largest": keep roughly the M largest-magnitude entries of f.
      - "soft":    soft thresholding with threshold M, i.e. sign(f) * max(|f| - M, 0).
      - "hard":    hard thresholding, zeroing every entry with |f| <= M.
    """
    if type == "largest":
        a = np.sort(np.ravel(abs(f)))[::-1]  # sort a 1D copy of |f| in descending order
        T = a[M]                            # magnitude of the (M+1)-th largest entry
        y = f * (abs(f) > T)
    elif type == "soft":
        s = abs(f) - M
        s = (s + abs(s)) / 2                # max(|f| - M, 0)
        y = np.sign(f) * s
    elif type == "hard":
        y = f * (abs(f) > M)
    else:
        raise ValueError("unknown thresholding type: %r" % type)
    return y
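# Illustrative usage sketch (not part of the original module); the array shape
# and the threshold values below are arbitrary demo inputs.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    F = rng.randn(8, 8)
    kept = perform_thresholding(F, 10, "largest")   # keep ~10 largest-magnitude entries
    soft = perform_thresholding(F, 0.5, "soft")     # shrink magnitudes by 0.5
    hard = perform_thresholding(F, 0.5, "hard")     # zero entries with |F| <= 0.5
    print((kept != 0).sum(), (hard != 0).sum(), abs(soft).max())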
|
[
"numpy.sign"
] |
[((373, 383), 'numpy.sign', 'np.sign', (['f'], {}), '(f)\n', (380, 383), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# File : test.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 25/01/2018
#
# This file is part of Semantic-Graph-PyTorch.
import json
import os.path as osp
from os.path import join as pjoin
import time
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.cuda as cuda
import jacinle.io as io
from jacinle.cli.argument import JacArgumentParser
from jacinle.logging import get_logger
from jacinle.utils.meter import GroupMeters
from jacinle.utils.tqdm import tqdm_pbar
from jaclearn.embedding.word_embedding import load as load_word_embedding
from jactorch.cuda.copy import async_copy_to
from jactorch.io import load_weights
from jactorch.utils.meta import as_numpy, mark_volatile
from evaluation.completion.cli import ensure_path, format_meters, dump_metainfo
from evaluation.completion.dataset import CompletionDataset, make_dataloader
from evaluation.completion.model import CompletionModel
from vocab import Vocabulary
logger = get_logger(__file__)
parser = JacArgumentParser(description='Semantic graph testing')
parser.add_argument('--load', required=True, type='checked_dir', metavar='DIR', help='path to checkpoint directory')
parser.add_argument('--mode', default='all', choices=['all', 'noun', 'prep'], metavar='M')
parser.add_argument('--use-gpu', default=True, type='bool', metavar='B', help='use GPU or not')
parser.add_argument('--vse', required=True, type='checked_file', metavar='FILE', help='vse file')
parser.add_argument('--glove-only', action='store_true')
parser.add_argument('--data-dir', required=True, type='checked_dir', help='data directory')
parser.add_argument('--dev-img', default='dev_ims.npy', metavar='FILE', help='dev data json file')
parser.add_argument('--dev-cap', default='dev_caps_replace.json', metavar='FILE', help='dev data json file')
parser.add_argument('--test-img', default='test_ims.npy', metavar='FILE', help='testing data json file')
parser.add_argument('--test-cap', default='test_caps_replace.json', metavar='FILE', help='testing data json file')
parser.add_argument('--data-workers', type=int, default=4, metavar='N', help='the num of workers that input testing data')
args = parser.parse_args()
if args.use_gpu:
nr_devs = cuda.device_count()
assert nr_devs > 0, 'No GPU device available'
class Vocab(object):
def __init__(self, idx2word=None, options=None, sync=None):
assert options is None
if sync is not None:
self.idx2word = sync.idx2word
self.word2idx = sync.word2idx
else:
self.idx2word = idx2word
self.word2idx = dict([(w, i) for i, w in enumerate(self.idx2word)])
self.sent_trunc_length = None
@classmethod
def from_pickle(cls, path):
vocab = io.load(path)
return cls(sync=vocab)
def project(self, sentence, is_sent=True):
sentence = sentence.strip().lower().split()
if is_sent:
sentence = ['<start>'] + sentence + ['<end>']
if self.sent_trunc_length is not None:
if len(sentence) > self.sent_trunc_length:
sentence = sentence[:self.sent_trunc_length]
return list(map(lambda word: self.word2idx.get(word, 3), sentence))
def __len__(self):
return len(self.idx2word)
def __call__(self, sent, is_sent=True):
return self.project(sent, is_sent=is_sent)
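# Note: this definition shadows the `load_word_embedding` imported from
# jaclearn above, so main() below resolves to this local version.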
def load_word_embedding(vse):
checkpoint = torch.load(vse)
opt = checkpoint['opt']
vocab = Vocab.from_pickle(pjoin(opt.vocab_path, '%s_vocab.pkl' % opt.data_name))
if not args.glove_only:
embed_weights = checkpoint['model'][1]['embed.weight'].cpu().numpy()
_, glove_weights = io.load('data/snli/glove.pkl')
embed_weights = np.concatenate((glove_weights, embed_weights), axis=1)
else:
_, embed_weights = io.load('data/snli/glove.pkl')
embedding = nn.Embedding(embed_weights.shape[0], embed_weights.shape[1], padding_idx=0)
embedding.weight.data.copy_(torch.from_numpy(embed_weights))
return vocab, embedding
def main():
logger.critical('Loading the word embedding.')
vocab, word_embeddings = load_word_embedding(args.vse)
logger.critical('Building up the model.')
model = CompletionModel(word_embeddings)
if args.use_gpu:
model.cuda()
# Disable the cudnn benchmark.
model.eval()
cudnn.benchmark = False
logger.critical('Loading the dataset.')
dev_dataset = CompletionDataset(vocab, pjoin(args.data_dir, args.dev_img), pjoin(args.data_dir, args.dev_cap), mode=args.mode)
test_dataset = CompletionDataset(vocab, pjoin(args.data_dir, args.test_img), pjoin(args.data_dir, args.test_cap), mode=args.mode)
logger.critical('Building up the data loader.')
dev_dataloader = make_dataloader(dev_dataset, num_workers=args.data_workers, batch_size=64, shuffle=False, drop_last=False, pin_memory=True)
test_dataloader = make_dataloader(test_dataset, num_workers=args.data_workers, batch_size=64, shuffle=False, drop_last=False, pin_memory=True)
for epoch_id in range(1, 11):
load_weights(model, pjoin(args.load, 'epoch_{}.pth'.format(epoch_id)))
for loader in [dev_dataloader, test_dataloader]:
meters = GroupMeters()
end = time.time()
with tqdm_pbar(total=len(loader), leave=False) as pbar:
for i, data in enumerate(loader):
feed_dict = data
feed_dict = mark_volatile(feed_dict)
if args.use_gpu:
feed_dict = async_copy_to(feed_dict, 0)
data_time = time.time() - end; end = time.time()
output_dict = model(feed_dict)
output_dict = as_numpy(output_dict)
gpu_time = time.time() - end; end = time.time()
meters.update({k: float(v) for k, v in output_dict.items() if k.startswith('top')}, n=len(feed_dict['image']))
meters.update({'time/data': data_time, 'time/gpu': gpu_time})
pbar.set_description(format_meters('sentid={}'.format(i), meters.val, '{}={:.4f}', ', '))
pbar.update()
end = time.time()
print(epoch_id, sorted(meters.avg.items()))
if __name__ == '__main__':
main()
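# Example invocation (paths are hypothetical placeholders; the flags are the
# ones defined by the argument parser above):
#   python test.py --load <checkpoint_dir> --vse <vse_checkpoint> --data-dir <data_dir>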
|
[
"jacinle.cli.argument.JacArgumentParser",
"jacinle.utils.meter.GroupMeters",
"jacinle.io.load",
"evaluation.completion.dataset.make_dataloader",
"torch.nn.Embedding",
"torch.load",
"jactorch.utils.meta.as_numpy",
"evaluation.completion.model.CompletionModel",
"torch.cuda.device_count",
"jaclearn.embedding.word_embedding.load",
"time.time",
"jactorch.utils.meta.mark_volatile",
"jacinle.logging.get_logger",
"jactorch.cuda.copy.async_copy_to",
"os.path.join",
"numpy.concatenate",
"torch.from_numpy"
] |
[((1015, 1035), 'jacinle.logging.get_logger', 'get_logger', (['__file__'], {}), '(__file__)\n', (1025, 1035), False, 'from jacinle.logging import get_logger\n'), ((1046, 1101), 'jacinle.cli.argument.JacArgumentParser', 'JacArgumentParser', ([], {'description': '"""Semantic graph testing"""'}), "(description='Semantic graph testing')\n", (1063, 1101), False, 'from jacinle.cli.argument import JacArgumentParser\n'), ((2267, 2286), 'torch.cuda.device_count', 'cuda.device_count', ([], {}), '()\n', (2284, 2286), True, 'import torch.cuda as cuda\n'), ((3472, 3487), 'torch.load', 'torch.load', (['vse'], {}), '(vse)\n', (3482, 3487), False, 'import torch\n'), ((3928, 4003), 'torch.nn.Embedding', 'nn.Embedding', (['embed_weights.shape[0]', 'embed_weights.shape[1]'], {'padding_idx': '(0)'}), '(embed_weights.shape[0], embed_weights.shape[1], padding_idx=0)\n', (3940, 4003), True, 'import torch.nn as nn\n'), ((4191, 4220), 'jaclearn.embedding.word_embedding.load', 'load_word_embedding', (['args.vse'], {}), '(args.vse)\n', (4210, 4220), True, 'from jaclearn.embedding.word_embedding import load as load_word_embedding\n'), ((4280, 4312), 'evaluation.completion.model.CompletionModel', 'CompletionModel', (['word_embeddings'], {}), '(word_embeddings)\n', (4295, 4312), False, 'from evaluation.completion.model import CompletionModel\n'), ((4820, 4947), 'evaluation.completion.dataset.make_dataloader', 'make_dataloader', (['dev_dataset'], {'num_workers': 'args.data_workers', 'batch_size': '(64)', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)'}), '(dev_dataset, num_workers=args.data_workers, batch_size=64,\n shuffle=False, drop_last=False, pin_memory=True)\n', (4835, 4947), False, 'from evaluation.completion.dataset import CompletionDataset, make_dataloader\n'), ((4966, 5094), 'evaluation.completion.dataset.make_dataloader', 'make_dataloader', (['test_dataset'], {'num_workers': 'args.data_workers', 'batch_size': '(64)', 'shuffle': '(False)', 'drop_last': '(False)', 'pin_memory': '(True)'}), '(test_dataset, num_workers=args.data_workers, batch_size=64,\n shuffle=False, drop_last=False, pin_memory=True)\n', (4981, 5094), False, 'from evaluation.completion.dataset import CompletionDataset, make_dataloader\n'), ((2803, 2816), 'jacinle.io.load', 'io.load', (['path'], {}), '(path)\n', (2810, 2816), True, 'import jacinle.io as io\n'), ((3546, 3599), 'os.path.join', 'pjoin', (['opt.vocab_path', "('%s_vocab.pkl' % opt.data_name)"], {}), "(opt.vocab_path, '%s_vocab.pkl' % opt.data_name)\n", (3551, 3599), True, 'from os.path import join as pjoin\n'), ((3734, 3764), 'jacinle.io.load', 'io.load', (['"""data/snli/glove.pkl"""'], {}), "('data/snli/glove.pkl')\n", (3741, 3764), True, 'import jacinle.io as io\n'), ((3789, 3843), 'numpy.concatenate', 'np.concatenate', (['(glove_weights, embed_weights)'], {'axis': '(1)'}), '((glove_weights, embed_weights), axis=1)\n', (3803, 3843), True, 'import numpy as np\n'), ((3881, 3911), 'jacinle.io.load', 'io.load', (['"""data/snli/glove.pkl"""'], {}), "('data/snli/glove.pkl')\n", (3888, 3911), True, 'import jacinle.io as io\n'), ((4036, 4067), 'torch.from_numpy', 'torch.from_numpy', (['embed_weights'], {}), '(embed_weights)\n', (4052, 4067), False, 'import torch\n'), ((4524, 4558), 'os.path.join', 'pjoin', (['args.data_dir', 'args.dev_img'], {}), '(args.data_dir, args.dev_img)\n', (4529, 4558), True, 'from os.path import join as pjoin\n'), ((4560, 4594), 'os.path.join', 'pjoin', (['args.data_dir', 'args.dev_cap'], {}), '(args.data_dir, args.dev_cap)\n', (4565, 4594), 
True, 'from os.path import join as pjoin\n'), ((4656, 4691), 'os.path.join', 'pjoin', (['args.data_dir', 'args.test_img'], {}), '(args.data_dir, args.test_img)\n', (4661, 4691), True, 'from os.path import join as pjoin\n'), ((4693, 4728), 'os.path.join', 'pjoin', (['args.data_dir', 'args.test_cap'], {}), '(args.data_dir, args.test_cap)\n', (4698, 4728), True, 'from os.path import join as pjoin\n'), ((5284, 5297), 'jacinle.utils.meter.GroupMeters', 'GroupMeters', ([], {}), '()\n', (5295, 5297), False, 'from jacinle.utils.meter import GroupMeters\n'), ((5317, 5328), 'time.time', 'time.time', ([], {}), '()\n', (5326, 5328), False, 'import time\n'), ((5516, 5540), 'jactorch.utils.meta.mark_volatile', 'mark_volatile', (['feed_dict'], {}), '(feed_dict)\n', (5529, 5540), False, 'from jactorch.utils.meta import as_numpy, mark_volatile\n'), ((5701, 5712), 'time.time', 'time.time', ([], {}), '()\n', (5710, 5712), False, 'import time\n'), ((5799, 5820), 'jactorch.utils.meta.as_numpy', 'as_numpy', (['output_dict'], {}), '(output_dict)\n', (5807, 5820), False, 'from jactorch.utils.meta import as_numpy, mark_volatile\n'), ((5879, 5890), 'time.time', 'time.time', ([], {}), '()\n', (5888, 5890), False, 'import time\n'), ((6277, 6288), 'time.time', 'time.time', ([], {}), '()\n', (6286, 6288), False, 'import time\n'), ((5615, 5642), 'jactorch.cuda.copy.async_copy_to', 'async_copy_to', (['feed_dict', '(0)'], {}), '(feed_dict, 0)\n', (5628, 5642), False, 'from jactorch.cuda.copy import async_copy_to\n'), ((5676, 5687), 'time.time', 'time.time', ([], {}), '()\n', (5685, 5687), False, 'import time\n'), ((5853, 5864), 'time.time', 'time.time', ([], {}), '()\n', (5862, 5864), False, 'import time\n')]
|
import numpy as np
from bokeh.io import curdoc
from bokeh.plotting import figure
N = 4000
x = np.random.random(size=N) * 100
y = np.random.random(size=N) * 100
radii = np.random.random(size=N) * 1.5
colors = [
"#%02x%02x%02x" % (int(r), int(g), 150) for r, g in zip(50+2*x, 30+2*y)
]
p = figure(tools="", toolbar_location=None)
p.circle(x, y, radius=radii,
fill_color=colors, fill_alpha=0.6,
line_color=None)
curdoc().add_root(p)
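# The plot is attached to curdoc(), so this script is meant to be run as a
# Bokeh server application, e.g. `bokeh serve --show <this_script>.py`.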
|
[
"bokeh.io.curdoc",
"bokeh.plotting.figure",
"numpy.random.random"
] |
[((295, 334), 'bokeh.plotting.figure', 'figure', ([], {'tools': '""""""', 'toolbar_location': 'None'}), "(tools='', toolbar_location=None)\n", (301, 334), False, 'from bokeh.plotting import figure\n'), ((96, 120), 'numpy.random.random', 'np.random.random', ([], {'size': 'N'}), '(size=N)\n', (112, 120), True, 'import numpy as np\n'), ((131, 155), 'numpy.random.random', 'np.random.random', ([], {'size': 'N'}), '(size=N)\n', (147, 155), True, 'import numpy as np\n'), ((170, 194), 'numpy.random.random', 'np.random.random', ([], {'size': 'N'}), '(size=N)\n', (186, 194), True, 'import numpy as np\n'), ((436, 444), 'bokeh.io.curdoc', 'curdoc', ([], {}), '()\n', (442, 444), False, 'from bokeh.io import curdoc\n')]
|
from visibilitygraphs.dubinspath.dubinsCar import DubinsCar
from visibilitygraphs.dubinspath.vanaAirplane import VanaAirplane
from visibilitygraphs.models import AStarVertex
from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
def testDubinsCurve2dLSL():
dubins = DubinsCar()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, psi=0),
AStarVertex(x=1, y=1, psi=np.pi / 2),
.25
)
f = dubinsCurve2d([path.start.x, path.start.y, path.start.psi], path.a, path.b, path.c, path.r, path.type)
t = np.linspace(0, 1, 100)
xy = np.array([f(s) for s in t])
plt.plot(xy[:, 0], xy[:, 1])
plt.title('LSL')
plt.show()
def testDubinsCurve2dLSR():
dubins = DubinsCar()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, psi=0),
AStarVertex(x=1, y=1, psi=0),
.25
)
f = dubinsCurve2d([path.start.x, path.start.y, path.start.psi], path.a, path.b, path.c, path.r, path.type)
t = np.linspace(0, 1, 100)
xy = np.array([f(s) for s in t])
plt.plot(xy[:, 0], xy[:, 1])
plt.title('LSR')
plt.show()
def testDubinsCurve2dRSL():
dubins = DubinsCar()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, psi=0),
AStarVertex(x=1, y=-1, psi=0),
.25
)
f = dubinsCurve2d([path.start.x, path.start.y, path.start.psi], path.a, path.b, path.c, path.r, path.type)
t = np.linspace(0, 1, 100)
xy = np.array([f(s) for s in t])
plt.plot(xy[:, 0], xy[:, 1])
plt.title('RSL')
plt.show()
def testDubinsCurve2dRSR():
dubins = DubinsCar()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, psi=0),
AStarVertex(x=1, y=-1, psi=-np.pi/2),
.25
)
f = dubinsCurve2d([path.start.x, path.start.y, path.start.psi], path.a, path.b, path.c, path.r, path.type)
t = np.linspace(0, 1, 100)
xy = np.array([f(s) for s in t])
plt.plot(xy[:, 0], xy[:, 1])
plt.title('RSR')
plt.show()
def testDubinsCurve2dLRL():
dubins = DubinsCar()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, psi=0),
AStarVertex(x=0, y=-.125, psi=np.pi),
.25
)
f = dubinsCurve2d([path.start.x, path.start.y, path.start.psi], path.a, path.b, path.c, path.r, path.type)
t = np.linspace(0, 1, 100)
xy = np.array([f(s) for s in t])
plt.plot(xy[:, 0], xy[:, 1])
    plt.title('LRL')
plt.show()
def testDubinsCurve2dRLR():
dubins = DubinsCar()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, psi=0),
AStarVertex(x=0, y=.125, psi=np.pi),
.25
)
f = dubinsCurve2d([path.start.x, path.start.y, path.start.psi], path.a, path.b, path.c, path.r, path.type)
t = np.linspace(0, 1, 100)
xy = np.array([f(s) for s in t])
plt.plot(xy[:, 0], xy[:, 1])
    plt.title('RLR')
plt.show()
def testVanaAirplaneCurve():
dubins = VanaAirplane()
path = dubins.calculatePath(
AStarVertex(x=0, y=0, z=0, psi=0, gamma=0),
AStarVertex(x=1, y=1, z=1, psi=0, gamma=0),
.25,
2 * np.pi / 9
)
f = vanaAirplaneCurve(path)
t = np.linspace(0, 1, 100)
x = np.array([f(s) for s in t])
ax = plt.axes(projection='3d')
ax.plot(x[:, 0], x[:, 1], x[:, 2])
plt.title('3d curve')
plt.show()
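# These test_* functions follow pytest's naming convention and open matplotlib
# figures for visual inspection; they can be called individually or collected
# by a test runner such as pytest.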
|
[
"matplotlib.pyplot.title",
"visibilitygraphs.dubinspath.dubinsCar.DubinsCar",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axes",
"visibilitygraphs.dubinspath.helpers.dubinsCurve2d",
"visibilitygraphs.dubinspath.helpers.vanaAirplaneCurve",
"numpy.linspace",
"visibilitygraphs.dubinspath.vanaAirplane.VanaAirplane",
"visibilitygraphs.models.AStarVertex"
] |
[((383, 394), 'visibilitygraphs.dubinspath.dubinsCar.DubinsCar', 'DubinsCar', ([], {}), '()\n', (392, 394), False, 'from visibilitygraphs.dubinspath.dubinsCar import DubinsCar\n'), ((538, 644), 'visibilitygraphs.dubinspath.helpers.dubinsCurve2d', 'dubinsCurve2d', (['[path.start.x, path.start.y, path.start.psi]', 'path.a', 'path.b', 'path.c', 'path.r', 'path.type'], {}), '([path.start.x, path.start.y, path.start.psi], path.a, path.b,\n path.c, path.r, path.type)\n', (551, 644), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((649, 671), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (660, 671), True, 'import numpy as np\n'), ((713, 741), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {}), '(xy[:, 0], xy[:, 1])\n', (721, 741), True, 'import matplotlib.pyplot as plt\n'), ((746, 762), 'matplotlib.pyplot.title', 'plt.title', (['"""LSL"""'], {}), "('LSL')\n", (755, 762), True, 'import matplotlib.pyplot as plt\n'), ((767, 777), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (775, 777), True, 'import matplotlib.pyplot as plt\n'), ((820, 831), 'visibilitygraphs.dubinspath.dubinsCar.DubinsCar', 'DubinsCar', ([], {}), '()\n', (829, 831), False, 'from visibilitygraphs.dubinspath.dubinsCar import DubinsCar\n'), ((967, 1073), 'visibilitygraphs.dubinspath.helpers.dubinsCurve2d', 'dubinsCurve2d', (['[path.start.x, path.start.y, path.start.psi]', 'path.a', 'path.b', 'path.c', 'path.r', 'path.type'], {}), '([path.start.x, path.start.y, path.start.psi], path.a, path.b,\n path.c, path.r, path.type)\n', (980, 1073), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((1078, 1100), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1089, 1100), True, 'import numpy as np\n'), ((1142, 1170), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {}), '(xy[:, 0], xy[:, 1])\n', (1150, 1170), True, 'import matplotlib.pyplot as plt\n'), ((1175, 1191), 'matplotlib.pyplot.title', 'plt.title', (['"""LSR"""'], {}), "('LSR')\n", (1184, 1191), True, 'import matplotlib.pyplot as plt\n'), ((1196, 1206), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1204, 1206), True, 'import matplotlib.pyplot as plt\n'), ((1249, 1260), 'visibilitygraphs.dubinspath.dubinsCar.DubinsCar', 'DubinsCar', ([], {}), '()\n', (1258, 1260), False, 'from visibilitygraphs.dubinspath.dubinsCar import DubinsCar\n'), ((1397, 1503), 'visibilitygraphs.dubinspath.helpers.dubinsCurve2d', 'dubinsCurve2d', (['[path.start.x, path.start.y, path.start.psi]', 'path.a', 'path.b', 'path.c', 'path.r', 'path.type'], {}), '([path.start.x, path.start.y, path.start.psi], path.a, path.b,\n path.c, path.r, path.type)\n', (1410, 1503), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((1508, 1530), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1519, 1530), True, 'import numpy as np\n'), ((1572, 1600), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {}), '(xy[:, 0], xy[:, 1])\n', (1580, 1600), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1621), 'matplotlib.pyplot.title', 'plt.title', (['"""RSL"""'], {}), "('RSL')\n", (1614, 1621), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1636), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1634, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1679, 1690), 'visibilitygraphs.dubinspath.dubinsCar.DubinsCar', 
'DubinsCar', ([], {}), '()\n', (1688, 1690), False, 'from visibilitygraphs.dubinspath.dubinsCar import DubinsCar\n'), ((1834, 1940), 'visibilitygraphs.dubinspath.helpers.dubinsCurve2d', 'dubinsCurve2d', (['[path.start.x, path.start.y, path.start.psi]', 'path.a', 'path.b', 'path.c', 'path.r', 'path.type'], {}), '([path.start.x, path.start.y, path.start.psi], path.a, path.b,\n path.c, path.r, path.type)\n', (1847, 1940), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((1945, 1967), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (1956, 1967), True, 'import numpy as np\n'), ((2009, 2037), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {}), '(xy[:, 0], xy[:, 1])\n', (2017, 2037), True, 'import matplotlib.pyplot as plt\n'), ((2042, 2058), 'matplotlib.pyplot.title', 'plt.title', (['"""RSR"""'], {}), "('RSR')\n", (2051, 2058), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2073), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2071, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2127), 'visibilitygraphs.dubinspath.dubinsCar.DubinsCar', 'DubinsCar', ([], {}), '()\n', (2125, 2127), False, 'from visibilitygraphs.dubinspath.dubinsCar import DubinsCar\n'), ((2271, 2377), 'visibilitygraphs.dubinspath.helpers.dubinsCurve2d', 'dubinsCurve2d', (['[path.start.x, path.start.y, path.start.psi]', 'path.a', 'path.b', 'path.c', 'path.r', 'path.type'], {}), '([path.start.x, path.start.y, path.start.psi], path.a, path.b,\n path.c, path.r, path.type)\n', (2284, 2377), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((2382, 2404), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (2393, 2404), True, 'import numpy as np\n'), ((2446, 2474), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {}), '(xy[:, 0], xy[:, 1])\n', (2454, 2474), True, 'import matplotlib.pyplot as plt\n'), ((2479, 2495), 'matplotlib.pyplot.title', 'plt.title', (['"""RSL"""'], {}), "('RSL')\n", (2488, 2495), True, 'import matplotlib.pyplot as plt\n'), ((2500, 2510), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2508, 2510), True, 'import matplotlib.pyplot as plt\n'), ((2553, 2564), 'visibilitygraphs.dubinspath.dubinsCar.DubinsCar', 'DubinsCar', ([], {}), '()\n', (2562, 2564), False, 'from visibilitygraphs.dubinspath.dubinsCar import DubinsCar\n'), ((2707, 2813), 'visibilitygraphs.dubinspath.helpers.dubinsCurve2d', 'dubinsCurve2d', (['[path.start.x, path.start.y, path.start.psi]', 'path.a', 'path.b', 'path.c', 'path.r', 'path.type'], {}), '([path.start.x, path.start.y, path.start.psi], path.a, path.b,\n path.c, path.r, path.type)\n', (2720, 2813), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((2818, 2840), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (2829, 2840), True, 'import numpy as np\n'), ((2882, 2910), 'matplotlib.pyplot.plot', 'plt.plot', (['xy[:, 0]', 'xy[:, 1]'], {}), '(xy[:, 0], xy[:, 1])\n', (2890, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2931), 'matplotlib.pyplot.title', 'plt.title', (['"""RSL"""'], {}), "('RSL')\n", (2924, 2931), True, 'import matplotlib.pyplot as plt\n'), ((2936, 2946), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2944, 2946), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3004), 'visibilitygraphs.dubinspath.vanaAirplane.VanaAirplane', 'VanaAirplane', ([], {}), '()\n', (3002, 
3004), False, 'from visibilitygraphs.dubinspath.vanaAirplane import VanaAirplane\n'), ((3191, 3214), 'visibilitygraphs.dubinspath.helpers.vanaAirplaneCurve', 'vanaAirplaneCurve', (['path'], {}), '(path)\n', (3208, 3214), False, 'from visibilitygraphs.dubinspath.helpers import dubinsCurve2d, vanaAirplaneCurve\n'), ((3223, 3245), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (3234, 3245), True, 'import numpy as np\n'), ((3291, 3316), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (3299, 3316), True, 'import matplotlib.pyplot as plt\n'), ((3360, 3381), 'matplotlib.pyplot.title', 'plt.title', (['"""3d curve"""'], {}), "('3d curve')\n", (3369, 3381), True, 'import matplotlib.pyplot as plt\n'), ((3386, 3396), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3394, 3396), True, 'import matplotlib.pyplot as plt\n'), ((436, 464), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'psi': '(0)'}), '(x=0, y=0, psi=0)\n', (447, 464), False, 'from visibilitygraphs.models import AStarVertex\n'), ((474, 510), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(1)', 'y': '(1)', 'psi': '(np.pi / 2)'}), '(x=1, y=1, psi=np.pi / 2)\n', (485, 510), False, 'from visibilitygraphs.models import AStarVertex\n'), ((873, 901), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'psi': '(0)'}), '(x=0, y=0, psi=0)\n', (884, 901), False, 'from visibilitygraphs.models import AStarVertex\n'), ((911, 939), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(1)', 'y': '(1)', 'psi': '(0)'}), '(x=1, y=1, psi=0)\n', (922, 939), False, 'from visibilitygraphs.models import AStarVertex\n'), ((1302, 1330), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'psi': '(0)'}), '(x=0, y=0, psi=0)\n', (1313, 1330), False, 'from visibilitygraphs.models import AStarVertex\n'), ((1340, 1369), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(1)', 'y': '(-1)', 'psi': '(0)'}), '(x=1, y=-1, psi=0)\n', (1351, 1369), False, 'from visibilitygraphs.models import AStarVertex\n'), ((1732, 1760), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'psi': '(0)'}), '(x=0, y=0, psi=0)\n', (1743, 1760), False, 'from visibilitygraphs.models import AStarVertex\n'), ((1770, 1808), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(1)', 'y': '(-1)', 'psi': '(-np.pi / 2)'}), '(x=1, y=-1, psi=-np.pi / 2)\n', (1781, 1808), False, 'from visibilitygraphs.models import AStarVertex\n'), ((2169, 2197), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'psi': '(0)'}), '(x=0, y=0, psi=0)\n', (2180, 2197), False, 'from visibilitygraphs.models import AStarVertex\n'), ((2207, 2244), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(-0.125)', 'psi': 'np.pi'}), '(x=0, y=-0.125, psi=np.pi)\n', (2218, 2244), False, 'from visibilitygraphs.models import AStarVertex\n'), ((2606, 2634), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'psi': '(0)'}), '(x=0, y=0, psi=0)\n', (2617, 2634), False, 'from visibilitygraphs.models import AStarVertex\n'), ((2644, 2680), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0.125)', 'psi': 'np.pi'}), '(x=0, y=0.125, psi=np.pi)\n', (2655, 2680), False, 'from visibilitygraphs.models import AStarVertex\n'), ((3046, 3088), 
'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(0)', 'y': '(0)', 'z': '(0)', 'psi': '(0)', 'gamma': '(0)'}), '(x=0, y=0, z=0, psi=0, gamma=0)\n', (3057, 3088), False, 'from visibilitygraphs.models import AStarVertex\n'), ((3098, 3140), 'visibilitygraphs.models.AStarVertex', 'AStarVertex', ([], {'x': '(1)', 'y': '(1)', 'z': '(1)', 'psi': '(0)', 'gamma': '(0)'}), '(x=1, y=1, z=1, psi=0, gamma=0)\n', (3109, 3140), False, 'from visibilitygraphs.models import AStarVertex\n')]
|
## Automatically adapted for scipy Oct 07, 2005 by convertcode.py
from scipy.optimize import minpack2
import numpy
import __builtin__
pymin = __builtin__.min
def line_search(f, myfprime, xk, pk, gfk, old_fval, old_old_fval,
args=(), c1=1e-4, c2=0.9, amax=50):
fc = 0
gc = 0
phi0 = old_fval
derphi0 = numpy.dot(gfk,pk)
alpha1 = pymin(1.0,1.01*2*(phi0-old_old_fval)/derphi0)
# trevor: added this test
alpha1 = pymin(alpha1,amax)
if isinstance(myfprime,type(())):
eps = myfprime[1]
fprime = myfprime[0]
newargs = (f,eps) + args
gradient = False
else:
fprime = myfprime
newargs = args
gradient = True
xtol = 1e-14
amin = 1e-8
isave = numpy.zeros((2,), numpy.intc)
dsave = numpy.zeros((13,), float)
task = 'START'
fval = old_fval
gval = gfk
while 1:
stp,fval,derphi,task = minpack2.dcsrch(alpha1, phi0, derphi0, c1, c2,
xtol, task, amin, amax,isave,dsave)
#print 'minpack2.dcsrch', alpha1, phi0, derphi0, c1, c2, xtol, task, amin, amax,isave,dsave
#print 'returns', stp,fval,derphi,task
if task[:2] == 'FG':
alpha1 = stp
fval = f(xk+stp*pk,*args)
fc += 1
gval = fprime(xk+stp*pk,*newargs)
if gradient: gc += 1
else: fc += len(xk) + 1
phi0 = fval
derphi0 = numpy.dot(gval,pk)
else:
break
    if task[:5] == 'ERROR' or task[:4] == 'WARN':
stp = None # failed
return stp, fc, gc, fval, old_fval, gval
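# Illustrative sketch (not part of the original module): a Wolfe line search on
# the quadratic f(x) = dot(x, x) along the steepest-descent direction. The
# starting point and the "previous" function value are arbitrary demo choices,
# and the sketch assumes a SciPy build that still ships scipy.optimize.minpack2.
if __name__ == '__main__':
    def f(x):
        return numpy.dot(x, x)
    def fprime(x):
        return 2 * x
    xk = numpy.array([1.0, 1.0])
    gfk = fprime(xk)
    pk = -gfk                                            # steepest-descent direction
    old_fval = f(xk)
    old_old_fval = old_fval + numpy.linalg.norm(gfk)   # heuristic previous value
    stp, fc, gc, fval, old_fval, gval = line_search(
        f, fprime, xk, pk, gfk, old_fval, old_old_fval)
    print(stp, fc, gc, fval)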
|
[
"scipy.optimize.minpack2.dcsrch",
"numpy.dot",
"numpy.zeros"
] |
[((336, 354), 'numpy.dot', 'numpy.dot', (['gfk', 'pk'], {}), '(gfk, pk)\n', (345, 354), False, 'import numpy\n'), ((756, 785), 'numpy.zeros', 'numpy.zeros', (['(2,)', 'numpy.intc'], {}), '((2,), numpy.intc)\n', (767, 785), False, 'import numpy\n'), ((798, 823), 'numpy.zeros', 'numpy.zeros', (['(13,)', 'float'], {}), '((13,), float)\n', (809, 823), False, 'import numpy\n'), ((923, 1011), 'scipy.optimize.minpack2.dcsrch', 'minpack2.dcsrch', (['alpha1', 'phi0', 'derphi0', 'c1', 'c2', 'xtol', 'task', 'amin', 'amax', 'isave', 'dsave'], {}), '(alpha1, phi0, derphi0, c1, c2, xtol, task, amin, amax,\n isave, dsave)\n', (938, 1011), False, 'from scipy.optimize import minpack2\n'), ((1474, 1493), 'numpy.dot', 'numpy.dot', (['gval', 'pk'], {}), '(gval, pk)\n', (1483, 1493), False, 'import numpy\n')]
|
import cv2
import numpy as np
import SimpleITK as sitk
def auto_region_growing(img):
    clicks = []
    # work on a grayscale copy so the caller's image is left untouched
    image = img.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
imgInput = sitk.GetImageFromArray(image)
ret, thresh = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
kernel = np.ones((5, 5), np.uint8)
kernel2 = np.ones((3, 3), np.uint8)
thresh = cv2.erode(thresh, kernel)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel2, iterations=3)
contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_TC89_L1)
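    # use the centroid (via image moments) of every thresholded contour as a
    # region-growing seed point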
for c in contours:
M = cv2.moments(c)
if M["m00"] != 0:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
clicks.append((cY, cX))
c = clicks[0]
e = image[c[0], c[1]]
seg = sitk.ConnectedThreshold(imgInput, seedList=clicks, lower=int(e - 50), upper=int(e + 50))
img_arr = sitk.GetArrayFromImage(seg)
outimg = np.array(255 * img_arr, dtype='uint8')
edges = cv2.Canny(outimg, 100, 200)
return outimg,edges
if __name__ == '__main__':
image = cv2.imread('bird.jpg', 1)
outimg,edges=auto_region_growing(image)
cv2.imshow('Region Growing', outimg)
cv2.waitKey()
cv2.imshow('edges',edges)
cv2.waitKey()
cv2.destroyAllWindows()
|
[
"cv2.Canny",
"cv2.cvtColor",
"cv2.morphologyEx",
"cv2.threshold",
"cv2.waitKey",
"cv2.destroyAllWindows",
"numpy.ones",
"SimpleITK.GetArrayFromImage",
"cv2.moments",
"cv2.imread",
"numpy.array",
"SimpleITK.GetImageFromArray",
"cv2.erode",
"cv2.imshow"
] |
[((133, 172), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (145, 172), False, 'import cv2\n'), ((190, 219), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['image'], {}), '(image)\n', (212, 219), True, 'import SimpleITK as sitk\n'), ((239, 308), 'cv2.threshold', 'cv2.threshold', (['image', '(0)', '(255)', '(cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)'], {}), '(image, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)\n', (252, 308), False, 'import cv2\n'), ((322, 347), 'numpy.ones', 'np.ones', (['(5, 5)', 'np.uint8'], {}), '((5, 5), np.uint8)\n', (329, 347), True, 'import numpy as np\n'), ((362, 387), 'numpy.ones', 'np.ones', (['(3, 3)', 'np.uint8'], {}), '((3, 3), np.uint8)\n', (369, 387), True, 'import numpy as np\n'), ((401, 426), 'cv2.erode', 'cv2.erode', (['thresh', 'kernel'], {}), '(thresh, kernel)\n', (410, 426), False, 'import cv2\n'), ((440, 503), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_OPEN', 'kernel2'], {'iterations': '(3)'}), '(thresh, cv2.MORPH_OPEN, kernel2, iterations=3)\n', (456, 503), False, 'import cv2\n'), ((961, 988), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['seg'], {}), '(seg)\n', (983, 988), True, 'import SimpleITK as sitk\n'), ((1002, 1040), 'numpy.array', 'np.array', (['(255 * img_arr)'], {'dtype': '"""uint8"""'}), "(255 * img_arr, dtype='uint8')\n", (1010, 1040), True, 'import numpy as np\n'), ((1053, 1080), 'cv2.Canny', 'cv2.Canny', (['outimg', '(100)', '(200)'], {}), '(outimg, 100, 200)\n', (1062, 1080), False, 'import cv2\n'), ((1145, 1170), 'cv2.imread', 'cv2.imread', (['"""bird.jpg"""', '(1)'], {}), "('bird.jpg', 1)\n", (1155, 1170), False, 'import cv2\n'), ((1219, 1255), 'cv2.imshow', 'cv2.imshow', (['"""Region Growing"""', 'outimg'], {}), "('Region Growing', outimg)\n", (1229, 1255), False, 'import cv2\n'), ((1260, 1273), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1271, 1273), False, 'import cv2\n'), ((1278, 1304), 'cv2.imshow', 'cv2.imshow', (['"""edges"""', 'edges'], {}), "('edges', edges)\n", (1288, 1304), False, 'import cv2\n'), ((1308, 1321), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (1319, 1321), False, 'import cv2\n'), ((1326, 1349), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1347, 1349), False, 'import cv2\n'), ((641, 655), 'cv2.moments', 'cv2.moments', (['c'], {}), '(c)\n', (652, 655), False, 'import cv2\n')]
|
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
class STFT(nn.Module):
def __init__(self, fftsize, window_size, stride, win_type="default", trainable=False, online=False):
super(STFT, self).__init__()
self.fftsize = fftsize
self.window_size = window_size
self.stride = stride
if win_type=="default": # sin window
self.window_func = np.sqrt(np.hanning(self.window_size))
elif win_type=="hanning":
self.window_func = np.hanning(self.window_size)
fcoef_r = np.zeros((self.fftsize//2 + 1, 1, self.window_size))
fcoef_i = np.zeros((self.fftsize//2 + 1, 1, self.window_size))
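        # Fill the real/imaginary DFT basis (one kernel per frequency bin) so
        # that the STFT can be evaluated as a strided 1-D convolution below.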
for w in range(self.fftsize//2+1):
for t in range(self.window_size):
fcoef_r[w, 0, t] = np.cos(2. * np.pi * w * t / self.fftsize)
fcoef_i[w, 0, t] = -np.sin(2. * np.pi * w * t / self.fftsize)
fcoef_r = fcoef_r * self.window_func
fcoef_i = fcoef_i * self.window_func
self.fcoef_r = th.tensor(fcoef_r, dtype=th.float)
self.fcoef_i = th.tensor(fcoef_i, dtype=th.float)
self.encoder_r = nn.Conv1d(1, self.fftsize//2+1, self.window_size, bias=False, stride=self.stride)
self.encoder_i = nn.Conv1d(1, self.fftsize//2+1, self.window_size, bias=False, stride=self.stride)
self.encoder_r.weight = th.nn.Parameter(self.fcoef_r)
self.encoder_i.weight = th.nn.Parameter(self.fcoef_i)
if trainable:
self.encoder_r.weight.requires_grad = True
self.encoder_i.weight.requires_grad = True
else:
self.encoder_r.weight.requires_grad = False
self.encoder_i.weight.requires_grad = False
# for online
if online:
#self.input_buffer = th.tensor(np.zeros([1,1,self.window_size]),dtype=th.float,device=th.device('cpu'))
self.input_buffer = th.tensor(np.zeros([1,1,self.window_size]),dtype=th.float)
#import pdb; pdb.set_trace()
def set_buffer_device(self, device):
self.input_buffer = self.input_buffer.to(device)
#import pdb; pdb.set_trace()
return
def forward(self, input):
stft_stride = self.stride
stft_window_size = self.window_size
stft_fftsize = self.fftsize
spec_r = self.encoder_r(input)
spec_i = self.encoder_i(input)
x_spec_real = spec_r[:,1:,:] # remove DC
x_spec_imag = spec_i[:,1:,:] # remove DC
output = th.cat([x_spec_real,x_spec_imag],dim=1)
return output
def forward_online(self, input):
self.input_buffer[:,:,self.window_size-self.stride:] = input
spec_r = self.encoder_r(self.input_buffer)
spec_i = self.encoder_i(self.input_buffer)
output = th.cat([spec_r[:,1:,:],spec_i[:,1:,:]],dim=1)
self.input_buffer[:,:,:self.window_size-self.stride] = self.input_buffer[:,:,self.stride:]
return output
class ISTFT(nn.Module):
def __init__(self, fftsize, window_size, stride, win_type="default", trainable=False, online=False):
super(ISTFT, self).__init__()
self.fftsize = fftsize
self.window_size = window_size
self.stride = stride
gain_ifft = (2.0*self.stride) / self.window_size
if win_type=="default":
self.window_func = gain_ifft * np.sqrt(np.hanning(self.window_size) )
elif win_type=="hanning":
self.window_func = gain_ifft * np.hanning(self.window_size)
coef_cos = np.zeros((self.fftsize//2 + 1, 1, self.window_size))
coef_sin = np.zeros((self.fftsize//2 + 1, 1, self.window_size))
for w in range(self.fftsize//2+1):
alpha = 1.0 if w==0 or w==fftsize//2 else 2.0
alpha /= fftsize
#print alpha
for t in range(self.window_size):
coef_cos[w, 0, t] = alpha * np.cos(2. * np.pi * w * t / self.fftsize)
coef_sin[w, 0, t] = alpha * np.sin(2. * np.pi * w * t / self.fftsize)
self.coef_cos = th.tensor(coef_cos * self.window_func, dtype=th.float)
self.coef_sin = th.tensor(coef_sin * self.window_func, dtype=th.float)
self.decoder_re = nn.ConvTranspose1d(self.fftsize//2+1, 1, self.window_size, bias=False, stride=self.stride)
self.decoder_im = nn.ConvTranspose1d(self.fftsize//2+1, 1, self.window_size, bias=False, stride=self.stride)
self.decoder_re.weight = th.nn.Parameter(self.coef_cos)
self.decoder_im.weight = th.nn.Parameter(self.coef_sin)
if trainable:
self.decoder_re.weight.requires_grad = True
self.decoder_im.weight.requires_grad = True
else:
self.decoder_re.weight.requires_grad = False
self.decoder_im.weight.requires_grad = False
# for online
if online:
self.output_buffer = th.tensor(np.zeros([1,1,self.window_size]),dtype=th.float,device=th.device('cpu'))
self.pad_dc = th.tensor(np.zeros([1,1,1]),dtype=th.float,device=th.device('cpu'))
def set_buffer_device(self, device):
self.output_buffer = self.output_buffer.to(device)
self.pad_dc = self.pad_dc.to(device)
return
def forward(self, input):
batch_size = input.shape[0]
frame_size = input.shape[2]
stft_stride = self.stride
stft_window_size = self.window_size
stft_fft_size = self.fftsize
pad_real_dc = th.tensor(np.zeros([batch_size, 1, frame_size]),dtype=th.float,device=th.device(input.device))
pad_imag_dc = th.tensor(np.zeros([batch_size, 1, frame_size]),dtype=th.float,device=th.device(input.device))
real_part = th.cat([pad_real_dc,input[:,:self.fftsize//2,:]],dim=1)
imag_part = th.cat([pad_imag_dc,input[:,self.fftsize//2:,:]],dim=1)
time_cos = self.decoder_re(real_part)
time_sin = self.decoder_im(imag_part)
output = time_cos - time_sin
return output
def forward_online(self, input):
real_part = th.cat([self.pad_dc,input[:,:self.fftsize//2,:]],dim=1)
imag_part = th.cat([self.pad_dc,input[:,self.fftsize//2:,:]],dim=1)
time_cos = self.decoder_re(real_part)
time_sin = self.decoder_im(imag_part)
self.output_buffer[:,:,:self.window_size-self.stride] = self.output_buffer[:,:,self.stride:]
self.output_buffer[:,:,self.window_size-self.stride:] = 0.0
self.output_buffer = self.output_buffer + time_cos - time_sin
output = self.output_buffer[:,:,:self.stride]
return output
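# Illustrative round-trip sketch (not part of the original module); the signal
# length and the fftsize / window_size / stride values are arbitrary
# assumptions chosen for demonstration.
if __name__ == "__main__":
    fftsize, window_size, stride = 512, 512, 128
    stft = STFT(fftsize, window_size, stride)
    istft = ISTFT(fftsize, window_size, stride)
    x = th.randn(1, 1, 16000)   # (batch, channels, samples)
    spec = stft(x)              # (batch, fftsize, frames): real and imag parts, DC bin removed
    y = istft(spec)             # (batch, 1, samples)
    # Reconstruction is only approximate: the DC bin is discarded in
    # STFT.forward and zero-padded back in ISTFT.forward, and the first/last
    # frames lack full overlap-add coverage.
    print(x.shape, spec.shape, y.shape)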
|
[
"torch.nn.Parameter",
"torch.nn.Conv1d",
"numpy.zeros",
"torch.cat",
"torch.nn.ConvTranspose1d",
"numpy.sin",
"numpy.cos",
"torch.device",
"numpy.hanning",
"torch.tensor"
] |
[((623, 677), 'numpy.zeros', 'np.zeros', (['(self.fftsize // 2 + 1, 1, self.window_size)'], {}), '((self.fftsize // 2 + 1, 1, self.window_size))\n', (631, 677), True, 'import numpy as np\n'), ((694, 748), 'numpy.zeros', 'np.zeros', (['(self.fftsize // 2 + 1, 1, self.window_size)'], {}), '((self.fftsize // 2 + 1, 1, self.window_size))\n', (702, 748), True, 'import numpy as np\n'), ((1108, 1142), 'torch.tensor', 'th.tensor', (['fcoef_r'], {'dtype': 'th.float'}), '(fcoef_r, dtype=th.float)\n', (1117, 1142), True, 'import torch as th\n'), ((1166, 1200), 'torch.tensor', 'th.tensor', (['fcoef_i'], {'dtype': 'th.float'}), '(fcoef_i, dtype=th.float)\n', (1175, 1200), True, 'import torch as th\n'), ((1227, 1317), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(self.fftsize // 2 + 1)', 'self.window_size'], {'bias': '(False)', 'stride': 'self.stride'}), '(1, self.fftsize // 2 + 1, self.window_size, bias=False, stride=\n self.stride)\n', (1236, 1317), True, 'import torch.nn as nn\n'), ((1334, 1424), 'torch.nn.Conv1d', 'nn.Conv1d', (['(1)', '(self.fftsize // 2 + 1)', 'self.window_size'], {'bias': '(False)', 'stride': 'self.stride'}), '(1, self.fftsize // 2 + 1, self.window_size, bias=False, stride=\n self.stride)\n', (1343, 1424), True, 'import torch.nn as nn\n'), ((1450, 1479), 'torch.nn.Parameter', 'th.nn.Parameter', (['self.fcoef_r'], {}), '(self.fcoef_r)\n', (1465, 1479), True, 'import torch as th\n'), ((1512, 1541), 'torch.nn.Parameter', 'th.nn.Parameter', (['self.fcoef_i'], {}), '(self.fcoef_i)\n', (1527, 1541), True, 'import torch as th\n'), ((2584, 2625), 'torch.cat', 'th.cat', (['[x_spec_real, x_spec_imag]'], {'dim': '(1)'}), '([x_spec_real, x_spec_imag], dim=1)\n', (2590, 2625), True, 'import torch as th\n'), ((2873, 2924), 'torch.cat', 'th.cat', (['[spec_r[:, 1:, :], spec_i[:, 1:, :]]'], {'dim': '(1)'}), '([spec_r[:, 1:, :], spec_i[:, 1:, :]], dim=1)\n', (2879, 2924), True, 'import torch as th\n'), ((3617, 3671), 'numpy.zeros', 'np.zeros', (['(self.fftsize // 2 + 1, 1, self.window_size)'], {}), '((self.fftsize // 2 + 1, 1, self.window_size))\n', (3625, 3671), True, 'import numpy as np\n'), ((3689, 3743), 'numpy.zeros', 'np.zeros', (['(self.fftsize // 2 + 1, 1, self.window_size)'], {}), '((self.fftsize // 2 + 1, 1, self.window_size))\n', (3697, 3743), True, 'import numpy as np\n'), ((4141, 4195), 'torch.tensor', 'th.tensor', (['(coef_cos * self.window_func)'], {'dtype': 'th.float'}), '(coef_cos * self.window_func, dtype=th.float)\n', (4150, 4195), True, 'import torch as th\n'), ((4220, 4274), 'torch.tensor', 'th.tensor', (['(coef_sin * self.window_func)'], {'dtype': 'th.float'}), '(coef_sin * self.window_func, dtype=th.float)\n', (4229, 4274), True, 'import torch as th\n'), ((4302, 4400), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(self.fftsize // 2 + 1)', '(1)', 'self.window_size'], {'bias': '(False)', 'stride': 'self.stride'}), '(self.fftsize // 2 + 1, 1, self.window_size, bias=False,\n stride=self.stride)\n', (4320, 4400), True, 'import torch.nn as nn\n'), ((4419, 4517), 'torch.nn.ConvTranspose1d', 'nn.ConvTranspose1d', (['(self.fftsize // 2 + 1)', '(1)', 'self.window_size'], {'bias': '(False)', 'stride': 'self.stride'}), '(self.fftsize // 2 + 1, 1, self.window_size, bias=False,\n stride=self.stride)\n', (4437, 4517), True, 'import torch.nn as nn\n'), ((4544, 4574), 'torch.nn.Parameter', 'th.nn.Parameter', (['self.coef_cos'], {}), '(self.coef_cos)\n', (4559, 4574), True, 'import torch as th\n'), ((4608, 4638), 'torch.nn.Parameter', 'th.nn.Parameter', (['self.coef_sin'], {}), 
'(self.coef_sin)\n', (4623, 4638), True, 'import torch as th\n'), ((5792, 5853), 'torch.cat', 'th.cat', (['[pad_real_dc, input[:, :self.fftsize // 2, :]]'], {'dim': '(1)'}), '([pad_real_dc, input[:, :self.fftsize // 2, :]], dim=1)\n', (5798, 5853), True, 'import torch as th\n'), ((5868, 5929), 'torch.cat', 'th.cat', (['[pad_imag_dc, input[:, self.fftsize // 2:, :]]'], {'dim': '(1)'}), '([pad_imag_dc, input[:, self.fftsize // 2:, :]], dim=1)\n', (5874, 5929), True, 'import torch as th\n'), ((6138, 6199), 'torch.cat', 'th.cat', (['[self.pad_dc, input[:, :self.fftsize // 2, :]]'], {'dim': '(1)'}), '([self.pad_dc, input[:, :self.fftsize // 2, :]], dim=1)\n', (6144, 6199), True, 'import torch as th\n'), ((6214, 6275), 'torch.cat', 'th.cat', (['[self.pad_dc, input[:, self.fftsize // 2:, :]]'], {'dim': '(1)'}), '([self.pad_dc, input[:, self.fftsize // 2:, :]], dim=1)\n', (6220, 6275), True, 'import torch as th\n'), ((5568, 5605), 'numpy.zeros', 'np.zeros', (['[batch_size, 1, frame_size]'], {}), '([batch_size, 1, frame_size])\n', (5576, 5605), True, 'import numpy as np\n'), ((5685, 5722), 'numpy.zeros', 'np.zeros', (['[batch_size, 1, frame_size]'], {}), '([batch_size, 1, frame_size])\n', (5693, 5722), True, 'import numpy as np\n'), ((479, 507), 'numpy.hanning', 'np.hanning', (['self.window_size'], {}), '(self.window_size)\n', (489, 507), True, 'import numpy as np\n'), ((574, 602), 'numpy.hanning', 'np.hanning', (['self.window_size'], {}), '(self.window_size)\n', (584, 602), True, 'import numpy as np\n'), ((872, 914), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * w * t / self.fftsize)'], {}), '(2.0 * np.pi * w * t / self.fftsize)\n', (878, 914), True, 'import numpy as np\n'), ((2001, 2035), 'numpy.zeros', 'np.zeros', (['[1, 1, self.window_size]'], {}), '([1, 1, self.window_size])\n', (2009, 2035), True, 'import numpy as np\n'), ((4986, 5020), 'numpy.zeros', 'np.zeros', (['[1, 1, self.window_size]'], {}), '([1, 1, self.window_size])\n', (4994, 5020), True, 'import numpy as np\n'), ((5095, 5114), 'numpy.zeros', 'np.zeros', (['[1, 1, 1]'], {}), '([1, 1, 1])\n', (5103, 5114), True, 'import numpy as np\n'), ((5628, 5651), 'torch.device', 'th.device', (['input.device'], {}), '(input.device)\n', (5637, 5651), True, 'import torch as th\n'), ((5745, 5768), 'torch.device', 'th.device', (['input.device'], {}), '(input.device)\n', (5754, 5768), True, 'import torch as th\n'), ((950, 992), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * w * t / self.fftsize)'], {}), '(2.0 * np.pi * w * t / self.fftsize)\n', (956, 992), True, 'import numpy as np\n'), ((3459, 3487), 'numpy.hanning', 'np.hanning', (['self.window_size'], {}), '(self.window_size)\n', (3469, 3487), True, 'import numpy as np\n'), ((3567, 3595), 'numpy.hanning', 'np.hanning', (['self.window_size'], {}), '(self.window_size)\n', (3577, 3595), True, 'import numpy as np\n'), ((3988, 4030), 'numpy.cos', 'np.cos', (['(2.0 * np.pi * w * t / self.fftsize)'], {}), '(2.0 * np.pi * w * t / self.fftsize)\n', (3994, 4030), True, 'import numpy as np\n'), ((4074, 4116), 'numpy.sin', 'np.sin', (['(2.0 * np.pi * w * t / self.fftsize)'], {}), '(2.0 * np.pi * w * t / self.fftsize)\n', (4080, 4116), True, 'import numpy as np\n'), ((5041, 5057), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (5050, 5057), True, 'import torch as th\n'), ((5135, 5151), 'torch.device', 'th.device', (['"""cpu"""'], {}), "('cpu')\n", (5144, 5151), True, 'import torch as th\n')]
|
import cv2
import numpy as np
import params
METERS_PER_ENCODER_TICK = params.WHEEL_TICK_LENGTH
def draw_steering(bgr, steering, servo, center=(320, 420)):
# make steering wheel, lower center
#servo = 128*(servo - 125)/70.0
servo = steering
# sdeg = steering # just 1:1 i guess?
sdeg = params.STEER_DIRECTION*servo # just 1:1 i guess?
srad = sdeg * np.pi / 180.0
S, C = 16*30*np.sin(srad), 16*30*np.cos(srad)
cv2.circle(bgr, center, 30, (255, 255, 255), 1, cv2.LINE_AA)
scenter = (center[0]*16, center[1]*16)
cv2.line(bgr, (int(scenter[0] - C), int(scenter[1] + S)),
(int(scenter[0] + C), int(scenter[1] - S)),
(255, 255, 255), 1, cv2.LINE_AA, 4)
cv2.ellipse(bgr, center, (30, 30), 0, -90, -90 + steering,
(255, 180, 180), 5, cv2.LINE_AA)
cv2.ellipse(bgr, center, (30, 30), 0, -90, -90 + servo,
(0, 180, 255), 2, cv2.LINE_AA)
last_ts = None
last_wheels = None
def draw_speed(bgr, tstamp, wheels, periods, center=(40, 420), radius=30):
# draw a little spedometer in the lower left
# just draw the needle for each period now
global last_ts, last_wheels
av = np.mean(periods[:params.NUM_ENCODERS])
if av != 0:
av = METERS_PER_ENCODER_TICK * 1e6 / av
# cv2.putText(bgr, "%0.1f %0.1f %0.1f %0.1f m/s" % tuple(v), (10, 470),
# cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1,
# cv2.LINE_AA)
if last_ts is None:
last_ts = tstamp
last_wheels = wheels
return
dw = wheels - last_wheels
if np.all(dw == 0):
last_ts = tstamp
last_wheels = wheels
return
# vv = METERS_PER_ENCODER_TICK * np.float32(dw) / (tstamp - last_ts)
# av = 0.5 * np.mean(v[dw != 0] + vv[dw != 0])
mph = 2.23694 * av
# draw ticks
for i in range(13):
phi = (i - 6) * 0.4
C, S = radius * np.cos(phi), radius * np.sin(phi)
cv2.line(bgr, (int(center[0] + S), int(center[1] - C)),
(int(center[0] + 0.8*S), int(center[1] - 0.8*C)),
(255, 255, 255), 1, cv2.LINE_AA)
phi = (mph - 6) * 0.4
C, S = radius * np.cos(phi), radius * np.sin(phi)
cv2.line(bgr, (int(center[0] + S), int(center[1] - C)),
(int(center[0]), int(center[1])),
(180, 255, 180), 2, cv2.LINE_AA)
cv2.putText(bgr, "%0.1f mph" % (mph), (center[0] - 10, center[1] + 40),
cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1,
cv2.LINE_AA)
last_ts = tstamp
last_wheels = wheels
def draw_throttle(img, throttle, center=(320, 470)):
    cv2.line(img, center, (center[0] + throttle, center[1]),
             (0, 255, 0) if throttle > 0 else (0, 95, 255), 5)
def draw_accelerometer(bgr, accel, gyro, center=(470, 470)):
cv2.circle(bgr, center, 30, (255, 255, 255), 1, cv2.LINE_AA)
cv2.ellipse(bgr, center, (30, 30), 0, -90, -90 - 180*gyro[2] / np.pi,
(100, 255, 180), 3, cv2.LINE_AA)
cv2.line(bgr, center, (int(center[0] - accel[1]*30),
int(center[1] + accel[0]*30)),
(100, 255, 100), 2, cv2.LINE_AA)
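# --- Usage sketch (illustrative, not part of the original module) ----------
# Renders the HUD overlays onto a blank frame. `params` must still provide
# NUM_ENCODERS / STEER_DIRECTION / WHEEL_TICK_LENGTH (already required above);
# all numeric values below are made-up demo inputs. draw_speed() is called
# twice because the first call only seeds the last_ts/last_wheels globals.
if __name__ == '__main__':
    frame = np.zeros((480, 640, 3), dtype=np.uint8)
    draw_steering(frame, steering=20, servo=20)
    draw_speed(frame, tstamp=1.0, wheels=np.array([0, 0, 0, 0]),
               periods=np.array([5000, 5000, 5000, 5000]))
    draw_speed(frame, tstamp=1.5, wheels=np.array([20, 20, 20, 20]),
               periods=np.array([5000, 5000, 5000, 5000]))
    draw_throttle(frame, throttle=40)
    draw_accelerometer(frame, accel=np.array([0.1, -0.2, 9.8]),
                       gyro=np.array([0.0, 0.0, 0.3]))
    cv2.imwrite('hud_demo.png', frame)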
|
[
"cv2.line",
"cv2.circle",
"cv2.putText",
"numpy.mean",
"cv2.ellipse",
"numpy.sin",
"numpy.cos",
"numpy.all"
] |
[((446, 506), 'cv2.circle', 'cv2.circle', (['bgr', 'center', '(30)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), '(bgr, center, 30, (255, 255, 255), 1, cv2.LINE_AA)\n', (456, 506), False, 'import cv2\n'), ((722, 817), 'cv2.ellipse', 'cv2.ellipse', (['bgr', 'center', '(30, 30)', '(0)', '(-90)', '(-90 + steering)', '(255, 180, 180)', '(5)', 'cv2.LINE_AA'], {}), '(bgr, center, (30, 30), 0, -90, -90 + steering, (255, 180, 180),\n 5, cv2.LINE_AA)\n', (733, 817), False, 'import cv2\n'), ((834, 924), 'cv2.ellipse', 'cv2.ellipse', (['bgr', 'center', '(30, 30)', '(0)', '(-90)', '(-90 + servo)', '(0, 180, 255)', '(2)', 'cv2.LINE_AA'], {}), '(bgr, center, (30, 30), 0, -90, -90 + servo, (0, 180, 255), 2,\n cv2.LINE_AA)\n', (845, 924), False, 'import cv2\n'), ((1188, 1226), 'numpy.mean', 'np.mean', (['periods[:params.NUM_ENCODERS]'], {}), '(periods[:params.NUM_ENCODERS])\n', (1195, 1226), True, 'import numpy as np\n'), ((1595, 1610), 'numpy.all', 'np.all', (['(dw == 0)'], {}), '(dw == 0)\n', (1601, 1610), True, 'import numpy as np\n'), ((2377, 2511), 'cv2.putText', 'cv2.putText', (['bgr', "('%0.1f mph' % mph)", '(center[0] - 10, center[1] + 40)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), "(bgr, '%0.1f mph' % mph, (center[0] - 10, center[1] + 40), cv2.\n FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1, cv2.LINE_AA)\n", (2388, 2511), False, 'import cv2\n'), ((2646, 2756), 'cv2.line', 'cv2.line', (['img', 'center', '(center[0] + throttle, center[1])', '(throttle > 0 and (0, 255, 0) or (0, 95, 255))', '(5)'], {}), '(img, center, (center[0] + throttle, center[1]), throttle > 0 and (\n 0, 255, 0) or (0, 95, 255), 5)\n', (2654, 2756), False, 'import cv2\n'), ((2832, 2892), 'cv2.circle', 'cv2.circle', (['bgr', 'center', '(30)', '(255, 255, 255)', '(1)', 'cv2.LINE_AA'], {}), '(bgr, center, 30, (255, 255, 255), 1, cv2.LINE_AA)\n', (2842, 2892), False, 'import cv2\n'), ((2897, 3006), 'cv2.ellipse', 'cv2.ellipse', (['bgr', 'center', '(30, 30)', '(0)', '(-90)', '(-90 - 180 * gyro[2] / np.pi)', '(100, 255, 180)', '(3)', 'cv2.LINE_AA'], {}), '(bgr, center, (30, 30), 0, -90, -90 - 180 * gyro[2] / np.pi, (\n 100, 255, 180), 3, cv2.LINE_AA)\n', (2908, 3006), False, 'import cv2\n'), ((409, 421), 'numpy.sin', 'np.sin', (['srad'], {}), '(srad)\n', (415, 421), True, 'import numpy as np\n'), ((429, 441), 'numpy.cos', 'np.cos', (['srad'], {}), '(srad)\n', (435, 441), True, 'import numpy as np\n'), ((2185, 2196), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2191, 2196), True, 'import numpy as np\n'), ((2207, 2218), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2213, 2218), True, 'import numpy as np\n'), ((1923, 1934), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (1929, 1934), True, 'import numpy as np\n'), ((1945, 1956), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (1951, 1956), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
matplotlib.use('Agg')
def threshold_otsu(hist):
"""Return threshold value based on Otsu's method.
    Parameters
    ----------
    hist : 2-tuple of arrays
        Histogram from which to determine the threshold, given as a tuple of
        (counts, bin_edges) such as the one returned by ``np.histogram``.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Wikipedia, https://en.wikipedia.org/wiki/Otsu's_Method
"""
counts, bin_centers = hist
bin_centers = bin_centers[:-1]
# class probabilities for all possible thresholds
weight1 = np.cumsum(counts)
weight2 = np.cumsum(counts[::-1])[::-1]
# class means for all possible thresholds
mean1 = np.cumsum(counts * bin_centers) / weight1
mean2 = (np.cumsum((counts * bin_centers)[::-1]) / weight2[::-1])[::-1]
# Clip ends to align class 1 and class 2 variables:
# The last value of ``weight1``/``mean1`` should pair with zero values in
# ``weight2``/``mean2``, which do not exist.
variance12 = weight1[:-1] * weight2[1:] * (mean1[:-1] - mean2[1:]) ** 2
if len(variance12) == 0:
return 0
idx = np.nanargmax(variance12)
threshold = bin_centers[idx]
return threshold
def array2hist(array, binWidth=0.2):
counts, bins = np.histogram(array, bins=np.arange(0, max(array)+binWidth, binWidth))
return counts, bins
def makePlot(hist, thresh, fname):
counts, bins = hist
plt.hist(bins[:-1], bins, weights=counts)
plt.axvline(thresh, color='r')
plt.savefig(fname)
plt.close()
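# --- Usage sketch (illustrative, not part of the original module) ----------
# Thresholds a synthetic bimodal sample: build the histogram with array2hist,
# pick the Otsu threshold, and save the plot. The sample data, bin width and
# output file name are arbitrary demo choices.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    values = np.concatenate([rng.normal(2.0, 0.5, 1000),
                            rng.normal(6.0, 0.5, 1000)])
    hist = array2hist(values, binWidth=0.2)
    thresh = threshold_otsu(hist)
    print('Otsu threshold: %.2f' % thresh)
    makePlot(hist, thresh, 'otsu_demo.png')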
|
[
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.hist",
"matplotlib.pyplot.close",
"numpy.cumsum",
"matplotlib.use",
"numpy.nanargmax",
"matplotlib.pyplot.savefig"
] |
[((70, 91), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (84, 91), False, 'import matplotlib\n'), ((828, 845), 'numpy.cumsum', 'np.cumsum', (['counts'], {}), '(counts)\n', (837, 845), True, 'import numpy as np\n'), ((1382, 1406), 'numpy.nanargmax', 'np.nanargmax', (['variance12'], {}), '(variance12)\n', (1394, 1406), True, 'import numpy as np\n'), ((1679, 1720), 'matplotlib.pyplot.hist', 'plt.hist', (['bins[:-1]', 'bins'], {'weights': 'counts'}), '(bins[:-1], bins, weights=counts)\n', (1687, 1720), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1755), 'matplotlib.pyplot.axvline', 'plt.axvline', (['thresh'], {'color': '"""r"""'}), "(thresh, color='r')\n", (1736, 1755), True, 'import matplotlib.pyplot as plt\n'), ((1760, 1778), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname'], {}), '(fname)\n', (1771, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1794), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1792, 1794), True, 'import matplotlib.pyplot as plt\n'), ((860, 883), 'numpy.cumsum', 'np.cumsum', (['counts[::-1]'], {}), '(counts[::-1])\n', (869, 883), True, 'import numpy as np\n'), ((948, 979), 'numpy.cumsum', 'np.cumsum', (['(counts * bin_centers)'], {}), '(counts * bin_centers)\n', (957, 979), True, 'import numpy as np\n'), ((1003, 1042), 'numpy.cumsum', 'np.cumsum', (['(counts * bin_centers)[::-1]'], {}), '((counts * bin_centers)[::-1])\n', (1012, 1042), True, 'import numpy as np\n')]
|
import pysam
import os
import sys
import re
import numpy as np
import pandas as pd
import time
from pysam import VariantFile
from sklearn.cluster import KMeans
import sklearn.cluster
pd.options.mode.chained_assignment = None
def CallVCF(CandidateDf, VCFFile, refFile, bamFile, CellName):
# transfer candidate format to vcf format
## Select Proper SNVs
    # Exact 0/0 positions will not appear in the vcf
Condition1 = CandidateDf['Genotype'].isin(['0/1', '1/1'])
Condition2 = CandidateDf['Decision'].isin(['PCRError', 'PCRErrorLow'])
Condition2_1 = CandidateDf['Decision'].isin(['PCRError'])
Condition2_2 = CandidateDf['Decision'].isin(['PCRErrorLow'])
Condition3 = CandidateDf['AlleleDropOut'].isin([1])
Condition4 = CandidateDf['Vague'].isin(['Yes'])
Condition5 = CandidateDf['DataRange'].isin(['HomoHeteroSNV'])
Condition6 = CandidateDf['PassCode'].isin(['ClusterSNV'])
## make a VCF head
# Info items include: NS, DP, AF, AA
# Filter Items include: AB (Amplicon Bias), PCRError (PCR Error), ADO (Allele drop out)
# Format Items include: GT (0/1, 1/1, 0/0) , L0 (Homozygous Likelihood), L1 (Amplicon Bias Likelihood), L2 (Heterozygous Likelihood), L3 (PCR error Likelihood), DP, AF
with open(VCFFile, 'w') as vcf:
vcf.write("##fileformat=VCFv4.2\n##fileDate=%s\n##source=NBJUDGE\n")
vcf.write("##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">\n")
vcf.write("##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Total Depth\">\n")
vcf.write("##INFO=<ID=AF,Number=A,Type=Float,Description=\"Allele Frequency\">\n")
vcf.write("##INFO=<ID=AA,Number=1,Type=String,Description=\"Ancestral Allele\">\n")
vcf.write("##FILTER=<ID=IMSNV,Description=\"Intermediate SNV error\">\n")
vcf.write("##FILTER=<ID=LASNV,Description=\"Low Allele Frequency SNV error\">\n")
vcf.write("##FILTER=<ID=ADO,Description=\"Suspected Allele Drop-out Error\">\n")
vcf.write("##FILTER=<ID=NE,Description=\"Not enough homozygous or heterozygous SNVs in the neighborhood\">\n")
vcf.write("##FILTER=<ID=CC,Description=\"SNV located in SNV cluster\">\n")
vcf.write("##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">\n")
vcf.write("##FORMAT=<ID=L0,Number=1,Type=Float,Description=\"Homozygous likelihood\">\n")
vcf.write("##FORMAT=<ID=L1,Number=1,Type=Float,Description=\"Intermediate SNV likelihood\">\n")
vcf.write("##FORMAT=<ID=L2,Number=1,Type=Float,Description=\"Heterozygous likelihood\">\n")
vcf.write("##FORMAT=<ID=L3,Number=1,Type=Float,Description=\"Low Allele Frequency likelihood\">\n")
vcf.write("##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read Depth\">\n")
vcf.write("##FORMAT=<ID=AF,Number=4,Type=Integer,Description=\"Top 4 Base count\"\n")
for chrom in set(CandidateDf['Chrom']):
vcf.write('##contig=<ID=%s>\n' % chrom)
vcf.write("##reference=file://%s\n" % refFile)
vcf.write("##bamFile=file://%s\n" % bamFile)
vcf.write("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t%s\n" % CellName)
vcf.flush()
# AddLabel For each SNV in the CandidateDf_VCF
CandidateDf['AF'] = CandidateDf['AltCount'] / CandidateDf['DP']
CandidateDf['INFO'] = 'NS=1;DP=' + CandidateDf['DP'].astype(str) + ";AF=" + CandidateDf['AF'].astype(str) + ";AA=" + CandidateDf['Alt']
CandidateDf['FILTER'] = ""
CandidateDf.loc[Condition2_1, 'FILTER'] += 'AB'
CandidateDf.loc[Condition2_2, 'FILTER'] += 'PCRError'
Condition_TMP = CandidateDf['FILTER'].isin([""])
CandidateDf.loc[Condition3&Condition4&Condition_TMP, 'FILTER'] += 'ADO'
CandidateDf.loc[Condition3&Condition4&(~Condition_TMP), 'FILTER'] += ',ADO'
Condition_TMP = CandidateDf['FILTER'].isin([""])
CandidateDf.loc[(~Condition5)&Condition_TMP, 'FILTER'] += 'NE'
CandidateDf.loc[(~Condition5)&(~Condition_TMP), 'FILTER'] += ',NE'
Condition_TMP = CandidateDf['FILTER'].isin([""])
CandidateDf.loc[(Condition6)&Condition_TMP, 'FILTER'] += 'CC'
CandidateDf.loc[(Condition6)&(~Condition_TMP), 'FILTER'] += ',CC'
Condition_TMP = CandidateDf['FILTER'].isin([""])
CandidateDf.loc[Condition_TMP, 'FILTER'] = 'PASS'
CandidateDf['FORMAT_Label'] = 'GT:L0:L1:L2:L3:DP:AF'
CandidateDf['FORMAT'] = CandidateDf['Genotype'] + ":" + CandidateDf['HomoZygous'].astype(str) + ":" + CandidateDf['HeteroZygous'].astype(str) + ":" + CandidateDf['PCRError'].astype(str) + ":" + CandidateDf['PCRErrorLow'].astype(str) + ":" + CandidateDf['DP'].astype(str) + ":" + CandidateDf['Base1'].astype(str) + "," + CandidateDf['Base2'].astype(str) + "," + CandidateDf['Base3'].astype(str) + "," + CandidateDf['Base4'].astype(str)
CandidateDf_VCF = CandidateDf[Condition1 | Condition2 | Condition3 | Condition4]
CandidateDf_VCF['VCFInfo'] = CandidateDf_VCF['Chrom'] + "\t" + CandidateDf_VCF['Pos'].astype(str) + "\t.\t" + CandidateDf_VCF['Ref'] + "\t" + CandidateDf_VCF['Alt'] + "\t.\t" + CandidateDf_VCF['FILTER'] + '\t' + CandidateDf_VCF['INFO'] + "\t" + CandidateDf_VCF['FORMAT_Label'] + "\t" + CandidateDf_VCF['FORMAT']
    # Re-open the output file in append mode: the handle used for the header
    # above was closed when its with-block exited.
    with open(VCFFile, 'a') as vcf:
        for I in CandidateDf_VCF.index:
            Record = CandidateDf_VCF.loc[I, 'VCFInfo']
            vcf.write(Record + "\n")
        vcf.flush()
def WeightKernel(Distance, mu=np.log(3)/10):
# return weight vector
fx = 2 * np.exp(-mu * Distance) / (1 + np.exp(-mu * Distance))
return(fx)
def CalculateP_A(SubDf, Position, mu=np.log(3)/10):
    # set kernel
SubDf['Distance'] = np.abs(np.array(SubDf['Pos'])-Position) / 1000.0
WorkDf = SubDf.loc[SubDf['Distance']!=0]
if WorkDf.shape[0] > 0:
Weight = WeightKernel(np.array(WorkDf['Distance'], dtype=float), mu)
if np.sum(Weight) == 0: # All adjacent SNVs located in too far distance
Weight = np.array([1.0] * len(Weight))
ProbDict = {}
for Base_Index in range(1,5):
BaseName = 'Base%s' % Base_Index
RateName = 'BRate%s' % Base_Index
WorkDf[RateName] = WorkDf[BaseName] / WorkDf['DP']
Ex = np.sum(np.array(WorkDf[RateName], dtype=float) * Weight) / np.sum(Weight)
Varx = np.sqrt(np.sum(Weight * (np.array(WorkDf[RateName], dtype=float) - Ex)**2) / np.sum(Weight))
ProbDict[BaseName] = [Ex, Varx]
else: # No data provided
ProbDict = {}
for Base_Index in range(1,5):
BaseName = 'Base%s' % Base_Index
RateName = 'BRate%s' % Base_Index
ProbDict[BaseName] = [0.0000001, 0.0000001]
return(ProbDict)
def DfTailor(SubDf1, SubDf2, I):
    # build sub-dataframes with the same dimension (decided by the smaller dataframe)
    # by trimming the larger dataframe around the candidate point
# I is the position value of candidate SNV point
# discard the current position
SubDf1 = SubDf1.loc[SubDf1['Pos']!=I]
SubDf2 = SubDf2.loc[SubDf2['Pos']!=I]
L1 = SubDf1.shape[0]
L2 = SubDf2.shape[0]
SubDf1['Distance'] = np.abs(np.array(SubDf1['Pos'], dtype=int) - I)
SubDf2['Distance'] = np.abs(np.array(SubDf2['Pos'], dtype=int) - I)
S_DF = SubDf1
L_DF = SubDf2
if L2 < L1:
S_DF = SubDf2
L_DF = SubDf1
    # the larger Df keeps only the positions closest to those in the smaller Df
LocList = []
S_DF.sort_values("Distance", inplace=True)
for Site in S_DF.index:
tmpPos = S_DF.loc[Site, 'Pos']
L_DF['tmpDistance'] = np.abs(np.array(L_DF['Pos'], dtype=int)-tmpPos)
L_DF.sort_values("tmpDistance", inplace=True)
LocAdd = 0
for i in range(0,L_DF.shape[0]):
if L_DF.index[i] not in LocList:
LocList.append(L_DF.index[i])
LocAdd += 1
break
if LocAdd == 0:
S_DF = S_DF.drop(index=[Site])
L_DF = L_DF.loc[LocList]
ResultDF = pd.concat([S_DF, L_DF], sort=True)
return(ResultDF)
def GetNeighbor(SubDf2, I):
SubDf2['Distance'] = [abs(x) for x in list(SubDf2['Pos']-I)]
SubDf2 = SubDf2.loc[SubDf2['Distance']!=0]
SubDf2.sort_values("Distance", inplace=True)
ResultDf = SubDf2.loc[SubDf2.index[0:10]]
return(ResultDf)
def GetNeighbor_Normal(CountDf, I, WindowSize):
'''
    Input CountDf and a position I
    Return SNVs within WindowSize bp of I
'''
start = I - WindowSize
end = I + WindowSize
return(CountDf.loc[(CountDf['Pos']>=start)&(CountDf['Pos']<=end)])
def highlight(data, Index,color='darkorange'):
'''
    Highlight the specified row
'''
ColorDf = pd.DataFrame('', columns=data.columns, index=data.index)
ColorDf.loc[Index] = 'background-color: {}'.format(color)
return(ColorDf)
def MakeShowData(InputDf, PCRDf, InterestedColumn1, WindowSize=30000):
    # WindowSize was referenced below but never defined in this scope; expose
    # it as a parameter whose default mirrors EstModel's default window size.
HomoDf = InputDf.loc[InputDf['RawGT']=='Homozygous']
HeterDf = InputDf.loc[InputDf['RawGT']=='Heterozygous']
PCR_eg = {}
for I in PCRDf.index:
currentSite = int(PCRDf.loc[I, 'Pos'])
if PCRDf.loc[I, 'DataRange'] == 'HomoHeter':
WorkDf = GetNeighbor_Normal(InputDf, currentSite, WindowSize)
else:
WorkDf = pd.concat([GetNeighbor(HomoDf, currentSite), GetNeighbor(HeterDf, currentSite)], sort=True)
WorkDf.sort_values("Pos", inplace=True)
df = WorkDf[InterestedColumn1]
PCR_eg[I] = df.style.apply(highlight, Index=I, axis=None)
return(PCR_eg)
## Finish Common Functions
class EstModel():
def __init__(self, bamFile, ref, CellName=None, WindowSize=30000, mu=np.log(3)/10):
'''
        Load the required file names and file paths
'''
try:
M = pysam.FastaFile(ref)
except:
print("Please identify proper reference file path")
sys.exit(1)
try:
L = pysam.AlignmentFile(bamFile)
except:
print("Please identify proper bam file path")
sys.exit(1)
self.bam = pysam.AlignmentFile(bamFile)
self.ref = pysam.FastaFile(ref)
self.WindowSize = WindowSize
self.mu = mu
self.Candidate = pd.DataFrame()
self.Shadow = None
self.Result = None
self.MergeResult = None
self.ABdf = pd.DataFrame()
        self.GapList = [] # record genome regions with depth lower than 10X
self.JUDGEMENT = None
self.Name = os.path.basename(bamFile).split(".bam")[0]
        if CellName:
            # only override the bam-derived name when an explicit cell name is given
            self.Name = CellName
def MakeCandidateDf(self, chrom, Start, End, AutoCutoff=True):
'''
Convert sorted bam into candidate dataframe
input:
            refFile: pysam.FastaFile object for the reference genome
samfile: pysam.AlignmentFile object for sorted bam file
chrom: aim chromosome required for dataframe extraction
Start: start position for dataframe
End: end position for dataframe
'''
self.Start = Start
self.End = End
if self.Start >= self.End:
print('Too Small data range, exit!')
sys.exit(1)
refFile = self.ref
samfile = self.bam
WindowSize=self.WindowSize
CountDf = pd.DataFrame(columns=['Chrom', 'Pos', 'Ref', 'RefCount', 'Alt', 'AltCount', 'Base1', 'Base2', 'Base3', 'Base4', 'RawRate'])
str_ref = refFile.fetch(chrom, Start - 1, End + 1)
my_arg = {"fastafile": refFile, "stepper": "samtools", "adjust_capq_threshold": 50, "contig": chrom,
"start": Start, "stop": End, "min_mapping_quality": 40, "truncate":True, "min_base_quality":17}
CandidateDf = pd.DataFrame(columns=['Chrom', 'Pos', 'Ref', 'RefCount', 'Alt', 'AltCount', 'DP', 'Base1', 'Base2', 'Base3', 'Base4', 'RawRate'])
partner = 0 # means no need for 0/0 partner in the Raw genotype decision
partnerDf = pd.DataFrame(columns=['Chrom', 'Pos', 'DP', 'Base1', 'Base2', 'Base3', 'Base4', 'RawRate'])
for pileup_column in samfile.pileup(**my_arg):
pos = pileup_column.reference_pos + 1 # 1-based location
if pos > my_arg['stop'] + self.WindowSize:
break
if pos < my_arg['start']-self.WindowSize:
continue
read_bases_list = pileup_column.get_query_sequences(mark_matches=False, mark_ends=False, add_indels=True)
DP = 0
RefCount = 0
AltCount = 0
BasePool = {'A':0,
'T':0,
'C':0,
'G':0
}
if len(read_bases_list) >=10:
refSeq = str_ref[pos-my_arg['start']].upper()
if refSeq not in ['A', 'T', 'C', 'G']:
continue
for B in read_bases_list:
base_str = B.upper()
if base_str in BasePool.keys():
BasePool[base_str] += 1
else:
BasePool[base_str] = 1
RefCount = BasePool[refSeq]
NonRefList = sorted([BasePool[K] for K in BasePool.keys() if K!=refSeq], reverse=True)
AltCount = NonRefList[0]
if AltCount >= 2: # less than max allele might come from noise
AltBase = sorted([(K, BasePool[K]) for K in BasePool.keys() if K!=refSeq], reverse=True, key=lambda item:item[1])[0][0]
DP = float(sum(NonRefList[0:3]) + RefCount)
BaseCountList = sorted(NonRefList[0:3] + [RefCount], reverse=True)
Info = [my_arg['contig'], pos, refSeq, float(RefCount), AltBase, float(AltCount), DP] + BaseCountList + [BaseCountList[1] / DP]
CandidateDf.loc['%s:%s' % (my_arg['contig'], pos)] = Info
partner += 2 # Now we need a 0/0 genotype partner
elif AltCount > 0: # proper point with Alt Allele
if partner >=1: # Now we need a 0/0 genotype partner
DP = float(sum(NonRefList[0:3]) + RefCount)
BaseCountList = sorted(NonRefList[0:3] + [RefCount], reverse=True)
Info = [my_arg['contig'], pos, DP] + BaseCountList + [BaseCountList[1] / DP]
partnerDf.loc['%s:%s' % (my_arg['contig'], pos)] = Info
partner += -1
else:
continue
else:
continue
else:
continue
# return the total SNV point
print('%s: %s candidate SNVs have been loaded !' % (time.ctime(), CandidateDf.shape[0]))
print('%s: %s shadow base points have been loaded !' % (time.ctime(), partnerDf.shape[0]))
self.Candidate = CandidateDf
self.Shadow = partnerDf
# Identify homozygous and heterozygous Roughly
if self.Candidate.shape[0] > 0 and AutoCutoff:
self.GetCutoffSimple()
def GetCutoffSimple(self):
# decrease the complex of method GetCutoff()
try:
self.Candidate.shape
except:
print("No CandidateDf exist. Run MakeCandidateDf method first")
self.Candidate['RawGT'] = None
CutOFFDf = self.Candidate[['Pos', 'RawRate']]
CutOFFShadow = self.Shadow[['Pos', 'RawRate']]
CutOFFDf.index = self.Candidate.index
CutOFFShadow.index = self.Shadow.index
CutOFFDf['Label'] = 'UnSet'
MixDf = pd.concat([CutOFFDf, CutOFFShadow], axis=0, sort=True).sort_values(by=['RawRate'])
# Make cutoff every 1 MB
# PosMax = list(MixDf['Pos'])[-1]
Orig = list(CutOFFDf['Pos'])[0]-1
estimator = sklearn.cluster.AgglomerativeClustering(2)
# get total center Mean
estimator.fit(pd.DataFrame(MixDf['RawRate']))
MixDf['Label'] = estimator.labels_
MixDf.loc[MixDf['Label']==estimator.labels_[0], 'Label'] = 'Homozygous'
MixDf.loc[MixDf['Label']==estimator.labels_[-1], 'Label'] = 'Heterozygous'
CutOFFDf.loc[CutOFFDf.index, ['Label']] = MixDf.loc[CutOFFDf.index, ['Label']]
#
while CutOFFDf.loc[(CutOFFDf['Pos']>Orig)&(CutOFFDf['Pos']<=Orig+self.WindowSize)].shape[0] >=3:
SubDf = CutOFFDf.loc[(CutOFFDf['Pos']>Orig)&(CutOFFDf['Pos']<=Orig+self.WindowSize)]
SubShadow = CutOFFShadow.loc[(CutOFFShadow['Pos']>Orig)&(CutOFFShadow['Pos']<=Orig+self.WindowSize)]
SubMix = pd.concat([SubDf, SubShadow], axis=0, sort=True).sort_values(by=['RawRate'])
estimator.fit(pd.DataFrame(SubMix['RawRate']))
SubMix['Label'] = estimator.labels_
SubMix.loc[SubMix['Label']==estimator.labels_[0], 'Label'] = 'Homozygous'
SubMix.loc[SubMix['Label']==estimator.labels_[-1], 'Label'] = 'Heterozygous'
CutOFFDf.loc[SubDf.index, ['Label']] = SubMix.loc[SubDf.index, ['Label']]
Orig = list(SubDf['Pos'])[-1]
# check Null cutoffs
self.Candidate['RawGT'] = CutOFFDf['Label']
def AnnotateCountDf1(self):
# The first annotation of candidate SNV Dataframe
self.ABdf = pd.DataFrame(columns=['HeteroBase1', 'HeteroSE', 'HomoBase1', 'HomoSE', 'PCRBase1', 'PCRSE', 'PCRLowBase1'], index=self.Candidate.index)
CountDf = self.Candidate
WindowSize = self.WindowSize
mu = self.mu
CountDf['HomoZygous'] = 1
CountDf['HomoZygous_P'] = 0
CountDf['HomoZygous_P_std'] = 0
CountDf['HeteroZygous'] = 1
CountDf['HeteroZygous_P'] = 0
CountDf['HeteroZygous_P_std'] = 0
CountDf['PCRError'] = 1
CountDf['PCRError_P'] = 0
CountDf['PCRError_P_std'] = 0
CountDf['PCRErrorLow'] = 1
CountDf['PCRErrorLow_P'] = 0
CountDf['DataRange'] = 'AllData'
CountDf['Decision'] = 'Unknown'
CountDf['DecisionGT'] = 'Unknown'
TotalHomoDf = pd.concat([CountDf.loc[CountDf['RawGT']=='Homozygous'], self.Shadow], axis=0, sort=True)
TotalHeterDf = CountDf.loc[CountDf['RawGT']=='Heterozygous']
for I in CountDf.index:
currentPos = CountDf.loc[I, 'Pos']
start = currentPos - WindowSize
end = currentPos + WindowSize
SubDf = CountDf.loc[(CountDf['Pos']>=start)&(CountDf['Pos']<=end)&(CountDf['Pos']!=currentPos)]
SubShadowDf = self.Shadow.loc[(self.Shadow['Pos']>=start)&(self.Shadow['Pos']<=end)&(self.Shadow['Pos']!=currentPos)]
SubDf_Homo = pd.concat([SubDf.loc[SubDf['RawGT']=='Homozygous'], SubShadowDf],axis=0, sort=True)
SubDf_Heter = SubDf.loc[SubDf['RawGT']=='Heterozygous']
DataRange = ''
if (SubDf_Homo.shape[0] < 3) and (SubDf_Heter.shape[0] < 3):
DataRange = 'LonelySNV'
HomoProb = CalculateP_A(TotalHomoDf, currentPos, mu)
HeteroProb = CalculateP_A(TotalHeterDf, currentPos, mu)
PCRProb = CalculateP_A(DfTailor(TotalHomoDf, TotalHeterDf, currentPos), currentPos, mu)
elif (SubDf_Homo.shape[0] < 3) and (SubDf_Heter.shape[0]>=3):
DataRange = 'HeteroEnrichSNV'
HomoProb = CalculateP_A(TotalHomoDf, currentPos, mu)
HeteroProb = CalculateP_A(SubDf_Heter, currentPos, mu)
PCRProb = CalculateP_A(DfTailor(TotalHomoDf, SubDf_Heter, currentPos), currentPos, mu)
elif (SubDf_Homo.shape[0] >= 3) and (SubDf_Heter.shape[0]<3):
DataRange = 'HomoEnrichSNV'
HomoProb = CalculateP_A(SubDf_Homo, currentPos, mu)
HeteroProb = CalculateP_A(TotalHeterDf, currentPos, mu)
PCRProb = CalculateP_A(DfTailor(SubDf_Homo, TotalHeterDf, currentPos), currentPos, mu)
else:
DataRange = 'HomoHeteroSNV'
HomoProb = CalculateP_A(SubDf_Homo, currentPos, mu)
HeteroProb = CalculateP_A(SubDf_Heter, currentPos, mu)
PCRProb = CalculateP_A(DfTailor(SubDf_Homo, SubDf_Heter, currentPos), currentPos, mu)
CountDf.loc[I, 'DataRange'] = DataRange
# fill in the success probability and std value
CountDf.loc[I, 'HomoZygous_P'] = HomoProb['Base2'][0]
CountDf.loc[I, 'HomoZygous_P_std'] = HomoProb['Base2'][1]
CountDf.loc[I, 'HeteroZygous_P'] = HeteroProb['Base2'][0]
CountDf.loc[I, 'HeteroZygous_P_std'] = HeteroProb['Base2'][1]
CountDf.loc[I, 'PCRError_P'] = PCRProb['Base2'][0]
CountDf.loc[I, 'PCRError_P_std'] = PCRProb['Base2'][1]
self.ABdf.loc[I] = [HeteroProb['Base1'][0], HeteroProb['Base1'][1], HomoProb['Base1'][0], HomoProb['Base1'][1], PCRProb['Base1'][0], PCRProb['Base1'][1], HeteroProb['Base1'][0]-3*HeteroProb['Base1'][1]]
for Base in ['Base1', 'Base2', 'Base3', 'Base4']:
CountDf.loc[I, 'HomoZygous'] = CountDf.loc[I, 'HomoZygous'] + (np.log10(HomoProb[Base][0]+0.0000001)*CountDf.loc[I, Base])
CountDf.loc[I, 'HeteroZygous'] = CountDf.loc[I, 'HeteroZygous'] + (np.log10(HeteroProb[Base][0] + 0.0000001)*CountDf.loc[I, Base])
CountDf.loc[I, 'PCRError'] = CountDf.loc[I, 'PCRError'] + (np.log10(PCRProb[Base][0]+0.0000001)*CountDf.loc[I, Base])
PCRLow_P1 = np.max([0.0000001, (HeteroProb["Base1"][0] - 3 * HeteroProb["Base1"][1])]) # avoid the negative value
CountDf.loc[I, 'PCRErrorLow_P'] = PCRLow_P1
CountDf.loc[I, 'PCRErrorLow'] = (np.log10(PCRLow_P1+0.0000001)*CountDf.loc[I, "Base1"]) + (np.log10(1-PCRLow_P1+0.0000001)*CountDf.loc[I, "Base2"])
# Normalize the percentage data
ProbUnit = np.array(CountDf.loc[I, ['HomoZygous', 'HeteroZygous', 'PCRError', 'PCRErrorLow']])
ProbUnit = list(ProbUnit)
# ProbUnit = ProbUnit / np.sum(ProbUnit)
CountDf.loc[I, ['HomoZygous', 'HeteroZygous', 'PCRError', 'PCRErrorLow']] = ProbUnit
DecisionList = ['Homozygous', 'Heterozygous', 'PCRError', 'PCRErrorLow']
Decision = DecisionList[ProbUnit.index(max(ProbUnit))]
CountDf.loc[I, 'Decision'] = Decision
if Decision == 'PCRError':
# we should make a decision which GT to choose
                if CountDf.loc[I, 'HomoZygous'] < CountDf.loc[I, 'HeteroZygous']: # heterozygous likelihood exceeds the homozygous one
                    CountDf.loc[I, 'DecisionGT'] = 'Heterozygous' # candidate position is more likely heterozygous
                else:
                    CountDf.loc[I, 'DecisionGT'] = 'Homozygous' # otherwise the candidate position is more likely homozygous
else:
CountDf.loc[I, 'DecisionGT'] = CountDf.loc[I, 'Decision']
if Decision == 'PCRErrorLow':
CountDf.loc[I, 'DecisionGT'] = 'Heterozygous'
self.Candidate = CountDf
self.ABdf['Pos'] = CountDf['Pos']
self.ABdf.index = CountDf.index
self.ABdf['RawGT'] = CountDf['RawGT']
self.ABdf['RawRate'] = CountDf['RawRate']
self.ABdf['Decision'] = CountDf['Decision']
self.ABdf['DecisionGT'] = CountDf['DecisionGT']
def AnnotateCountDf2(self):
InputDf2 = self.Candidate
WindowSize = self.WindowSize
# Input dataframe after the first annotation
InputDf2['AlleleDropOut'] = 0
for I in InputDf2.index:
currentPos = InputDf2.loc[I, 'Pos']
start = currentPos - WindowSize
end = currentPos + WindowSize
SubDf = InputDf2.loc[(InputDf2['Pos']>=start)&(InputDf2['Pos']<=end)]
SubDf_Heter = SubDf.loc[SubDf['DecisionGT']=='Heterozygous']
if SubDf_Heter.shape[0] == 0: # No hetero SNV around an ADO
InputDf2.loc[I, 'AlleleDropOut'] = 1
self.Candidate = InputDf2
def EstimateError(self):
'''
Estimate 4 types of error
Homozygous:
Heterozygous:
PCR error:
Allele drop out:
'''
print('%s: Start to Calculate!' % time.ctime())
# The first Annotation
CountDf = self.Candidate
WindowSize = self.WindowSize
self.AnnotateCountDf1()
print('%s: The first Annotation finished!' % time.ctime())
# Now add ADO score for each point
self.AnnotateCountDf2()
print('%s: The second Annotation finished!' % time.ctime())
self.Result = self.Candidate
def FillError(self):
self.Candidate['Vague'] = 'No'
for I in self.Candidate.index:
currentPos = self.Candidate.loc[I, 'Pos']
SubDf = self.Candidate.loc[(self.Candidate['Pos']>currentPos-self.WindowSize/3.0)&(self.Candidate['Pos']<currentPos+self.WindowSize/3.0)]
if (SubDf.shape[0] > 10) and (100.0 * SubDf.loc[SubDf['Decision']=='PCRError'].shape[0] / SubDf.shape[0] >= 40):
self.Candidate.loc[SubDf.index, 'Vague'] = 'Yes'
def PassCode(self):
self.Candidate['PassCode'] = 'NormalSNV'
SubCandidate = self.Candidate.loc[self.Candidate['Genotype'].isin(['0/1', '1/1'])]
for I in SubCandidate.index:
currentPos = SubCandidate.loc[I, 'Pos']
Start = currentPos - 10
End = currentPos + 10
SubDf = SubCandidate.loc[(SubCandidate['Pos']>=Start)&(SubCandidate['Pos']<=End)]
altBase = SubCandidate.loc[I, 'Alt']
if altBase not in ['A', 'T', 'C', 'G']:
                # write to the full Candidate frame: SubCandidate is a filtered copy
                self.Candidate.loc[I, 'PassCode'] = 'Indel'
if SubDf.shape[0] > 1:
if self.Candidate.loc[I, 'PassCode'] == 'NormalSNV':
self.Candidate.loc[I, 'PassCode'] = 'ClusterSNV'
def BaseCaller(self):
'''
genotyping for each point
'''
# Label SNV candidate with too many Error
self.FillError()
# Cut the result dataframe to ensure all Candidate have neighborhood information
self.Candidate = self.Candidate.loc[(self.Candidate['Pos']>=self.Start)&(self.Candidate['Pos']<=self.End)]
self.ABdf = self.ABdf.loc[(self.ABdf['Pos']>=self.Start)&(self.ABdf['Pos']<=self.End)]
JudgeDf = self.Candidate[['Ref','RefCount','Alt','AltCount', 'Decision','DecisionGT','AlleleDropOut']]
JudgeDf['Genotype'] = '0/0'
JudgeDf['BaseCall'] = ''
# Basecalling
JudgeDf.loc[JudgeDf['DecisionGT']=='Heterozygous', 'Genotype'] = '0/1'
JudgeDf.loc[JudgeDf['DecisionGT']=='Heterozygous', 'BaseCall'] = JudgeDf.loc[JudgeDf['DecisionGT']=='Heterozygous', 'Ref'] + "/" + JudgeDf.loc[JudgeDf['DecisionGT']=='Heterozygous', 'Alt']
# Homozygous SNP 0/0 or 1/1
JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']>JudgeDf['RefCount']), 'Genotype'] = '1/1'
JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']>JudgeDf['RefCount']), 'BaseCall'] = JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']>JudgeDf['RefCount']), 'Alt'] + "/" + JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']>JudgeDf['RefCount']), 'Alt']
JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']<JudgeDf['RefCount']), 'BaseCall'] = JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']<JudgeDf['RefCount']), 'Ref'] + "/" + JudgeDf.loc[(JudgeDf['DecisionGT']=='Homozygous') & (JudgeDf['AltCount']<JudgeDf['RefCount']), 'Ref']
        # Select SNV positions to reduce the dimension
JudgeDf_NonSNV = JudgeDf.loc[(JudgeDf['Genotype']=='0/0')] # ref/ref base
JudgeDf_SNVs = JudgeDf.loc[(JudgeDf['Genotype']!='0/0')] # alt is SNV
ConfuseIndex = list(JudgeDf.loc[(JudgeDf['Decision']!='Homozygous')&(JudgeDf['Decision']!='Heterozygous')&(JudgeDf['Genotype']!='0/0')].index) # alt is SNV but high pcr risk
self.JUDGEMENT = {"Non_SNV":JudgeDf_NonSNV,
"SCSNV":JudgeDf_SNVs,
"RawJudgeDf": JudgeDf.loc[(JudgeDf['Genotype']!="0/0") | ((JudgeDf['Decision']!='Homozygous')&(JudgeDf['Decision']!='Heterozygous'))],
"ConfuseIndex": ConfuseIndex}
self.Candidate['Genotype'] = JudgeDf['Genotype']
self.Candidate['BaseCall'] = JudgeDf['BaseCall']
# label Indel and 10bp continuous SNVs
self.PassCode()
# Cut the output data (make sure all point at the center of data)
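# --- Usage sketch (illustrative, not part of the original module) ----------
# Rough end-to-end flow implied by the methods above: build candidates for a
# region, score the error modes, call genotypes, then emit a VCF. The file
# paths, contig and coordinates are placeholders, not values from this project.
if __name__ == '__main__':
    model = EstModel('sample.bam', 'reference.fa', CellName='cell01')
    model.MakeCandidateDf('chr1', 1000000, 2000000)
    if model.Candidate.shape[0] > 0:
        model.EstimateError()
        model.BaseCaller()
        CallVCF(model.Candidate, 'cell01.vcf', 'reference.fa', 'sample.bam', model.Name)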
|
[
"pandas.DataFrame",
"numpy.sum",
"pysam.FastaFile",
"numpy.log",
"os.path.basename",
"pysam.AlignmentFile",
"time.ctime",
"numpy.max",
"numpy.array",
"numpy.exp",
"numpy.log10",
"pandas.concat"
] |
[((7990, 8024), 'pandas.concat', 'pd.concat', (['[S_DF, L_DF]'], {'sort': '(True)'}), '([S_DF, L_DF], sort=True)\n', (7999, 8024), True, 'import pandas as pd\n'), ((8659, 8715), 'pandas.DataFrame', 'pd.DataFrame', (['""""""'], {'columns': 'data.columns', 'index': 'data.index'}), "('', columns=data.columns, index=data.index)\n", (8671, 8715), True, 'import pandas as pd\n'), ((5447, 5456), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (5453, 5456), True, 'import numpy as np\n'), ((5610, 5619), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (5616, 5619), True, 'import numpy as np\n'), ((10027, 10055), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bamFile'], {}), '(bamFile)\n', (10046, 10055), False, 'import pysam\n'), ((10075, 10095), 'pysam.FastaFile', 'pysam.FastaFile', (['ref'], {}), '(ref)\n', (10090, 10095), False, 'import pysam\n'), ((10179, 10193), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10191, 10193), True, 'import pandas as pd\n'), ((10300, 10314), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10312, 10314), True, 'import pandas as pd\n'), ((11269, 11396), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Chrom', 'Pos', 'Ref', 'RefCount', 'Alt', 'AltCount', 'Base1', 'Base2',\n 'Base3', 'Base4', 'RawRate']"}), "(columns=['Chrom', 'Pos', 'Ref', 'RefCount', 'Alt', 'AltCount',\n 'Base1', 'Base2', 'Base3', 'Base4', 'RawRate'])\n", (11281, 11396), True, 'import pandas as pd\n'), ((11697, 11830), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Chrom', 'Pos', 'Ref', 'RefCount', 'Alt', 'AltCount', 'DP', 'Base1',\n 'Base2', 'Base3', 'Base4', 'RawRate']"}), "(columns=['Chrom', 'Pos', 'Ref', 'RefCount', 'Alt', 'AltCount',\n 'DP', 'Base1', 'Base2', 'Base3', 'Base4', 'RawRate'])\n", (11709, 11830), True, 'import pandas as pd\n'), ((11962, 12057), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['Chrom', 'Pos', 'DP', 'Base1', 'Base2', 'Base3', 'Base4', 'RawRate']"}), "(columns=['Chrom', 'Pos', 'DP', 'Base1', 'Base2', 'Base3',\n 'Base4', 'RawRate'])\n", (11974, 12057), True, 'import pandas as pd\n'), ((17274, 17414), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['HeteroBase1', 'HeteroSE', 'HomoBase1', 'HomoSE', 'PCRBase1', 'PCRSE',\n 'PCRLowBase1']", 'index': 'self.Candidate.index'}), "(columns=['HeteroBase1', 'HeteroSE', 'HomoBase1', 'HomoSE',\n 'PCRBase1', 'PCRSE', 'PCRLowBase1'], index=self.Candidate.index)\n", (17286, 17414), True, 'import pandas as pd\n'), ((18049, 18143), 'pandas.concat', 'pd.concat', (["[CountDf.loc[CountDf['RawGT'] == 'Homozygous'], self.Shadow]"], {'axis': '(0)', 'sort': '(True)'}), "([CountDf.loc[CountDf['RawGT'] == 'Homozygous'], self.Shadow],\n axis=0, sort=True)\n", (18058, 18143), True, 'import pandas as pd\n'), ((5502, 5524), 'numpy.exp', 'np.exp', (['(-mu * Distance)'], {}), '(-mu * Distance)\n', (5508, 5524), True, 'import numpy as np\n'), ((5532, 5554), 'numpy.exp', 'np.exp', (['(-mu * Distance)'], {}), '(-mu * Distance)\n', (5538, 5554), True, 'import numpy as np\n'), ((5818, 5859), 'numpy.array', 'np.array', (["WorkDf['Distance']"], {'dtype': 'float'}), "(WorkDf['Distance'], dtype=float)\n", (5826, 5859), True, 'import numpy as np\n'), ((5876, 5890), 'numpy.sum', 'np.sum', (['Weight'], {}), '(Weight)\n', (5882, 5890), True, 'import numpy as np\n'), ((7143, 7177), 'numpy.array', 'np.array', (["SubDf1['Pos']"], {'dtype': 'int'}), "(SubDf1['Pos'], dtype=int)\n", (7151, 7177), True, 'import numpy as np\n'), ((7215, 7249), 'numpy.array', 'np.array', (["SubDf2['Pos']"], {'dtype': 'int'}), "(SubDf2['Pos'], 
dtype=int)\n", (7223, 7249), True, 'import numpy as np\n'), ((9609, 9618), 'numpy.log', 'np.log', (['(3)'], {}), '(3)\n', (9615, 9618), True, 'import numpy as np\n'), ((9727, 9747), 'pysam.FastaFile', 'pysam.FastaFile', (['ref'], {}), '(ref)\n', (9742, 9747), False, 'import pysam\n'), ((9881, 9909), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bamFile'], {}), '(bamFile)\n', (9900, 9909), False, 'import pysam\n'), ((15924, 15954), 'pandas.DataFrame', 'pd.DataFrame', (["MixDf['RawRate']"], {}), "(MixDf['RawRate'])\n", (15936, 15954), True, 'import pandas as pd\n'), ((18635, 18725), 'pandas.concat', 'pd.concat', (["[SubDf.loc[SubDf['RawGT'] == 'Homozygous'], SubShadowDf]"], {'axis': '(0)', 'sort': '(True)'}), "([SubDf.loc[SubDf['RawGT'] == 'Homozygous'], SubShadowDf], axis=0,\n sort=True)\n", (18644, 18725), True, 'import pandas as pd\n'), ((21443, 21511), 'numpy.max', 'np.max', (["[1e-07, HeteroProb['Base1'][0] - 3 * HeteroProb['Base1'][1]]"], {}), "([1e-07, HeteroProb['Base1'][0] - 3 * HeteroProb['Base1'][1]])\n", (21449, 21511), True, 'import numpy as np\n'), ((21828, 21915), 'numpy.array', 'np.array', (["CountDf.loc[I, ['HomoZygous', 'HeteroZygous', 'PCRError', 'PCRErrorLow']]"], {}), "(CountDf.loc[I, ['HomoZygous', 'HeteroZygous', 'PCRError',\n 'PCRErrorLow']])\n", (21836, 21915), True, 'import numpy as np\n'), ((5673, 5695), 'numpy.array', 'np.array', (["SubDf['Pos']"], {}), "(SubDf['Pos'])\n", (5681, 5695), True, 'import numpy as np\n'), ((6289, 6303), 'numpy.sum', 'np.sum', (['Weight'], {}), '(Weight)\n', (6295, 6303), True, 'import numpy as np\n'), ((7583, 7615), 'numpy.array', 'np.array', (["L_DF['Pos']"], {'dtype': 'int'}), "(L_DF['Pos'], dtype=int)\n", (7591, 7615), True, 'import numpy as np\n'), ((15607, 15661), 'pandas.concat', 'pd.concat', (['[CutOFFDf, CutOFFShadow]'], {'axis': '(0)', 'sort': '(True)'}), '([CutOFFDf, CutOFFShadow], axis=0, sort=True)\n', (15616, 15661), True, 'import pandas as pd\n'), ((16698, 16729), 'pandas.DataFrame', 'pd.DataFrame', (["SubMix['RawRate']"], {}), "(SubMix['RawRate'])\n", (16710, 16729), True, 'import pandas as pd\n'), ((24237, 24249), 'time.ctime', 'time.ctime', ([], {}), '()\n', (24247, 24249), False, 'import time\n'), ((24437, 24449), 'time.ctime', 'time.ctime', ([], {}), '()\n', (24447, 24449), False, 'import time\n'), ((24580, 24592), 'time.ctime', 'time.ctime', ([], {}), '()\n', (24590, 24592), False, 'import time\n'), ((6400, 6414), 'numpy.sum', 'np.sum', (['Weight'], {}), '(Weight)\n', (6406, 6414), True, 'import numpy as np\n'), ((10450, 10475), 'os.path.basename', 'os.path.basename', (['bamFile'], {}), '(bamFile)\n', (10466, 10475), False, 'import os\n'), ((14741, 14753), 'time.ctime', 'time.ctime', ([], {}), '()\n', (14751, 14753), False, 'import time\n'), ((14842, 14854), 'time.ctime', 'time.ctime', ([], {}), '()\n', (14852, 14854), False, 'import time\n'), ((16595, 16643), 'pandas.concat', 'pd.concat', (['[SubDf, SubShadow]'], {'axis': '(0)', 'sort': '(True)'}), '([SubDf, SubShadow], axis=0, sort=True)\n', (16604, 16643), True, 'import pandas as pd\n'), ((21646, 21673), 'numpy.log10', 'np.log10', (['(PCRLow_P1 + 1e-07)'], {}), '(PCRLow_P1 + 1e-07)\n', (21654, 21673), True, 'import numpy as np\n'), ((21704, 21735), 'numpy.log10', 'np.log10', (['(1 - PCRLow_P1 + 1e-07)'], {}), '(1 - PCRLow_P1 + 1e-07)\n', (21712, 21735), True, 'import numpy as np\n'), ((6237, 6276), 'numpy.array', 'np.array', (['WorkDf[RateName]'], {'dtype': 'float'}), '(WorkDf[RateName], dtype=float)\n', (6245, 6276), True, 'import numpy as np\n'), ((21078, 
21113), 'numpy.log10', 'np.log10', (['(HomoProb[Base][0] + 1e-07)'], {}), '(HomoProb[Base][0] + 1e-07)\n', (21086, 21113), True, 'import numpy as np\n'), ((21221, 21258), 'numpy.log10', 'np.log10', (['(HeteroProb[Base][0] + 1e-07)'], {}), '(HeteroProb[Base][0] + 1e-07)\n', (21229, 21258), True, 'import numpy as np\n'), ((21360, 21394), 'numpy.log10', 'np.log10', (['(PCRProb[Base][0] + 1e-07)'], {}), '(PCRProb[Base][0] + 1e-07)\n', (21368, 21394), True, 'import numpy as np\n'), ((6348, 6387), 'numpy.array', 'np.array', (['WorkDf[RateName]'], {'dtype': 'float'}), '(WorkDf[RateName], dtype=float)\n', (6356, 6387), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
#
# Author: <NAME>
# URL: http://kazuto1011.github.io
# Created: 2017-05-26
from __future__ import print_function
from collections import OrderedDict
import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import functional as F
class PropagationBase(object):
def __init__(self, model, cuda=False):
self.model = model
self.model.eval()
if cuda:
self.model.cuda()
self.cuda = cuda
self.all_fmaps = OrderedDict()
self.all_grads = OrderedDict()
self._set_hook_func()
self.image = None
def _set_hook_func(self):
raise NotImplementedError
def _encode_one_hot(self, idx):
one_hot = torch.FloatTensor(1, self.preds.size()[-1]).zero_()
one_hot[0][idx] = 1.0
return one_hot.cuda() if self.cuda else one_hot
def forward(self, image):
self.image = image
self.preds = self.model.forward(self.image)
self.probs = F.softmax(self.preds, dim=0)[0]
self.prob, self.idx = self.probs.data.sort(0, True)
return self.prob, self.idx
def backward(self, idx):
self.model.zero_grad()
one_hot = self._encode_one_hot(idx)
self.preds.backward(gradient=one_hot, retain_graph=True)
class GradCAM(PropagationBase):
def _set_hook_func(self):
def func_f(module, input, output):
self.all_fmaps[id(module)] = output.data.cpu()
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_out[0].cpu()
for module in self.model.named_modules():
module[1].register_forward_hook(func_f)
module[1].register_backward_hook(func_b)
def _find(self, outputs, target_layer):
for key, value in outputs.items():
for module in self.model.named_modules():
if id(module[1]) == key:
if module[0] == target_layer:
return value
raise ValueError('Invalid layer name: {}'.format(target_layer))
def _normalize(self, grads):
l2_norm = torch.sqrt(torch.mean(torch.pow(grads, 2))) + 1e-5
return grads / l2_norm.data[0]
def _compute_grad_weights(self, grads):
grads = self._normalize(grads)
self.map_size = grads.size()[2:]
return nn.AvgPool2d(self.map_size)(grads)
def generate(self, target_layer):
fmaps = self._find(self.all_fmaps, target_layer)
grads = self._find(self.all_grads, target_layer)
weights = self._compute_grad_weights(grads)
gcam = torch.FloatTensor(self.map_size).zero_()
        for fmap, weight in zip(fmaps[0], weights[0]):
            gcam += fmap * weight.data.expand_as(fmap)
gcam = F.relu(Variable(gcam))
gcam = gcam.data.cpu().numpy()
gcam -= gcam.min()
if(gcam.max() != 0):
gcam /= gcam.max()
gcam = cv2.resize(gcam, (self.image.size(3), self.image.size(2)))
return gcam
def save(self, filename, gcam, raw_image):
gcam = cv2.applyColorMap(np.uint8(gcam * 255.0), cv2.COLORMAP_JET)
        gcam = gcam.astype(np.float64) + raw_image.astype(np.float64)  # np.float alias removed in newer NumPy
if(gcam.max() != 0):
gcam = gcam / gcam.max() * 255.0
cv2.imwrite(filename, np.uint8(gcam))
class BackPropagation(PropagationBase):
def _find(self, outputs, target_layer):
for key, value in outputs.items():
for module in self.model.named_modules():
if id(module[1]) == key:
if module[0] == target_layer:
return value
raise ValueError('Invalid layer name: {}'.format(target_layer))
def _set_hook_func(self):
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_in[0].cpu()
for module in self.model.named_modules():
module[1].register_backward_hook(func_b)
def generate(self, target_layer):
grads = self._find(self.all_grads, target_layer)
gradients_as_arr = grads.data[0].numpy()[0]
return gradients_as_arr
def save(self, filename, data):
abs_max = np.maximum(-1 * data.min(), data.max())
data = data / abs_max * 127.0 + 127.0
cv2.imwrite(filename, np.uint8(data))
class GuidedBackPropagation(BackPropagation):
def _set_hook_func(self):
def func_b(module, grad_in, grad_out):
self.all_grads[id(module)] = grad_in[0].cpu()
# Cut off negative gradients
if isinstance(module, nn.ReLU):
return (torch.clamp(grad_in[0], min=0.0),)
for module in self.model.named_modules():
module[1].register_backward_hook(func_b)
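# --- Usage sketch (illustrative, not part of the original module) ----------
# Intended call order: forward, backward on the top class, then generate a
# map for one named layer. The torchvision ResNet-18, the 'layer4' target and
# the image path are assumptions for this demo, and the legacy idioms above
# (Variable, .data[0]) imply a matching pre-1.0 PyTorch environment.
if __name__ == '__main__':
    from torchvision import models, transforms
    model = models.resnet18(pretrained=True)
    raw = cv2.resize(cv2.imread('sample.jpg'), (224, 224))
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])
    image = Variable(preprocess(raw[..., ::-1].copy()).unsqueeze(0))
    gcam = GradCAM(model=model, cuda=False)
    prob, idx = gcam.forward(image)
    gcam.backward(idx=idx[0])
    region = gcam.generate(target_layer='layer4')
    gcam.save('gradcam.png', region, raw)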
|
[
"numpy.uint8",
"torch.autograd.Variable",
"torch.FloatTensor",
"torch.nn.functional.softmax",
"torch.clamp",
"torch.pow",
"collections.OrderedDict",
"torch.nn.AvgPool2d"
] |
[((563, 576), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (574, 576), False, 'from collections import OrderedDict\n'), ((602, 615), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (613, 615), False, 'from collections import OrderedDict\n'), ((1061, 1089), 'torch.nn.functional.softmax', 'F.softmax', (['self.preds'], {'dim': '(0)'}), '(self.preds, dim=0)\n', (1070, 1089), True, 'from torch.nn import functional as F\n'), ((2413, 2440), 'torch.nn.AvgPool2d', 'nn.AvgPool2d', (['self.map_size'], {}), '(self.map_size)\n', (2425, 2440), True, 'import torch.nn as nn\n'), ((2895, 2909), 'torch.autograd.Variable', 'Variable', (['gcam'], {}), '(gcam)\n', (2903, 2909), False, 'from torch.autograd import Variable\n'), ((3214, 3236), 'numpy.uint8', 'np.uint8', (['(gcam * 255.0)'], {}), '(gcam * 255.0)\n', (3222, 3236), True, 'import numpy as np\n'), ((3426, 3440), 'numpy.uint8', 'np.uint8', (['gcam'], {}), '(gcam)\n', (3434, 3440), True, 'import numpy as np\n'), ((4417, 4431), 'numpy.uint8', 'np.uint8', (['data'], {}), '(data)\n', (4425, 4431), True, 'import numpy as np\n'), ((2669, 2701), 'torch.FloatTensor', 'torch.FloatTensor', (['self.map_size'], {}), '(self.map_size)\n', (2686, 2701), False, 'import torch\n'), ((2205, 2224), 'torch.pow', 'torch.pow', (['grads', '(2)'], {}), '(grads, 2)\n', (2214, 2224), False, 'import torch\n'), ((4728, 4760), 'torch.clamp', 'torch.clamp', (['grad_in[0]'], {'min': '(0.0)'}), '(grad_in[0], min=0.0)\n', (4739, 4760), False, 'import torch\n')]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as MSE
from sklearn import preprocessing
import math
import re
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
acquired_dict = {
'tr':'tr',
'fa':'fa',
'dr':'dr',
"Traded": "tr",
"Free Agency": "fa",
"Amateur Draft": "dr",
"Amateur Free Agent": "fa",
"Waivers": "tr",
"Purchased":"tr",
"Rule 5 Draft": "dr",
"Expansion Draft": "dr",
"Conditional Deal": "tr",
"Amateur Draft--no sign": "dr",
"MinorLg Draft": "dr",
"Rune 5 returned": "tr"
}
def inflation_calc(row):
inf_dict = {
2017: 1.0,
2016: 1.021299290023666,
2015: 1.0341874211554445,
2014: 1.0354149770208165,
2013: 1.0522113523096537,
2012: 1.0676237183898534,
2011: 1.089717656786951,
2010: 1.1241149062626115,
2009: 1.1425534989302544,
2008: 1.1384885486964882,
2007: 1.1822013870802828,
2006: 1.215873015873016,
2005: 1.2550947260624679,
2004: 1.297617787188989,
2003: 1.3324635790389214,
2002: 1.3626862352679565,
2001: 1.3843112893206078,
2000: 1.4234610917537749
}
return int(row['salary']*inf_dict[row['year']])
def fixtm(t):
if t == '2TM' or t == '3TM' or t == '4TM':
return 'multiple'
elif t == 'TBD':
return 'TBR'
elif t == 'MON':
return "WSN"
elif t == 'ANA':
return 'LAA'
elif t == 'FLA':
return 'MIA'
else: return t
def fix_name(n):
n1 = (' ').join(n.split('\xa0'))
n2 = re.sub(r'[^\w\s]','',n1)
return n2
def train_and_test(cutoff = 1000000):
train_X,train_y,test_X,test_y = load_and_split_data(cutoff)
lr = LinearRegression()
lr.fit(train_X, train_y)
preds = lr.predict(test_X)
error = np.sqrt(MSE(test_y,preds))
return round(10**error,2)
def cutoff_df(df,cutoff):
log_10_cut = math.log10(cutoff)
df = df[df['log10_adj'] >= log_10_cut]
return df
def test_cutoffs():
test_cutoffs = [(i+1)*100000 for i in range(20)]
error_list = []
for i in test_cutoffs:
error = train_and_test(i)
error_list.append(error)
return test_cutoffs,error_list
def test_elastic_cutoffs():
test_cutoffs = [(i+1)*100000 for i in range(20)]
error_list = []
for i in test_cutoffs:
error = elastic(i)
error_list.append(error)
return test_cutoffs,error_list
def load_data():
train = pd.read_pickle('batting_00_16.pkl')
test = pd.read_pickle('batting_17.pkl')
return pd.concat([train,test])
def ordered(row):
if row['name'] == row['np']:
return row['next_sal']
else:
return np.nan
def get_salary_for_next_year():
df = load_data()
df = engineer_features(df)
df = df.sort_values(by = ['name','year'])
df['next_sal'] = df['log10_adj'].shift(-1)
df['np'] = df['name'].shift(-1)
df['next_sal'] = df.apply(ordered,axis=1)
df = df.dropna()
df['log10_adj'] = df['next_sal']
df = df.drop(['next_sal','np'],axis=1)
train = df[df['year']<2016]
test = df[df['year']==2016]
return train,test
def engineer_features(df):
df = df[df.pa>200]
df = df.reset_index()
df['pa/g'] = df['pa']/df['g']
df['name'] = df['name'].apply(fix_name)
#adjust team names
df['tm'] = df['tm'].apply(fixtm)
#drop position summary (too many classes), log_sal (unscaled by inflation), rk (same as index)
df.drop(['pos\xa0summary','log_sal','rk','index'],axis=1,inplace=True)
#map values in acquired to 3 classes
df['acquired'] = df['acquired'].map(acquired_dict)
#adjust salary for inflation and take the log-10 for target column
df['adj_salary'] = df.apply(inflation_calc,axis=1)
df['log10_adj'] = np.log10(df['adj_salary'])
#get dummy variables for team, hand, and acquired columns
df = pd.get_dummies(df,columns = ['acquired','bat_hand','tm']).drop(['tm_multiple','bat_hand_rhb','acquired_tr'],axis=1)
#filter datasets for only batters with more than 200 plate appearances in season
return df
def new_features(df):
df['ba'] = df['h']/df['ab']
df['obp'] = (df['h']+df['bb']+df['hbp'])/(df['ab']+df['bb']+df['hbp']+df['sh'])
df['slg'] = (df['h']+df['2b']+2*df['3b']+3*df['hr'])/df['ab']
return df
def scaleColumns(df, cols_to_scale):
min_max_scaler = preprocessing.MinMaxScaler()
for col in cols_to_scale:
df[col] = pd.DataFrame(min_max_scaler.fit_transform(pd.DataFrame(df[col])),columns=[col])
return df
def rescale_numeric(df):
df = df.reset_index().drop(['index'],axis=1)
cols = ['g','pa','rbat','rbaser','rdp',
'rfield',
'rpos',
'raa',
'waa',
'rrep',
'rar',
'war',
'waawl%',
'162wl%',
'owar',
'dwar',
'orar',
'year',
'ab', 'r', 'h', '2b', '3b', 'hr', 'rbi', 'sb', 'cs', 'bb', 'so', 'ibb',
'hbp', 'sh', 'sf', 'gidp', 'years_in_mlb','pa/g','ba','obp','slg']
df = scaleColumns(df,cols)
return df
def combine_with_lehman_data(df):
players = pd.read_csv('baseballdatabank-master/core/People.csv')
#players = players.set_index('playerID')
drop_cols = ['deathYear','deathMonth','deathDay','deathCountry','deathState','deathCity',
'birthYear','birthMonth','birthDay','birthCountry','birthState','birthCity',
'nameGiven','weight','height','bats','throws','finalGame','retroID','bbrefID']
players = players.drop(drop_cols,axis=1)
players['fullname'] = players['nameFirst'] + ' ' + players['nameLast']
players = players.dropna()
players['fullname'] = players['fullname'].apply(lambda x: ''.join(re.sub(r'[^\w\s]','',x).split(' ')).lower())
batting = pd.read_csv('baseballdatabank-master/core/Batting.csv')
bats = batting[batting['yearID'] >= 2000]
bat_join = bats.merge(players,how='left',on='playerID')
keep_cols = ['yearID',
'G',
'AB',
'R',
'H',
'2B',
'3B',
'HR',
'RBI',
'SB',
'CS',
'BB',
'SO',
'IBB',
'HBP',
'SH',
'SF',
'GIDP',
'debut',
'fullname']
bat_join = bat_join[keep_cols]
bat_join.columns = [x.lower() for x in bat_join.columns]
bat_join = bat_join.groupby(['fullname','yearid','debut'],axis=0)['g','ab','r','h','2b','3b','hr','rbi','sb','cs','bb','so','ibb','hbp','sh','sf','gidp'].sum().reset_index()
bat_join['str_g'] = bat_join['g'].apply(str)
bat_join['str_year'] = bat_join['yearid'].apply(str)
bat_join['name_g_y'] = bat_join['fullname'] + ' ' + bat_join['str_g'] + ' ' + bat_join['str_year']
df['str_g'] = df['g'].apply(str)
df['str_year'] = df['year'].apply(str)
df['name'] = df['name'].apply(fix_aoki_and_castell)
df['name_g_y'] = df['name'].apply(lambda x: ''.join(x.split(' ')).lower()) + ' ' + df['str_g'] + ' ' + df['str_year']
df = df.merge(bat_join,how='left',on='name_g_y')
df = df.dropna()
df['debut_year'] = df['debut'].apply(lambda x: int(x.split('-')[0]))
df['years_in_mlb'] = df['year'] - df['debut_year']
df['g'] = df['g_x']
df = df.drop(['g_x','g_y','str_g_x','str_g_y','str_year_x','str_year_y','debut','debut_year','yearid','name_g_y','fullname'],axis=1)
return df
def fix_aoki_and_castell(name):
if name == '<NAME>':
return 'Nori Aoki'
elif name == '<NAME>':
return '<NAME>'
else: return name
def load_and_split_data(cutoff = 1):
#Load dataframes from pickle
train,test = get_salary_for_next_year()
#Combine calculated statistics scraped from baseball-reference with raw stats from Lehman database
train = combine_with_lehman_data(train)
test = combine_with_lehman_data(test)
train,test = new_features(train),new_features(test)
#Rescale numeric features to be (0,1)
train = rescale_numeric(train)
test = rescale_numeric(test)
#Cut dataframe by minimum salary
train = cutoff_df(train,cutoff)
test = cutoff_df(test,cutoff)
#Split into features and response matrices
train_y = train['log10_adj']
test_y = test['log10_adj']
train_X = train.drop(['name','age','log10_adj','salary','adj_salary'],axis=1)
test_X = test.drop(['name','age','log10_adj','salary','adj_salary'],axis=1)
return train_X, train_y, test_X, test_y
def plot_with_cutoff(cut,err):
fig,ax = plt.subplots(figsize=(4,4))
ax.scatter([i/1000 for i in cut],err)
ax.set_title("Error Factor vs Cutoff Salary")
ax.set_ylabel("Error Factor")
ax.set_xlabel("Minimum Salary (Thousands)")
ax.set_yticks([1,1.5,2,2.5,3]);
ax.set_xticks([0,500,1000,1500,2000]);
plt.tight_layout()
def plot_with_cutoff_and_cols(cut,err,cols):
fig,ax = plt.subplots(figsize=(4,4))
ax.scatter([i/1000 for i in cut],err)
ax.scatter([i/1000 for i in cut],[i/10 for i in cols])
ax.set_title("Error Factor vs Cutoff Salary")
ax.set_ylabel("Error Factor")
ax.set_xlabel("Minimum Salary (Thousands)")
ax.set_yticks([1,1.5,2,2.5,3]);
ax.set_xticks([0,500,1000,1500,2000]);
plt.tight_layout()
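# --- Usage sketch (illustrative, not part of the original module) ----------
# Sweeps the minimum-salary cutoff and plots the resulting error factor. It
# assumes the pickled batting data and Lehman CSVs referenced above are
# present in the working directory; the output file name is a demo choice.
if __name__ == '__main__':
    cuts, errors = test_cutoffs()
    plot_with_cutoff(cuts, errors)
    plt.savefig('error_vs_cutoff.png')
    print(dict(zip(cuts, errors)))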
|
[
"pandas.DataFrame",
"matplotlib.pyplot.tight_layout",
"warnings.filterwarnings",
"pandas.read_csv",
"pandas.get_dummies",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.subplots",
"sklearn.linear_model.LinearRegression",
"math.log10",
"pandas.read_pickle",
"numpy.log10",
"re.sub",
"pandas.concat",
"sklearn.metrics.mean_squared_error"
] |
[((248, 336), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'module': '"""scipy"""', 'message': '"""^internal gelsd"""'}), "(action='ignore', module='scipy', message=\n '^internal gelsd')\n", (271, 336), False, 'import warnings\n'), ((1700, 1727), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 'n1'], {}), "('[^\\\\w\\\\s]', '', n1)\n", (1706, 1727), False, 'import re\n'), ((1857, 1875), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1873, 1875), False, 'from sklearn.linear_model import LinearRegression\n'), ((2131, 2149), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (2147, 2149), False, 'from sklearn.linear_model import LinearRegression\n'), ((2332, 2350), 'math.log10', 'math.log10', (['cutoff'], {}), '(cutoff)\n', (2342, 2350), False, 'import math\n'), ((2890, 2925), 'pandas.read_pickle', 'pd.read_pickle', (['"""batting_00_16.pkl"""'], {}), "('batting_00_16.pkl')\n", (2904, 2925), True, 'import pandas as pd\n'), ((2937, 2969), 'pandas.read_pickle', 'pd.read_pickle', (['"""batting_17.pkl"""'], {}), "('batting_17.pkl')\n", (2951, 2969), True, 'import pandas as pd\n'), ((2981, 3005), 'pandas.concat', 'pd.concat', (['[train, test]'], {}), '([train, test])\n', (2990, 3005), True, 'import pandas as pd\n'), ((4218, 4244), 'numpy.log10', 'np.log10', (["df['adj_salary']"], {}), "(df['adj_salary'])\n", (4226, 4244), True, 'import numpy as np\n'), ((4817, 4845), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (4843, 4845), False, 'from sklearn import preprocessing\n'), ((5586, 5640), 'pandas.read_csv', 'pd.read_csv', (['"""baseballdatabank-master/core/People.csv"""'], {}), "('baseballdatabank-master/core/People.csv')\n", (5597, 5640), True, 'import pandas as pd\n'), ((6255, 6310), 'pandas.read_csv', 'pd.read_csv', (['"""baseballdatabank-master/core/Batting.csv"""'], {}), "('baseballdatabank-master/core/Batting.csv')\n", (6266, 6310), True, 'import pandas as pd\n'), ((8935, 8963), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (8947, 8963), True, 'import matplotlib.pyplot as plt\n'), ((9221, 9239), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9237, 9239), True, 'import matplotlib.pyplot as plt\n'), ((9299, 9327), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (9311, 9327), True, 'import matplotlib.pyplot as plt\n'), ((9644, 9662), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9660, 9662), True, 'import matplotlib.pyplot as plt\n'), ((1959, 1977), 'sklearn.metrics.mean_squared_error', 'MSE', (['test_y', 'preds'], {}), '(test_y, preds)\n', (1962, 1977), True, 'from sklearn.metrics import mean_squared_error as MSE\n'), ((2233, 2251), 'sklearn.metrics.mean_squared_error', 'MSE', (['test_y', 'preds'], {}), '(test_y, preds)\n', (2236, 2251), True, 'from sklearn.metrics import mean_squared_error as MSE\n'), ((4321, 4379), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': "['acquired', 'bat_hand', 'tm']"}), "(df, columns=['acquired', 'bat_hand', 'tm'])\n", (4335, 4379), True, 'import pandas as pd\n'), ((4936, 4957), 'pandas.DataFrame', 'pd.DataFrame', (['df[col]'], {}), '(df[col])\n', (4948, 4957), True, 'import pandas as pd\n'), ((6191, 6217), 're.sub', 're.sub', (['"""[^\\\\w\\\\s]"""', '""""""', 'x'], {}), "('[^\\\\w\\\\s]', '', x)\n", (6197, 6217), False, 'import re\n')]
|
import random
from pathlib import Path
from functools import lru_cache
import cv2
import numpy as np
from imutils import paths
import matplotlib.pyplot as plt
from utils import *
def resize_to_fit_downscale(image, down_scale=16):
img_h, img_w = image.shape[:2]
img_h = round_up_dividend(img_h, down_scale)
img_w = round_up_dividend(img_w, down_scale)
image = cv2.resize(image, (img_w, img_h))
return image
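# Example: with down_scale=16, a 750x1000 image becomes 752x1008
# (round_up_dividend from utils is assumed to round a value up to the nearest
# multiple of down_scale), so repeated 2x downsampling in the network divides evenly.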
def image_to_text_masks(image, annotations, down_scale):
original_img_h, original_img_w = image.shape[:2]
image = resize_to_fit_downscale(image, down_scale)
img_h, img_w = image.shape[:2]
ratio_h = img_h / original_img_h
ratio_w = img_w / original_img_w
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = 1.0 - image / 255
all_text_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
key_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
value_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
other_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
for field in annotations:
for word in field['words']:
x1, y1, x2, y2 = (np.array(word['box']) * [ratio_w, ratio_h, ratio_w, ratio_h]).astype(int)
all_text_mask[y1:y2, x1:x2] = 1.0
if field['label'] == 'question':
key_mask[y1:y2, x1:x2] = 1.0
elif field['label'] == 'answer':
value_mask[y1:y2, x1:x2] = 1.0
else:
other_mask[y1:y2, x1:x2] = 1.0
background = (1 - key_mask) * (1 - value_mask) * (1 - other_mask)
# background = (1 - key_mask) * (1 - value_mask)
# plt.subplot('231')
# plt.imshow(image)
# plt.subplot('232')
# plt.imshow(all_text_mask[..., 0])
# plt.subplot('233')
# plt.imshow(key_mask[..., 0])
# plt.subplot('234')
# plt.imshow(value_mask[..., 0])
# plt.subplot('235')
# plt.imshow(other_mask[..., 0])
# plt.subplot('236')
# plt.imshow(background[..., 0])
# plt.show()
return (image,
all_text_mask,
key_mask,
value_mask,
other_mask,
background)
def image_to_relation_masks(image, annotations, down_scale):
original_img_h, original_img_w = image.shape[:2]
image = resize_to_fit_downscale(image, down_scale)
img_h, img_w = image.shape[:2]
ratio_h = img_h / original_img_h
ratio_w = img_w / original_img_w
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
image = 1.0 - image / 255
all_text_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
key_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
value_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
other_mask = np.zeros((img_h, img_w, 1), dtype=np.uint8)
annotations_map = {str(x['id']): x for x in annotations}
for field in annotations:
for word in field['words']:
x1, y1, x2, y2 = (np.array(word['box']) * [ratio_w, ratio_h, ratio_w, ratio_h]).astype(int)
all_text_mask[y1:y2, x1:x2] = 1.0
horizontal_relation_mask, vertical_relation_mask = \
draw_relation(all_text_mask.shape, annotations_map, colour=1)
# plt.subplot('231')
# plt.imshow(image)
#
# plt.subplot('232')
# blended = (1 - image) * 0.6 + (1 - all_text_mask[..., 0]) * 0.4
# plt.imshow(blended)
#
# blended = (1 - image) * 0.6 + (1 - horizontal_relation_mask[..., 0]) * 0.4
# plt.subplot('234')
# plt.imshow(blended)
#
# blended = (1 - image) * 0.6 + (1 - vertical_relation_mask[..., 0]) * 0.4
# plt.subplot('235')
# plt.imshow(blended)
#
# relation_mask = np.maximum.reduce([all_text_mask,
# horizontal_relation_mask,
# vertical_relation_mask])
# blended = (1 - image) * 0.6 + (1 - relation_mask[..., 0]) * 0.4
# plt.subplot('236')
# plt.imshow(blended)
#
# plt.show()
return (image,
all_text_mask,
horizontal_relation_mask,
vertical_relation_mask)
@lru_cache(None)
def read_img(img_path):
return cv2.imread(img_path, cv2.IMREAD_COLOR)
mask_cache = {}
def data_generator(data_dir, mask_type, portion=1.0, down_scale=16, shuffle=False):
image_path = Path(data_dir) / 'images'
image_map = {get_file_name(p): p
for p in paths.list_images(image_path)}
annotation_path = Path(data_dir) / 'adjusted_annotations'
if not annotation_path.exists():
annotation_path = Path(data_dir) / 'annotations'
label_map = {get_file_name(p): p
for p in paths.list_files(annotation_path, validExts=('.json'))}
chosen_keys = sorted(list(image_map.keys()))
amount = round(len(chosen_keys) * portion)
if amount > 0:
chosen_keys = chosen_keys[:amount]
else:
chosen_keys = chosen_keys[amount:]
while True:
if shuffle:
random.shuffle(chosen_keys)
for k in chosen_keys:
# print(k)
# image = cv2.imread(image_map[k], cv2.IMREAD_GRAYSCALE) / 255
# image = 1.0 - image
image = read_img(image_map[k])
annotations = read_json(label_map[k])['form']
if mask_type == 'text_detection':
if k not in mask_cache:
mask_cache[k] = image_to_text_masks(image, annotations,
down_scale)
resized_grey_image, all_text_mask, *output_masks = mask_cache[k]
input = resized_grey_image[None, ..., None]
output = all_text_mask[None, ...]
# plt.subplot('121')
# plt.imshow(output_masks[..., 0] * 255)
# plt.subplot('122')
# plt.imshow(output_masks[..., 1])
# plt.show()
yield (input, output)
elif mask_type == 'text_classification':
if k not in mask_cache:
mask_cache[k] = image_to_text_masks(image, annotations,
down_scale)
resized_grey_image, all_text_mask, *output_masks = mask_cache[k]
input = np.dstack([resized_grey_image, all_text_mask])
output_masks = np.dstack(output_masks)
# plt.subplot('121')
# plt.imshow(output_masks[..., 0] * 255)
# plt.subplot('122')
# plt.imshow(output_masks[..., 1])
# plt.show()
yield (input[None, ...], output_masks[None, ...])
elif mask_type == 'relation':
if k not in mask_cache:
mask_cache[k] = image_to_relation_masks(image, annotations,
down_scale)
(
resized_grey_image,
all_text_mask,
horizontal_relation_mask,
vertical_relation_mask
) = mask_cache[k]
input = np.dstack([resized_grey_image, all_text_mask])
yield (input[None, ...],
{'horizontal_relation_mask': horizontal_relation_mask[None,...],
'vertical_relation_mask': vertical_relation_mask[None, ...]})
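# Illustrative usage (assuming the dataset layout used in __main__ below):
#   gen = data_generator('dataset//training_data', mask_type='text_detection')
#   inputs, targets = next(gen)
#   # inputs: (1, H, W, 1) inverted grey image, targets: (1, H, W, 1) binary text mask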
if __name__ == '__main__':
    # Smoke test: pull one sample (mask_type must be 'text_detection',
    # 'text_classification' or 'relation').
    next(data_generator('dataset//training_data', mask_type='text_detection'))
# sizes = []
# max_w, max_h = 0, 0
# for p in paths.list_images('dataset//training_data//images'):
# img = cv2.imread(p)
# h, w = img.shape[:2]
# sizes.append((w, h))
# if h > max_h:
# max_h = h
# if w > max_w:
# max_w = w
#
# img = np.zeros((max_h, max_w))
# for w, h in sizes:
# img[:h, :w] += 1
#
# img /= np.max(img)
#
# plt.imshow(img)
# plt.show()
#
# plt.subplot('121')
# plt.hist([x[0] for x in sizes])
# plt.subplot('122')
# plt.hist([x[1] for x in sizes])
# plt.show()
|
[
"numpy.dstack",
"imutils.paths.list_images",
"imutils.paths.list_files",
"cv2.cvtColor",
"random.shuffle",
"numpy.zeros",
"cv2.imread",
"pathlib.Path",
"numpy.array",
"functools.lru_cache",
"cv2.resize"
] |
[((4127, 4142), 'functools.lru_cache', 'lru_cache', (['None'], {}), '(None)\n', (4136, 4142), False, 'from functools import lru_cache\n'), ((378, 411), 'cv2.resize', 'cv2.resize', (['image', '(img_w, img_h)'], {}), '(image, (img_w, img_h))\n', (388, 411), False, 'import cv2\n'), ((721, 760), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (733, 760), False, 'import cv2\n'), ((816, 859), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (824, 859), True, 'import numpy as np\n'), ((875, 918), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (883, 918), True, 'import numpy as np\n'), ((936, 979), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (944, 979), True, 'import numpy as np\n'), ((997, 1040), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (1005, 1040), True, 'import numpy as np\n'), ((2459, 2498), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_RGB2GRAY'], {}), '(image, cv2.COLOR_RGB2GRAY)\n', (2471, 2498), False, 'import cv2\n'), ((2554, 2597), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (2562, 2597), True, 'import numpy as np\n'), ((2613, 2656), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (2621, 2656), True, 'import numpy as np\n'), ((2674, 2717), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (2682, 2717), True, 'import numpy as np\n'), ((2735, 2778), 'numpy.zeros', 'np.zeros', (['(img_h, img_w, 1)'], {'dtype': 'np.uint8'}), '((img_h, img_w, 1), dtype=np.uint8)\n', (2743, 2778), True, 'import numpy as np\n'), ((4178, 4216), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_COLOR'], {}), '(img_path, cv2.IMREAD_COLOR)\n', (4188, 4216), False, 'import cv2\n'), ((4335, 4349), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (4339, 4349), False, 'from pathlib import Path\n'), ((4483, 4497), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (4487, 4497), False, 'from pathlib import Path\n'), ((4425, 4454), 'imutils.paths.list_images', 'paths.list_images', (['image_path'], {}), '(image_path)\n', (4442, 4454), False, 'from imutils import paths\n'), ((4586, 4600), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (4590, 4600), False, 'from pathlib import Path\n'), ((4681, 4733), 'imutils.paths.list_files', 'paths.list_files', (['annotation_path'], {'validExts': '""".json"""'}), "(annotation_path, validExts='.json')\n", (4697, 4733), False, 'from imutils import paths\n'), ((5002, 5029), 'random.shuffle', 'random.shuffle', (['chosen_keys'], {}), '(chosen_keys)\n', (5016, 5029), False, 'import random\n'), ((6444, 6490), 'numpy.dstack', 'np.dstack', (['[resized_grey_image, all_text_mask]'], {}), '([resized_grey_image, all_text_mask])\n', (6453, 6490), True, 'import numpy as np\n'), ((6522, 6545), 'numpy.dstack', 'np.dstack', (['output_masks'], {}), '(output_masks)\n', (6531, 6545), True, 'import numpy as np\n'), ((1142, 1163), 'numpy.array', 'np.array', (["word['box']"], {}), "(word['box'])\n", (1150, 1163), True, 'import numpy as np\n'), ((2941, 2962), 'numpy.array', 'np.array', (["word['box']"], {}), "(word['box'])\n", 
(2949, 2962), True, 'import numpy as np\n'), ((7369, 7415), 'numpy.dstack', 'np.dstack', (['[resized_grey_image, all_text_mask]'], {}), '([resized_grey_image, all_text_mask])\n', (7378, 7415), True, 'import numpy as np\n')]
|
import unittest
from src.point_location import *
from src.structures import *
from src.graph import *
import numpy as np
from numpy import array
from tqdm import tqdm
class TestTrapezoidRep(unittest.TestCase):
def test_three_init(self):
vertices = np.array([[10, 150], [200, 20], [200, 100]])
trap = Trapezoid(vertices, originator_vertices=[])
np.testing.assert_equal(trap.top_line, np.array([[10, 150], [200, 100]]))
np.testing.assert_equal(trap.bottom_line, np.array([[10, 150], [200, 20]]))
self.assertEqual(trap.left_p[0], 10)
self.assertEqual(trap.right_p[0], 200)
def test_four_init(self):
vertices = np.array([[10, 10], [200, 20], [200, 100], [10, 300]])
trap = Trapezoid(vertices, originator_vertices=[])
np.testing.assert_equal(trap.top_line, np.array([[10, 300], [200, 100]]))
np.testing.assert_equal(trap.bottom_line, np.array([[10, 10], [200, 20]]))
self.assertEqual(trap.left_p[0], 10)
self.assertEqual(trap.right_p[0], 200)
def test_specific_1(self):
vertices = np.array([[240., 300.],
[240., 253.33333333],
[100., 300.]])
trap = Trapezoid(vertices, originator_vertices=[])
self.assertTrue(np.allclose(trap.bottom_line, np.array([[100, 300], [240, 253.33333333333]])))
np.testing.assert_equal(trap.top_line, np.array([[100, 300], [240, 300]]))
self.assertEqual(trap.left_p[0], 100)
self.assertEqual(trap.right_p[0], 240)
def test_specific_2(self):
vertices = np.array([[353., 123.98305085],
[275., 122.],
[275., 790.],
[353., 790.]])
trap = Trapezoid(vertices, originator_vertices=[])
np.testing.assert_equal(trap.top_line, np.array([[275., 790.], [353., 790.]]))
self.assertTrue(np.allclose(trap.bottom_line, np.array([[275., 122.], [353., 123.98305085]])))
self.assertEqual(trap.left_p[0], 275)
self.assertEqual(trap.right_p[0], 353)
def test_is_left_pointed(self):
vertices = np.array([[309., 169.],
[471., 170.71247357],
[471., 69.]])
trap = Trapezoid(vertices, originator_vertices=[])
self.assertTrue(trap.is_left_pointed())
class TestTrapezoidIntersection(unittest.TestCase):
def test_left_corner(self):
vertices = np.array([[10, 150], [200, 60], [200, 10]])
trap = Trapezoid(vertices, originator_vertices=[])
edge = np.array([[10, 150], [205, 50]])
self.assertTrue(trap.is_intersected(edge))
def test_right_corner(self):
vertices = np.array([[10, 10], [10, 300], [400, 150]])
trap = Trapezoid(vertices, originator_vertices=[])
edge = np.array([[0, 100], [400, 150]])
self.assertTrue(trap.is_intersected(edge))
def test_left_of_trapezoid(self):
vertices = np.array([[10, 10], [200, 20], [200, 100], [10, 300]])
trap = Trapezoid(vertices, originator_vertices=[])
edge = np.array([[0, 100], [100, 25]])
self.assertTrue(trap.is_intersected(edge))
def test_right_of_trapezoid(self):
pass
def test_no_intersect(self):
vertices = np.array([[10, 10], [200, 20], [200, 100], [10, 300]])
trap = Trapezoid(vertices, originator_vertices=[])
edge = np.array([[0, 20], [10, 40]])
self.assertFalse(trap.is_intersected(edge))
def test_top_tangent(self):
vertices = np.array([[491., 186.],
[237., 179.],
[237., 790.],
[491., 790.]])
trap = Trapezoid(vertices, originator_vertices=[])
edge = np.array([[237, 179],
[353, 114]])
self.assertFalse(trap.is_intersected(edge))
def test_same_upper_right(self):
vertices = np.array([[295., 138.51724138],
[252., 147.4137931 ],
[252., 50.],
[295. , 60.]])
trap = Trapezoid(vertices, originator_vertices=[])
edge = np.array([[242, 60],
[295, 60]])
self.assertTrue(trap.is_intersected(edge))
class TestTrapezoidsRightAdjacent(unittest.TestCase):
def test_trapezoids_above(self):
pass
def test_trapezoids_below(self):
pass
def test_trapezoids_next_to(self):
pass
def test_failure_case(self):
triangles = [np.array([[249, 111],
[184, 172],
[311, 170]]), np.array([[261, 213],
[386, 198],
[283, 268]])]
top_triangle_edges = np.array([[[184, 172], [371, 170]],
[[184, 172], [249, 111]],
[[249, 111], [371, 170]]])
bottom_triangle_edges = np.array([[[386, 198], [283, 268]],
[[261, 213], [283, 268]],
[[261, 213], [386, 198]]])
polygons = Polygons(triangles)
bounds = [10, 10, 790, 790]
point_locator = PointLocator(bounds)
"""
for edge in np.concatenate([top_triangle_edges, bottom_triangle_edges]):
point_locator.add_line(edge)
"""
point_locator.add_line(top_triangle_edges[0])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 4)
point_locator.add_line(top_triangle_edges[1])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(249)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 6)
point_locator.add_line(top_triangle_edges[2])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(249)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 7)
point_locator.add_line(bottom_triangle_edges[0])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(249)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(283)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(386)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 10)
point_locator.add_line(bottom_triangle_edges[1])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(249)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(261)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(283)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(386)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 12)
point_locator.add_line(bottom_triangle_edges[2])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(249)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(261)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(283)), 2)
print(point_locator.trapezoids.right_adjacent_to(371))
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(386)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 13)
point_locator.remove_traps_within_polygons(polygons)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(184)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(249)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(261)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(371)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(283)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(386)), 1)
class TestGraphBuilding(unittest.TestCase):
def test_random(self):
bounds = [10, 10, 790, 790]
for _ in tqdm(range(100)):
random_polygons = Polygons(Polygons.make_random(bounds, 50))
point_locator = PointLocator(bounds)
for edge in random_polygons.random_edge_sampler():
point_locator.add_line(edge)
graph = Graph(point_locator, 10)
class TestIntegration(unittest.TestCase):
def test_twotriangles(self):
bounds = [10, 10, 790, 790]
point_locator = PointLocator(bounds)
# self.assertEqual(len(point_locator.trapezoids.trapezoids), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
top_triangle_edges = [np.array([[200, 100], [240, 30]]),
np.array([[200, 100], [280, 100]]),
np.array([[280, 100], [240, 30]])]
bottom_triangle_edges = [np.array([[100, 300], [400, 300]]),
np.array([[100, 300], [400, 200]]),
np.array([[400, 300], [400, 200]])]
point_locator.add_line(top_triangle_edges[0])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
sorted_traps = point_locator.trapezoids.right_adjacent_to(10)
trap_idx = sorted_traps[sorted_traps.keys()[0]].index
self.assertEqual(len(point_locator.trapezoids.right_adjacent(trap_idx)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(200)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(240)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 4)
point_locator.add_line(bottom_triangle_edges[0])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(200)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(100)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(240)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(400)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 7)
point_locator.add_line(bottom_triangle_edges[1])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(200)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(100)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(240)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(400)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 8)
point_locator.add_line(top_triangle_edges[1])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(200)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(100)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(240)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(280)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(400)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 10)
point_locator.add_line(top_triangle_edges[2])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
sorted_traps = point_locator.trapezoids.right_adjacent_to(10)
trap_idx = sorted_traps[sorted_traps.keys()[0]].index
self.assertEqual(len(point_locator.trapezoids.right_adjacent(trap_idx)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(200)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(100)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(240)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(280)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(400)), 1)
def test_specific_3(self):
bounds = [10, 10, 790, 790]
point_locator = PointLocator(bounds)
edges = [np.array([[100.41771139, 497.65833091],
[193.75398968, 339.39024785]]),
np.array([[100.41771139, 497.65833091],
[168.82113323, 479.70436783]]),
np.array([[168.82113323, 479.70436783],
[193.75398968, 339.39024785]])]
point_locator.add_line(edges[0])
point_locator.add_line(edges[1])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(168.82113323)), 1)
point_locator.trapezoids.trap_count()
# self.assertEqual(len(point_locator.trapezoids.right_adjacent(7)), 1)
point_locator.add_line(edges[2])
def test_specific_4(self):
bounds = [10, 10, 79.0, 79.0]
point_locator = PointLocator(bounds)
edges = [np.array([[27.54014023, 50.39508477],
[33.87852725, 21.53020476]]), np.array([[16.20062533, 38.51858695],
[27.54014023, 50.39508477]]), np.array([[16.20062533, 38.51858695],
[33.87852725, 21.53020476]])]
point_locator.add_line(edges[0])
point_locator.add_line(edges[1])
point_locator.trapezoids.trap_count()
point_locator.add_line(edges[2])
def test_specific_5(self):
bounds = [10, 10, 790, 790]
edges = [array([[443, 737],
[550, 780]]), array([[309, 169],
[471, 69]]), array([[309, 169],
[782, 174]]), array([[156, 719],
[550, 780]])]
point_locator = PointLocator(bounds)
point_locator.add_line(edges[0])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(443)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(550)), 1)
point_locator.add_line(edges[1])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(309)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(443)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(471)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(550)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 7)
point_locator.add_line(edges[2])
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(10)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(309)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(443)), 2)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(471)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(550)), 1)
self.assertEqual(len(point_locator.trapezoids.right_adjacent_to(782)), 1)
self.assertEqual(point_locator.trapezoids.trap_count(), 9)
point_locator.add_line(edges[3])
def test_random(self):
bounds = [10, 10, 790, 790]
for _ in tqdm(range(100)):
random_polygons = Polygons(Polygons.make_random(bounds, 40))
point_locator = PointLocator(bounds)
for edge in random_polygons.random_edge_sampler():
point_locator.add_line(edge)
|
[
"numpy.array"
] |
[((263, 307), 'numpy.array', 'np.array', (['[[10, 150], [200, 20], [200, 100]]'], {}), '([[10, 150], [200, 20], [200, 100]])\n', (271, 307), True, 'import numpy as np\n'), ((675, 729), 'numpy.array', 'np.array', (['[[10, 10], [200, 20], [200, 100], [10, 300]]'], {}), '([[10, 10], [200, 20], [200, 100], [10, 300]])\n', (683, 729), True, 'import numpy as np\n'), ((1101, 1166), 'numpy.array', 'np.array', (['[[240.0, 300.0], [240.0, 253.33333333], [100.0, 300.0]]'], {}), '([[240.0, 300.0], [240.0, 253.33333333], [100.0, 300.0]])\n', (1109, 1166), True, 'import numpy as np\n'), ((1614, 1700), 'numpy.array', 'np.array', (['[[353.0, 123.98305085], [275.0, 122.0], [275.0, 790.0], [353.0, 790.0]]'], {}), '([[353.0, 123.98305085], [275.0, 122.0], [275.0, 790.0], [353.0, \n 790.0]])\n', (1622, 1700), True, 'import numpy as np\n'), ((2178, 2242), 'numpy.array', 'np.array', (['[[309.0, 169.0], [471.0, 170.71247357], [471.0, 69.0]]'], {}), '([[309.0, 169.0], [471.0, 170.71247357], [471.0, 69.0]])\n', (2186, 2242), True, 'import numpy as np\n'), ((2507, 2550), 'numpy.array', 'np.array', (['[[10, 150], [200, 60], [200, 10]]'], {}), '([[10, 150], [200, 60], [200, 10]])\n', (2515, 2550), True, 'import numpy as np\n'), ((2625, 2657), 'numpy.array', 'np.array', (['[[10, 150], [205, 50]]'], {}), '([[10, 150], [205, 50]])\n', (2633, 2657), True, 'import numpy as np\n'), ((2762, 2805), 'numpy.array', 'np.array', (['[[10, 10], [10, 300], [400, 150]]'], {}), '([[10, 10], [10, 300], [400, 150]])\n', (2770, 2805), True, 'import numpy as np\n'), ((2880, 2912), 'numpy.array', 'np.array', (['[[0, 100], [400, 150]]'], {}), '([[0, 100], [400, 150]])\n', (2888, 2912), True, 'import numpy as np\n'), ((3022, 3076), 'numpy.array', 'np.array', (['[[10, 10], [200, 20], [200, 100], [10, 300]]'], {}), '([[10, 10], [200, 20], [200, 100], [10, 300]])\n', (3030, 3076), True, 'import numpy as np\n'), ((3151, 3182), 'numpy.array', 'np.array', (['[[0, 100], [100, 25]]'], {}), '([[0, 100], [100, 25]])\n', (3159, 3182), True, 'import numpy as np\n'), ((3340, 3394), 'numpy.array', 'np.array', (['[[10, 10], [200, 20], [200, 100], [10, 300]]'], {}), '([[10, 10], [200, 20], [200, 100], [10, 300]])\n', (3348, 3394), True, 'import numpy as np\n'), ((3469, 3498), 'numpy.array', 'np.array', (['[[0, 20], [10, 40]]'], {}), '([[0, 20], [10, 40]])\n', (3477, 3498), True, 'import numpy as np\n'), ((3607, 3681), 'numpy.array', 'np.array', (['[[491.0, 186.0], [237.0, 179.0], [237.0, 790.0], [491.0, 790.0]]'], {}), '([[491.0, 186.0], [237.0, 179.0], [237.0, 790.0], [491.0, 790.0]])\n', (3615, 3681), True, 'import numpy as np\n'), ((3832, 3866), 'numpy.array', 'np.array', (['[[237, 179], [353, 114]]'], {}), '([[237, 179], [353, 114]])\n', (3840, 3866), True, 'import numpy as np\n'), ((4001, 4091), 'numpy.array', 'np.array', (['[[295.0, 138.51724138], [252.0, 147.4137931], [252.0, 50.0], [295.0, 60.0]]'], {}), '([[295.0, 138.51724138], [252.0, 147.4137931], [252.0, 50.0], [\n 295.0, 60.0]])\n', (4009, 4091), True, 'import numpy as np\n'), ((4242, 4274), 'numpy.array', 'np.array', (['[[242, 60], [295, 60]]'], {}), '([[242, 60], [295, 60]])\n', (4250, 4274), True, 'import numpy as np\n'), ((4821, 4913), 'numpy.array', 'np.array', (['[[[184, 172], [371, 170]], [[184, 172], [249, 111]], [[249, 111], [371, 170]]]'], {}), '([[[184, 172], [371, 170]], [[184, 172], [249, 111]], [[249, 111],\n [371, 170]]])\n', (4829, 4913), True, 'import numpy as np\n'), ((5020, 5112), 'numpy.array', 'np.array', (['[[[386, 198], [283, 268]], [[261, 213], [283, 268]], [[261, 
213], [386, 198]]]'], {}), '([[[386, 198], [283, 268]], [[261, 213], [283, 268]], [[261, 213],\n [386, 198]]])\n', (5028, 5112), True, 'import numpy as np\n'), ((414, 447), 'numpy.array', 'np.array', (['[[10, 150], [200, 100]]'], {}), '([[10, 150], [200, 100]])\n', (422, 447), True, 'import numpy as np\n'), ((499, 531), 'numpy.array', 'np.array', (['[[10, 150], [200, 20]]'], {}), '([[10, 150], [200, 20]])\n', (507, 531), True, 'import numpy as np\n'), ((836, 869), 'numpy.array', 'np.array', (['[[10, 300], [200, 100]]'], {}), '([[10, 300], [200, 100]])\n', (844, 869), True, 'import numpy as np\n'), ((921, 952), 'numpy.array', 'np.array', (['[[10, 10], [200, 20]]'], {}), '([[10, 10], [200, 20]])\n', (929, 952), True, 'import numpy as np\n'), ((1430, 1464), 'numpy.array', 'np.array', (['[[100, 300], [240, 300]]'], {}), '([[100, 300], [240, 300]])\n', (1438, 1464), True, 'import numpy as np\n'), ((1882, 1924), 'numpy.array', 'np.array', (['[[275.0, 790.0], [353.0, 790.0]]'], {}), '([[275.0, 790.0], [353.0, 790.0]])\n', (1890, 1924), True, 'import numpy as np\n'), ((4616, 4662), 'numpy.array', 'np.array', (['[[249, 111], [184, 172], [311, 170]]'], {}), '([[249, 111], [184, 172], [311, 170]])\n', (4624, 4662), True, 'import numpy as np\n'), ((4704, 4750), 'numpy.array', 'np.array', (['[[261, 213], [386, 198], [283, 268]]'], {}), '([[261, 213], [386, 198], [283, 268]])\n', (4712, 4750), True, 'import numpy as np\n'), ((10217, 10250), 'numpy.array', 'np.array', (['[[200, 100], [240, 30]]'], {}), '([[200, 100], [240, 30]])\n', (10225, 10250), True, 'import numpy as np\n'), ((10282, 10316), 'numpy.array', 'np.array', (['[[200, 100], [280, 100]]'], {}), '([[200, 100], [280, 100]])\n', (10290, 10316), True, 'import numpy as np\n'), ((10348, 10381), 'numpy.array', 'np.array', (['[[280, 100], [240, 30]]'], {}), '([[280, 100], [240, 30]])\n', (10356, 10381), True, 'import numpy as np\n'), ((10417, 10451), 'numpy.array', 'np.array', (['[[100, 300], [400, 300]]'], {}), '([[100, 300], [400, 300]])\n', (10425, 10451), True, 'import numpy as np\n'), ((10485, 10519), 'numpy.array', 'np.array', (['[[100, 300], [400, 200]]'], {}), '([[100, 300], [400, 200]])\n', (10493, 10519), True, 'import numpy as np\n'), ((10553, 10587), 'numpy.array', 'np.array', (['[[400, 300], [400, 200]]'], {}), '([[400, 300], [400, 200]])\n', (10561, 10587), True, 'import numpy as np\n'), ((13750, 13820), 'numpy.array', 'np.array', (['[[100.41771139, 497.65833091], [193.75398968, 339.39024785]]'], {}), '([[100.41771139, 497.65833091], [193.75398968, 339.39024785]])\n', (13758, 13820), True, 'import numpy as np\n'), ((13866, 13936), 'numpy.array', 'np.array', (['[[100.41771139, 497.65833091], [168.82113323, 479.70436783]]'], {}), '([[100.41771139, 497.65833091], [168.82113323, 479.70436783]])\n', (13874, 13936), True, 'import numpy as np\n'), ((13982, 14052), 'numpy.array', 'np.array', (['[[168.82113323, 479.70436783], [193.75398968, 339.39024785]]'], {}), '([[168.82113323, 479.70436783], [193.75398968, 339.39024785]])\n', (13990, 14052), True, 'import numpy as np\n'), ((14561, 14627), 'numpy.array', 'np.array', (['[[27.54014023, 50.39508477], [33.87852725, 21.53020476]]'], {}), '([[27.54014023, 50.39508477], [33.87852725, 21.53020476]])\n', (14569, 14627), True, 'import numpy as np\n'), ((14636, 14702), 'numpy.array', 'np.array', (['[[16.20062533, 38.51858695], [27.54014023, 50.39508477]]'], {}), '([[16.20062533, 38.51858695], [27.54014023, 50.39508477]])\n', (14644, 14702), True, 'import numpy as np\n'), ((14711, 14777), 'numpy.array', 
'np.array', (['[[16.20062533, 38.51858695], [33.87852725, 21.53020476]]'], {}), '([[16.20062533, 38.51858695], [33.87852725, 21.53020476]])\n', (14719, 14777), True, 'import numpy as np\n'), ((15050, 15081), 'numpy.array', 'array', (['[[443, 737], [550, 780]]'], {}), '([[443, 737], [550, 780]])\n', (15055, 15081), False, 'from numpy import array\n'), ((15103, 15133), 'numpy.array', 'array', (['[[309, 169], [471, 69]]'], {}), '([[309, 169], [471, 69]])\n', (15108, 15133), False, 'from numpy import array\n'), ((15156, 15187), 'numpy.array', 'array', (['[[309, 169], [782, 174]]'], {}), '([[309, 169], [782, 174]])\n', (15161, 15187), False, 'from numpy import array\n'), ((15209, 15240), 'numpy.array', 'array', (['[[156, 719], [550, 780]]'], {}), '([[156, 719], [550, 780]])\n', (15214, 15240), False, 'from numpy import array\n'), ((1334, 1380), 'numpy.array', 'np.array', (['[[100, 300], [240, 253.33333333333]]'], {}), '([[100, 300], [240, 253.33333333333]])\n', (1342, 1380), True, 'import numpy as np\n'), ((1976, 2025), 'numpy.array', 'np.array', (['[[275.0, 122.0], [353.0, 123.98305085]]'], {}), '([[275.0, 122.0], [353.0, 123.98305085]])\n', (1984, 2025), True, 'import numpy as np\n')]
|
import numpy as np
from PIL import Image, ImageColor
from pathlib import Path
import torch
import torch.nn.functional as F
from random import shuffle
from torch import tensor
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset
class ColoredMNIST(Dataset):
def __init__(self, train, color_var=0.02):
# get the colored mnist
self.data_path = 'mnists/data/colored_mnist/mnist_10color_jitter_var_%.03f.npy' % color_var
data_dic = np.load(self.data_path, encoding='latin1', allow_pickle=True).item()
if train:
self.ims = data_dic['train_image']
self.labels = tensor(data_dic['train_label'], dtype=torch.long)
else:
self.ims = data_dic['test_image']
self.labels = tensor(data_dic['test_label'], dtype=torch.long)
self.T = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((32, 32), Image.NEAREST),
transforms.ToTensor(),
transforms.Normalize(
(0.5, 0.5, 0.5),
(0.5, 0.5, 0.5),
),
])
def __getitem__(self, idx):
ims, labels = self.T(self.ims[idx]), self.labels[idx]
ret = {
'ims': ims,
'labels': labels,
}
return ret
def __len__(self):
return self.ims.shape[0]
class DoubleColoredMNIST(Dataset):
def __init__(self, train=True):
self.train = train
self.mnist_sz = 32
# get mnist
mnist = datasets.MNIST('mnists/data', train=True, download=True)
if train:
ims, labels = mnist.data[:50000], mnist.targets[:50000]
else:
ims, labels = mnist.data[50000:], mnist.targets[50000:]
self.ims_digit = torch.stack([ims, ims, ims], dim=1)
self.labels = labels
# colors generated by https://mokole.com/palette.html
colors1 = [
'darkgreen', 'darkblue', '#b03060',
'orangered', 'yellow', 'burlywood', 'lime',
'aqua', 'fuchsia', '#6495ed',
]
        # object colors = background colors cyclically shifted by 6 positions
colors2 = [colors1[i - 6] for i in range(len(colors1))]
def get_rgb(x):
t = torch.tensor(ImageColor.getcolor(x, "RGB")) / 255.
return t.view(-1, 1, 1)
self.background_colors = list(map(get_rgb, colors1))
self.object_colors = list(map(get_rgb, colors2))
self.T = transforms.Compose([
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
def __getitem__(self, idx):
i = self.labels[idx] if self.train else np.random.randint(10)
back_color = self.background_colors[i].clone()
back_color += torch.normal(0, 0.01, (3, 1, 1))
i = self.labels[idx] if self.train else np.random.randint(10)
obj_color = self.object_colors[i].clone()
obj_color += torch.normal(0, 0.01, (3, 1, 1))
# get digit
im_digit = (self.ims_digit[idx] / 255.).to(torch.float32)
im_digit = F.interpolate(im_digit[None, :], (self.mnist_sz, self.mnist_sz)).squeeze()
im_digit = (im_digit > 0.1).to(int) # binarize
# plot digit onto the texture
ims = im_digit * (obj_color) + (1 - im_digit) * back_color
ret = {
'ims': self.T(ims),
'labels': self.labels[idx],
}
return ret
def __len__(self):
return self.labels.shape[0]
class WildlifeMNIST(Dataset):
def __init__(self, train=True):
self.train = train
self.mnist_sz = 32
inter_sz = 150
# get mnist
mnist = datasets.MNIST('mnists/data', train=True, download=True)
if train:
ims, labels = mnist.data[:50000], mnist.targets[:50000]
else:
ims, labels = mnist.data[50000:], mnist.targets[50000:]
self.ims_digit = torch.stack([ims, ims, ims], dim=1)
self.labels = labels
# texture paths
background_dir = Path('.') / 'mnists' / 'data' / 'textures' / 'background'
self.background_textures = sorted([im for im in background_dir.glob('*.jpg')])
object_dir = Path('.') / 'mnists' / 'data' / 'textures' / 'object'
self.object_textures = sorted([im for im in object_dir.glob('*.jpg')])
self.T_texture = transforms.Compose([
transforms.Resize((inter_sz, inter_sz), Image.NEAREST),
transforms.RandomCrop(self.mnist_sz, padding=3, padding_mode='reflect'),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
def __getitem__(self, idx):
# get textures
i = self.labels[idx] if self.train else np.random.randint(10)
back_text = Image.open(self.background_textures[i])
back_text = self.T_texture(back_text)
i = self.labels[idx] if self.train else np.random.randint(10)
obj_text = Image.open(self.object_textures[i])
obj_text = self.T_texture(obj_text)
# get digit
im_digit = (self.ims_digit[idx] / 255.).to(torch.float32)
im_digit = F.interpolate(im_digit[None, :], (self.mnist_sz, self.mnist_sz)).squeeze()
im_digit = (im_digit > 0.1).to(int) # binarize
# plot digit onto the texture
ims = im_digit * (obj_text) + (1 - im_digit) * back_text
ret = {
'ims': ims,
'labels': self.labels[idx],
}
return ret
def __len__(self):
return self.labels.shape[0]
def get_dataloaders(dataset, batch_size, workers):
if dataset == 'colored_MNIST':
MNIST = ColoredMNIST
elif dataset == 'double_colored_MNIST':
MNIST = DoubleColoredMNIST
elif dataset == 'wildlife_MNIST':
MNIST = WildlifeMNIST
else:
raise TypeError(f"Unknown dataset: {dataset}")
ds_train = MNIST(train=True)
ds_test = MNIST(train=False)
dl_train = DataLoader(ds_train, batch_size=batch_size,
shuffle=True, num_workers=workers)
dl_test = DataLoader(ds_test, batch_size=batch_size * 2,
shuffle=False, num_workers=workers)
return dl_train, dl_test
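# Illustrative usage (assuming the MNIST/texture data has been downloaded):
#   dl_train, dl_test = get_dataloaders('double_colored_MNIST', batch_size=64, workers=4)
#   batch = next(iter(dl_train))
#   # batch['ims']: (64, 3, 32, 32) in [-1, 1], batch['labels']: (64,) digit classes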
TENSOR_DATASETS = ['colored_MNIST', 'colored_MNIST_counterfactual',
'double_colored_MNIST', 'double_colored_MNIST_counterfactual',
'wildlife_MNIST', 'wildlife_MNIST_counterfactual']
def get_tensor_dataloaders(dataset, batch_size=64, ablation=False, cf=None):
# assert dataset in TENSOR_DATASETS, f"Unknown datasets {dataset}"
ds_train = []
if 'counterfactual' in dataset:
tensor = torch.load(f'mnists/data/{dataset}.pth')
dataset = dataset.replace('_counterfactual' + (f'_{cf}' if cf is not None else ''), '')
real_tensor = torch.load(f'mnists/data/{dataset}_train.pth')
ds_r = TensorDataset(*real_tensor[:2])
ds_r = Subset(ds_r, np.arange(50000))
for i in [1e4, 1e5, 1e6] if ablation else [len(tensor[0])]:
ds = TensorDataset(*tensor[:2])
# ds = Subset(ds, np.arange(int(1e5)))
if ablation:
ds = Subset(ds, np.arange(int(i)))
# ds = torch.utils.data.ConcatDataset([ds_r, ds])
ds_train.append(ds)
else:
ds_train.append(TensorDataset(*torch.load(f'mnists/data/{dataset}_train.pth')))
ds_test = TensorDataset(*torch.load(f'mnists/data/{dataset}_test.pth'))
dl_train = []
for i in range(3 if ablation else 1):
dl_train.append(DataLoader(ds_train[i], batch_size=batch_size, num_workers=4,
shuffle=True, pin_memory=True))
dl_test = DataLoader(ds_test, batch_size=batch_size * 10, num_workers=4,
shuffle=False, pin_memory=True)
return dl_train, dl_test
|
[
"numpy.load",
"pathlib.Path",
"numpy.random.randint",
"numpy.arange",
"torch.utils.data.TensorDataset",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torch.load",
"torchvision.transforms.ToPILImage",
"torch.normal",
"torchvision.datasets.MNIST",
"torchvision.transforms.RandomCrop",
"torchvision.transforms.Resize",
"torch.stack",
"PIL.Image.open",
"PIL.ImageColor.getcolor",
"torch.nn.functional.interpolate",
"torch.tensor",
"torchvision.transforms.ToTensor"
] |
[((6007, 6085), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_train'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'num_workers': 'workers'}), '(ds_train, batch_size=batch_size, shuffle=True, num_workers=workers)\n', (6017, 6085), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset\n'), ((6126, 6213), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_test'], {'batch_size': '(batch_size * 2)', 'shuffle': '(False)', 'num_workers': 'workers'}), '(ds_test, batch_size=batch_size * 2, shuffle=False, num_workers=\n workers)\n', (6136, 6213), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset\n'), ((7746, 7845), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_test'], {'batch_size': '(batch_size * 10)', 'num_workers': '(4)', 'shuffle': '(False)', 'pin_memory': '(True)'}), '(ds_test, batch_size=batch_size * 10, num_workers=4, shuffle=\n False, pin_memory=True)\n', (7756, 7845), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset\n'), ((1602, 1658), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""mnists/data"""'], {'train': '(True)', 'download': '(True)'}), "('mnists/data', train=True, download=True)\n", (1616, 1658), False, 'from torchvision import datasets\n'), ((1853, 1888), 'torch.stack', 'torch.stack', (['[ims, ims, ims]'], {'dim': '(1)'}), '([ims, ims, ims], dim=1)\n', (1864, 1888), False, 'import torch\n'), ((2793, 2825), 'torch.normal', 'torch.normal', (['(0)', '(0.01)', '(3, 1, 1)'], {}), '(0, 0.01, (3, 1, 1))\n', (2805, 2825), False, 'import torch\n'), ((2968, 3000), 'torch.normal', 'torch.normal', (['(0)', '(0.01)', '(3, 1, 1)'], {}), '(0, 0.01, (3, 1, 1))\n', (2980, 3000), False, 'import torch\n'), ((3704, 3760), 'torchvision.datasets.MNIST', 'datasets.MNIST', (['"""mnists/data"""'], {'train': '(True)', 'download': '(True)'}), "('mnists/data', train=True, download=True)\n", (3718, 3760), False, 'from torchvision import datasets\n'), ((3955, 3990), 'torch.stack', 'torch.stack', (['[ims, ims, ims]'], {'dim': '(1)'}), '([ims, ims, ims], dim=1)\n', (3966, 3990), False, 'import torch\n'), ((4828, 4867), 'PIL.Image.open', 'Image.open', (['self.background_textures[i]'], {}), '(self.background_textures[i])\n', (4838, 4867), False, 'from PIL import Image, ImageColor\n'), ((5004, 5039), 'PIL.Image.open', 'Image.open', (['self.object_textures[i]'], {}), '(self.object_textures[i])\n', (5014, 5039), False, 'from PIL import Image, ImageColor\n'), ((6707, 6747), 'torch.load', 'torch.load', (['f"""mnists/data/{dataset}.pth"""'], {}), "(f'mnists/data/{dataset}.pth')\n", (6717, 6747), False, 'import torch\n'), ((6866, 6912), 'torch.load', 'torch.load', (['f"""mnists/data/{dataset}_train.pth"""'], {}), "(f'mnists/data/{dataset}_train.pth')\n", (6876, 6912), False, 'import torch\n'), ((6928, 6959), 'torch.utils.data.TensorDataset', 'TensorDataset', (['*real_tensor[:2]'], {}), '(*real_tensor[:2])\n', (6941, 6959), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset\n'), ((706, 755), 'torch.tensor', 'tensor', (["data_dic['train_label']"], {'dtype': 'torch.long'}), "(data_dic['train_label'], dtype=torch.long)\n", (712, 755), False, 'from torch import tensor\n'), ((842, 890), 'torch.tensor', 'tensor', (["data_dic['test_label']"], {'dtype': 'torch.long'}), "(data_dic['test_label'], dtype=torch.long)\n", (848, 890), False, 'from torch import tensor\n'), ((2694, 2715), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (2711, 2715), True, 'import numpy as np\n'), 
((2875, 2896), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (2892, 2896), True, 'import numpy as np\n'), ((4786, 4807), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (4803, 4807), True, 'import numpy as np\n'), ((4963, 4984), 'numpy.random.randint', 'np.random.randint', (['(10)'], {}), '(10)\n', (4980, 4984), True, 'import numpy as np\n'), ((6988, 7004), 'numpy.arange', 'np.arange', (['(50000)'], {}), '(50000)\n', (6997, 7004), True, 'import numpy as np\n'), ((7091, 7117), 'torch.utils.data.TensorDataset', 'TensorDataset', (['*tensor[:2]'], {}), '(*tensor[:2])\n', (7104, 7117), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset\n'), ((7470, 7515), 'torch.load', 'torch.load', (['f"""mnists/data/{dataset}_test.pth"""'], {}), "(f'mnists/data/{dataset}_test.pth')\n", (7480, 7515), False, 'import torch\n'), ((7602, 7698), 'torch.utils.data.DataLoader', 'DataLoader', (['ds_train[i]'], {'batch_size': 'batch_size', 'num_workers': '(4)', 'shuffle': '(True)', 'pin_memory': '(True)'}), '(ds_train[i], batch_size=batch_size, num_workers=4, shuffle=True,\n pin_memory=True)\n', (7612, 7698), False, 'from torch.utils.data import Dataset, DataLoader, TensorDataset, Subset\n'), ((545, 606), 'numpy.load', 'np.load', (['self.data_path'], {'encoding': '"""latin1"""', 'allow_pickle': '(True)'}), "(self.data_path, encoding='latin1', allow_pickle=True)\n", (552, 606), True, 'import numpy as np\n'), ((942, 965), 'torchvision.transforms.ToPILImage', 'transforms.ToPILImage', ([], {}), '()\n', (963, 965), False, 'from torchvision import transforms\n'), ((979, 1021), 'torchvision.transforms.Resize', 'transforms.Resize', (['(32, 32)', 'Image.NEAREST'], {}), '((32, 32), Image.NEAREST)\n', (996, 1021), False, 'from torchvision import transforms\n'), ((1035, 1056), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1054, 1056), False, 'from torchvision import transforms\n'), ((1070, 1124), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1090, 1124), False, 'from torchvision import transforms\n'), ((2547, 2601), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (2567, 2601), False, 'from torchvision import transforms\n'), ((3107, 3171), 'torch.nn.functional.interpolate', 'F.interpolate', (['im_digit[None, :]', '(self.mnist_sz, self.mnist_sz)'], {}), '(im_digit[None, :], (self.mnist_sz, self.mnist_sz))\n', (3120, 3171), True, 'import torch.nn.functional as F\n'), ((4428, 4482), 'torchvision.transforms.Resize', 'transforms.Resize', (['(inter_sz, inter_sz)', 'Image.NEAREST'], {}), '((inter_sz, inter_sz), Image.NEAREST)\n', (4445, 4482), False, 'from torchvision import transforms\n'), ((4496, 4567), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['self.mnist_sz'], {'padding': '(3)', 'padding_mode': '"""reflect"""'}), "(self.mnist_sz, padding=3, padding_mode='reflect')\n", (4517, 4567), False, 'from torchvision import transforms\n'), ((4581, 4602), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (4600, 4602), False, 'from torchvision import transforms\n'), ((4616, 4670), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (4636, 4670), False, 'from torchvision import transforms\n'), ((5190, 5254), 
'torch.nn.functional.interpolate', 'F.interpolate', (['im_digit[None, :]', '(self.mnist_sz, self.mnist_sz)'], {}), '(im_digit[None, :], (self.mnist_sz, self.mnist_sz))\n', (5203, 5254), True, 'import torch.nn.functional as F\n'), ((2303, 2332), 'PIL.ImageColor.getcolor', 'ImageColor.getcolor', (['x', '"""RGB"""'], {}), "(x, 'RGB')\n", (2322, 2332), False, 'from PIL import Image, ImageColor\n'), ((7392, 7438), 'torch.load', 'torch.load', (['f"""mnists/data/{dataset}_train.pth"""'], {}), "(f'mnists/data/{dataset}_train.pth')\n", (7402, 7438), False, 'import torch\n'), ((4070, 4079), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (4074, 4079), False, 'from pathlib import Path\n'), ((4236, 4245), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (4240, 4245), False, 'from pathlib import Path\n')]
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
@s-gupta
Code for estimating unknown transforms by setting up a least-squares pixel
reprojection error problem using pytorch. See README.md for details.
"""
from __future__ import print_function
import cv2
import os
import pickle
import numpy as np
import json
import sys
import torch
import torch.optim as optim
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_string('data_dir', None,
'Directory with images and pkl files')
flags.DEFINE_string('calibration_output_file', None,
'File where to write calibration parameters')
flags.DEFINE_boolean('overwrite', False,
'Whether to overwrite output file or not')
flags.DEFINE_float('inlier_pixel_threshold', 20.,
'Max. reprojection error to consider an inlier')
flags.DEFINE_float('min_inlier_fraction', 0.2,
                   'Minimum fraction of points to treat as inliers')
flags.DEFINE_integer('n_iters', 10001,
'Number of iterations')
flags.DEFINE_string('typ', 'camera_arm',
'Calibration to run: camera_only | [camera_arm]')
flags.DEFINE_list('to_optimize', ['camera_link'],
'Static transforms to optimize')
AR_SZ = 0.03 / 2.
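# AR_SZ is the half side length (in meters) of the printed AR tag: a 3 cm tag
# gives corners at (+/-AR_SZ, +/-AR_SZ, 0) in the tag frame (see ar_tag_points
# in setup_camera_arm_solver below).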
def mkdir_if_missing(output_dir):
if not os.path.exists(output_dir):
try:
os.makedirs(output_dir)
return True
except Exception:
return False
def load_data(dir_name):
"""
Load data from the pkl file, and load corresponding images.
:params dir_name: name of directory
:type dir_name: string
:return: data from states.pkl file in directory, images from directory
:rtype: dict, list of cv2 images (numpy arrays)
"""
    tt = pickle.load(open(os.path.join(dir_name, 'states.pkl'), 'rb'))
imgs = []
for i in range(tt['states'].shape[0]):
img = cv2.imread(os.path.join(
dir_name, 'images', 'img_{:04d}.png'.format(i)))
imgs.append(img)
return tt, imgs
def filter_data(imgs, dt, inds):
"""
    Picks out the rows indexed by inds from imgs and the relevant fields of dt.
    :params imgs: list of cv2 images (numpy arrays)
    :params dt: dictionary with data captured during calibration
    :params inds: numpy array of indices to sample from imgs and dt
    :type imgs: list of cv2 images (numpy arrays)
:type dt: dictionary of transforms
:type inds: numpy array of indices
:return: list of images referred by inds, dictionary dt with data sampled
using indices.
:rtype: list, dict
"""
imgs = [imgs[i] for i in inds]
kk = ['rgb_tf_tree_quat', 'rgb_tf_tree_matrix', 'states', 'ar_quat',
'rgb_tf_tree_trans', 'ar_trans', 'arm_pose_ids', 'ar_tf_tree_quat',
'ar_tf_tree_trans', 'ar_img_loc']
for k in kk:
dt[k] = dt[k][inds, ...]
return imgs, dt
def get_tag_corners(imgs, ar_img_loc, dir_name=None):
"""
    Converts ar_img_loc into a usable format by removing entries that are
    empty. Also returns the good indices and, if dir_name is not None, writes
    the corresponding annotated images into that directory.
"""
pts = []
good_imgs = []
for i, img in enumerate(imgs):
if not np.any(np.isnan(ar_img_loc[i, ...])):
pt = ar_img_loc[i, [1, 0, 3, 2], :]
pts.append(pt)
good_imgs.append(i)
if dir_name is not None:
_img = img * 1
pt_int = pt.astype(np.int32)
for j in range(3):
cv2.line(_img,
(pt_int[j, 0], pt_int[j, 1]),
(pt_int[j + 1, 0], pt_int[j + 1, 1]),
(255, 0, 0),
j + 1)
cv2.imwrite(os.path.join(
dir_name, 'img_{:04d}.jpg'.format(i)), _img)
pts = np.array(pts)
good_imgs = np.array(good_imgs)
return pts, good_imgs
def hamilton_product(qa, qb):
"""
Multiply qa by qb.
:params qa: B X N X 4 quaternions
:params qb: B X N X 4 quaternions
:type qa: torch.Tensor
:type qb: torch.Tensor
:return: B X N X 4
:rtype: torch.Tensor
"""
qa_0 = qa[:, :, 0]
qa_1 = qa[:, :, 1]
qa_2 = qa[:, :, 2]
qa_3 = qa[:, :, 3]
qb_0 = qb[:, :, 0]
qb_1 = qb[:, :, 1]
qb_2 = qb[:, :, 2]
qb_3 = qb[:, :, 3]
# See https://en.wikipedia.org/wiki/Quaternion#Hamilton_product
q_mult_0 = qa_0 * qb_0 - qa_1 * qb_1 - qa_2 * qb_2 - qa_3 * qb_3
q_mult_1 = qa_0 * qb_1 + qa_1 * qb_0 + qa_2 * qb_3 - qa_3 * qb_2
q_mult_2 = qa_0 * qb_2 - qa_1 * qb_3 + qa_2 * qb_0 + qa_3 * qb_1
q_mult_3 = qa_0 * qb_3 + qa_1 * qb_2 - qa_2 * qb_1 + qa_3 * qb_0
return torch.stack([q_mult_0, q_mult_1, q_mult_2, q_mult_3], dim=-1)
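# Quaternions throughout this file are in (w, x, y, z) order; ROS-style
# (x, y, z, w) data is reindexed with [3, 0, 1, 2] before use. Illustrative check:
#   identity = torch.tensor([[[1., 0., 0., 0.]]], dtype=torch.double)
#   q = torch.tensor([[[0.5, 0.5, 0.5, 0.5]]], dtype=torch.double)
#   hamilton_product(identity, q)  # returns q unchanged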
def quat_rotate(X, q, inverse=False):
"""
Rotate points by quaternions.
:params X: B X N X 3 points
:params q: B X 4 quaternions
:type X: torch.Tensor
:type q: torch.Tensor
:return: B X N X 3 (rotated points)
:rtype: torch.Tensor
"""
# repeat q along 2nd dim
ones_x = X[[0], :, :][:, :, [0]] * 0 + 1
q = torch.unsqueeze(q, 1) * ones_x
q_conj = torch.cat([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)
X = torch.cat([X[:, :, [0]] * 0, X], dim=-1)
if inverse:
X_rot = hamilton_product(q_conj, hamilton_product(X, q))
else:
X_rot = hamilton_product(q, hamilton_product(X, q_conj))
return X_rot[:, :, 1:4]
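# quat_rotate implements x -> q * (0, x) * conj(q) (or the conjugate order when
# inverse=True). For example, q = (0, 0, 0, 1), a 180 degree rotation about z,
# maps a point (x, y, z) to (-x, -y, z); the identity quaternion (1, 0, 0, 0)
# leaves X unchanged.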
def get_transforms_to_optimize(chain, to_optimize_quat, to_optimize_trans,
device):
"""Returns pytorch tensors to optimize along the chain as per
to_optimize_trans and to_optimize_quat."""
opt_pyt_trans, opt_pyt_quat = [], []
for i in range(len(chain) - 1):
t = None
q = None
if chain[i + 1] in to_optimize_trans:
t = torch.zeros(1, 3, device=device, dtype=torch.double,
requires_grad=True)
if chain[i + 1] in to_optimize_quat:
qxyz = torch.zeros(1, 3, device=device, dtype=torch.double,
requires_grad=True)
qw = torch.ones(1, 1, device=device, dtype=torch.double,
requires_grad=True)
q = [qw, qxyz]
opt_pyt_trans.append(t)
opt_pyt_quat.append(q)
return opt_pyt_quat, opt_pyt_trans
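# Each unknown translation is a free (1, 3) tensor initialised at zero; each
# unknown rotation is parameterised as a [qw, qxyz] pair initialised to the
# identity quaternion and (as in the output step of setup_camera_arm_solver)
# assumed to be normalised to unit length before being composed with the chain.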
def project_pts(pts, quat, trans, opt_pyt_quat, opt_pyt_trans, intrinsics):
"""Projects pts to the camera using the transformation and the camera
intrinsics."""
pts_3d = pts
t_pts_3d = transform_points_from_base(pts_3d, quat, trans,
opt_pyt_quat, opt_pyt_trans)
t_pts_3d = torch.matmul(
intrinsics, t_pts_3d.permute(0, 2, 1)).permute(0, 2, 1)
t_pts_2d = t_pts_3d[:, :, :2] / t_pts_3d[:, :, 2:]
return t_pts_2d
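# Standard pinhole projection: points are mapped into the camera frame along the
# kinematic chain, then [u', v', w'] = K @ X_cam and the pixel coordinates are
# (u, v) = (u' / w', v' / w').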
def reprojection_error(t_pts_2d, pts_2d_observed, min_inlier_fraction,
inlier_pixel_threshold, mask=None):
"""Computes re-projection error for observed and projected points."""
n_pts = t_pts_2d.shape[0]
err_all = t_pts_2d - pts_2d_observed
err_all = err_all**2
err_all = err_all.sum(2)
topk_k = int(4 * n_pts * min_inlier_fraction)
topk, _ = torch.topk(err_all.view(-1), k=topk_k, largest=False)
in_px_thresh_pyt = torch.from_numpy(np.array([inlier_pixel_threshold**2]))
in_px_thresh_pyt = in_px_thresh_pyt.double()
topk = torch.max(topk[-1], in_px_thresh_pyt)
err_all_robust = torch.min(topk, err_all)
if mask is not None:
err_all_robust = err_all_robust * mask
err = err_all_robust.sum() / mask.sum()
else:
err = err_all_robust.mean()
err_all = torch.sqrt(err_all)
return err, err_all, topk
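# The per-point squared pixel error is truncated at
# max(k-th smallest error, inlier_pixel_threshold^2) with
# k = 4 * n_pts * min_inlier_fraction (four corners per tag), so outlier tag
# detections have bounded influence while at least that fraction of corner
# observations is always treated as inliers.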
def optimize(vars_to_optimize, ar_tag_points, rgb_quat, rgb_trans, ar_quat,
ar_trans, rgb_opt_pyt_quat, rgb_opt_pyt_trans, ar_opt_pyt_quat,
ar_opt_pyt_trans, intrinsics, pts_2d_observed, optimizer_opts,
mask=None):
"""Optimizes for vars_to_optimize by going through the kinematic chain from
ar tag points to camera frame, projecting points using the camera
matrices."""
optimizer = optim.Adam(vars_to_optimize, lr=0.01)
print('Beginning optimization.')
# optimizer = optim.SGD(vars_to_optimize, lr=0.01, momentum=0.9)
for i in range(optimizer_opts['n_iters']):
# zero the parameter gradients
optimizer.zero_grad()
t_pts = transform_points_to_base(ar_tag_points, ar_quat, ar_trans,
ar_opt_pyt_quat, ar_opt_pyt_trans)
t_pts_2d = project_pts(t_pts, rgb_quat, rgb_trans, rgb_opt_pyt_quat,
rgb_opt_pyt_trans, intrinsics)
min_inlier_fraction = optimizer_opts['min_inlier_fraction']
inlier_pixel_threshold = optimizer_opts['inlier_pixel_threshold']
err, err_all, robust_err_threshold = \
reprojection_error(t_pts_2d, pts_2d_observed, min_inlier_fraction,
inlier_pixel_threshold, mask)
robust_err_threshold = np.sqrt(robust_err_threshold.detach().numpy()[0])
err.backward()
optimizer.step()
if np.mod(i, 1000) == 0 or i == optimizer_opts['n_iters'] - 1:
np_err_all = err_all.detach().numpy()
n_inliers = int(np.sum(np_err_all <= robust_err_threshold))
total_pts = np_err_all.size
np_err_all = np_err_all[np_err_all <= robust_err_threshold]
str_ = 'Iteration: {:8d}, ' + \
'N Inliers: {:d} / {:d}, ' + \
'loss: {:0.3f}, ' + \
'reprojection error: {:0.3f}, ' + \
'inlier px threshold: {:0.3f}, '
str_ = str_.format(i, n_inliers, total_pts, err.detach().numpy(),
np.mean(np_err_all), robust_err_threshold)
print(str_)
for v in vars_to_optimize:
print(' ' + str(v.detach().numpy()))
str_ = 'Optimization Finished. Final Loss: {:0.3f}.'
print(str_.format(err.detach().numpy()))
print('')
def setup_camera_arm_solver(dt, imgs, mask, pts_2d, to_optimize_trans,
to_optimize_quat, optimizer_opts,
calibration_output_file=None):
"""This version sets up variables for where the base of the arm is with
respect to the robot base. AR tag corners can then be recovered in terms of
these variables, and a direct optimization problem can be set up in these
terms."""
# Set up variables that need to be optimized.
device = torch.device('cpu')
    torch.set_num_threads(4)
torch.manual_seed(4)
rgb_chain = dt['rgb_chain']
ar_chain = dt['ar_chain']
rgb_quat = torch.from_numpy(
dt['rgb_tf_tree_quat'][:, :, [3, 0, 1, 2]].astype(np.float64))
rgb_trans = torch.from_numpy(dt['rgb_tf_tree_trans'].astype(np.float64))
ar_quat = torch.from_numpy(
dt['ar_tf_tree_quat'][:, :, [3, 0, 1, 2]].astype(np.float64))
ar_trans = torch.from_numpy(dt['ar_tf_tree_trans'].astype(np.float64))
intrinsics = torch.from_numpy(
dt['intrinsics'][:, :3]).double().reshape([1, 3, 3])
pts_2d_observed = torch.from_numpy(pts_2d).double()
rgb_opt_pyt_quat, rgb_opt_pyt_trans = \
get_transforms_to_optimize(rgb_chain, to_optimize_quat,
to_optimize_trans, device)
ar_opt_pyt_quat, ar_opt_pyt_trans = \
get_transforms_to_optimize(ar_chain, to_optimize_quat,
to_optimize_trans, device)
ar_tag_points = np.array(
[[AR_SZ, AR_SZ, 0], [AR_SZ, -AR_SZ, 0],
[-AR_SZ, -AR_SZ, 0], [-AR_SZ, AR_SZ, 0]], dtype=np.float64)
ar_tag_points = ar_tag_points[np.newaxis, :, :]
ar_tag_points = np.tile(ar_tag_points, [ar_quat.shape[0], 1, 1])
ar_tag_points = torch.from_numpy(ar_tag_points).double()
# Use gradient descent from pytorch to solve this problem.
vars_pts = []
vars_trans = [t for t in rgb_opt_pyt_trans +
ar_opt_pyt_trans if t is not None]
vars_rotate = []
for q in rgb_opt_pyt_quat + ar_opt_pyt_quat:
if q is not None:
vars_rotate.append(q[0])
vars_rotate.append(q[1])
if len(vars_pts) > 0:
optimize(vars_pts, ar_tag_points, rgb_quat, rgb_trans,
ar_quat, ar_trans, rgb_opt_pyt_quat, rgb_opt_pyt_trans,
ar_opt_pyt_quat, ar_opt_pyt_trans, intrinsics,
pts_2d_observed, optimizer_opts, None)
if len(vars_pts + vars_trans) > 0:
optimize(vars_pts + vars_trans, ar_tag_points, rgb_quat, rgb_trans,
ar_quat, ar_trans, rgb_opt_pyt_quat, rgb_opt_pyt_trans,
ar_opt_pyt_quat, ar_opt_pyt_trans, intrinsics,
pts_2d_observed, optimizer_opts, None)
if len(vars_pts + vars_trans + vars_rotate) > 0:
optimize(vars_pts + vars_trans + vars_rotate, ar_tag_points, rgb_quat,
rgb_trans, ar_quat, ar_trans, rgb_opt_pyt_quat,
rgb_opt_pyt_trans, ar_opt_pyt_quat, ar_opt_pyt_trans,
intrinsics, pts_2d_observed, optimizer_opts, None)
if calibration_output_file is not None:
# Prepare a json with the camera calibration parameters, and write them
# out.
to_write = to_optimize_trans + to_optimize_quat
out = {}
def_trans = np.zeros((3,), dtype=np.float64)
def_quat = np.zeros((4,), dtype=np.float64)
def_quat[3] = 1.0
for t in to_write:
out[t] = {'trans': def_trans.tolist(), 'quat': def_quat.tolist()}
if t in to_optimize_trans:
if t in rgb_chain:
id_ = rgb_chain.index(t)
base_trans = torch.mean(
rgb_trans[:, id_ - 1, :], 0, keepdim=True)
trans = rgb_opt_pyt_trans[id_ - 1]
_from = rgb_chain[id_ - 1]
_to = rgb_chain[id_]
else:
id_ = ar_chain.index(t)
base_trans = torch.mean(
ar_trans[:, id_ - 1, :], 0, keepdim=True)
trans = ar_opt_pyt_trans[id_ - 1]
_from = ar_chain[id_ - 1]
_to = ar_chain[id_]
out[t]['trans'] = (-trans - base_trans).detach().cpu()
out[t]['trans'] = out[t]['trans'].numpy()[0, :].tolist()
if t in to_optimize_quat:
if t in rgb_chain:
id_ = rgb_chain.index(t)
base_quat = rgb_quat[:1, id_ - 1, :]
quat = rgb_opt_pyt_quat[id_ - 1]
_from = rgb_chain[id_ - 1]
_to = rgb_chain[id_]
else:
id_ = ar_chain.index(t)
base_quat = ar_quat[:1, id_ - 1, :]
quat = ar_opt_pyt_quat[id_ - 1]
_from = ar_chain[id_ - 1]
_to = ar_chain[id_]
q = torch.cat(quat, 1)
norm = q.norm(p=2, dim=1, keepdim=True)
q_normalized = q.div(norm)
full_quat = hamilton_product(
q_normalized.view(1, 1, 4), base_quat.view(1, 1, 4))
quat = full_quat.detach().cpu().numpy()
quat = quat[0, 0, [1, 2, 3, 0]]
# Inverting the quat
quat[:3] = -quat[:3]
quat = quat.tolist()
out[t]['quat'] = quat
out[t]['from'] = _from
out[t]['to'] = _to
print('Writing calibration parameters to {:s}.'.format(
calibration_output_file))
with open(calibration_output_file, 'w') as f:
json.dump(out, f, sort_keys=True, indent=4, separators=(',', ': '))
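# The JSON written above maps each optimized transform name to its parent/child links
# ('from'/'to'), a translation offset, and a quaternion stored as [x, y, z, w] with the
# vector part negated (see the inversion step above).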
def setup_camera_solver(dt, imgs, mask, pts_2d, to_optimize_trans=[],
to_optimize_quat=[]):
# TODO(s-gupta): Clean up this function, so that it uses the common
# functions that we have defined above.
n_measures = pts_2d.shape[0]
quat = dt['rgb_tf_tree_quat'] * 1.
quat = quat[:, :, [3, 0, 1, 2]]
trans = dt['rgb_tf_tree_trans'] * 1.
intrinsics = dt['intrinsics']
chain = dt['rgb_chain']
init_pts = get_ar_tag_loc(
dt['ar_tf_tree_quat'], dt['ar_tf_tree_trans'], dt['arm_pose_ids'])
init = init_pts.T
init = init[np.newaxis, :, :].astype(np.float64)
init = np.repeat(init, 4, 2)
torch.manual_seed(4)
# Set up variables that will be optimized.
device = torch.device('cpu')
# 3D Points
_pts_3d = torch.from_numpy(init)
_pts_3d.requires_grad = True
# Translation and rotations as needed.
opt_pyt_trans, opt_pyt_quat = [], []
for i in range(trans.shape[1]):
t = None
q = None
if chain[i + 1] in to_optimize_trans:
t = torch.zeros(1, 3, device=device, dtype=torch.double,
requires_grad=True)
if chain[i + 1] in to_optimize_quat:
qxyz = torch.zeros(1, 3, device=device, dtype=torch.double,
requires_grad=True)
qw = torch.ones(1, 1, device=device, dtype=torch.double,
requires_grad=True)
q = [qw, qxyz]
opt_pyt_trans.append(t)
opt_pyt_quat.append(q)
# Load data into pytorch
pyt_quat = torch.from_numpy(quat)
pyt_trans = torch.from_numpy(trans)
pts_2d_observed = torch.from_numpy(
np.transpose(pts_2d, [0, 2, 1])).double()
pyt_mask = torch.from_numpy(mask.astype(np.float64))
intrinsics_pyt = torch.from_numpy(
intrinsics[:, :3]).double().reshape([1, 3, 3])
def project_pts(_pts, pyt_quat, pyt_trans, opt_pyt_quat, opt_pyt_trans,
intrinsics_pyt):
pts_3d = _pts_3d
pts_3d = pts_3d.permute(0, 2, 1)
t_pts_3d = pts_3d.view(1, -1, 3).repeat(n_measures, 1, 1)
t_pts_3d = transform_points_from_base(pts_3d, pyt_quat, pyt_trans,
opt_pyt_quat, opt_pyt_trans)
t_pts_3d = t_pts_3d.permute(0, 2, 1)
t_pts_3d = torch.matmul(intrinsics_pyt, t_pts_3d)
t_pts_2d = t_pts_3d[:, :2, :] / t_pts_3d[:, 2:, :]
return t_pts_2d
def criterion(pyt_mask, t_pts_2d, pts_2d_observed):
err = t_pts_2d - pts_2d_observed
err = err**2
err = err.sum(1)
err_all = err * pyt_mask
# n_pts = t_pts_2d.shape[0]
# topk, _ = torch.topk(err_all.view(-1), k=int(4*n_pts*0.8),
# largest=False)
# mask_robust = torch.le(err_all, topk[-1]).double();
# err_all = err_all * mask_robust
# pyt_mask = pyt_mask * mask_robust
err = err_all.sum() / pyt_mask.sum()
return err, err_all
def optimize(n_iters, vars_to_optimize, _pts, pyt_quat, pyt_trans,
opt_pyt_quat, opt_pyt_trans, intrinsics_pyt, mask,
pts_2d_observed):
optimizer = optim.Adam(vars_to_optimize, lr=0.01)
for i in range(n_iters):
# zero the parameter gradients
optimizer.zero_grad()
t_pts_2d = project_pts(_pts, pyt_quat, pyt_trans, opt_pyt_quat,
opt_pyt_trans, intrinsics_pyt)
err, err_all = criterion(pyt_mask, t_pts_2d, pts_2d_observed)
err.backward()
optimizer.step()
if np.mod(i, 1000) == 0:
print(err.detach().numpy())
print(err_all.detach().numpy())
for v in vars_to_optimize:
print(v.detach().numpy())
print(err.detach().numpy())
# Use gradient descent from pytorch to solve this problem.
vars_pts = [_pts_3d]
vars_trans = [_ for _ in opt_pyt_trans if _ is not None]
vars_rotate = []
for q in opt_pyt_quat:
if q is not None:
vars_rotate.append(q[0])
vars_rotate.append(q[1])
n_iters = 10001
optimize(n_iters, vars_pts, _pts_3d, pyt_quat, pyt_trans, opt_pyt_quat,
opt_pyt_trans, intrinsics_pyt, mask, pts_2d_observed)
optimize(n_iters, vars_trans + vars_pts, _pts_3d, pyt_quat, pyt_trans,
opt_pyt_quat, opt_pyt_trans, intrinsics_pyt, mask,
pts_2d_observed)
optimize(n_iters, vars_rotate + vars_trans + vars_pts, _pts_3d, pyt_quat,
pyt_trans, opt_pyt_quat, opt_pyt_trans, intrinsics_pyt, mask,
pts_2d_observed)
def transform_points_to_base(pts, quat, trans, opt_quat, opt_trans):
"""Transform pts from target frame to base frame.
pts is B x N x 3 """
n_measures = pts.shape[0]
for i in range(trans.shape[1] - 1, -1, -1):
if opt_trans[i] is not None:
pts = pts - opt_trans[i].unsqueeze(1)
pts = pts - trans[:, i, :].unsqueeze(1)
if opt_quat[i] is not None:
q = torch.cat(opt_quat[i], 1)
norm = q.norm(p=2, dim=1, keepdim=True)
q_normalized = q.div(norm).repeat(n_measures, 1)
pts = quat_rotate(pts, q_normalized, inverse=True)
pts = quat_rotate(pts, quat[:, i, :], inverse=True)
return pts
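# Note: the loop above walks the chain from the last link back to the base and applies the
# inverse of each forward transform (subtract the translation, then rotate with
# inverse=True). transform_points_from_base below applies the forward transforms in the
# opposite order, so composing the two on the same chain should return the original points
# up to numerical error.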
def transform_points_from_base(pts, quat, trans, opt_quat, opt_trans):
"""Transform pts from base frame to target frame.
pts is B x N x 3 """
t_pts_3d = pts
n_measures = pts.shape[0]
for j in range(quat.shape[1]):
t_pts_3d = quat_rotate(t_pts_3d, quat[:, j, :])
if opt_quat[j] is not None:
q = torch.cat(opt_quat[j], 1)
norm = q.norm(p=2, dim=1, keepdim=True)
q_normalized = q.div(norm).repeat(n_measures, 1)
t_pts_3d = quat_rotate(t_pts_3d, q_normalized)
t_pts_3d = t_pts_3d + trans[:, j, :].unsqueeze(1)
if opt_trans[j] is not None:
t_pts_3d = t_pts_3d + opt_trans[j].unsqueeze(1)
return t_pts_3d
def get_ar_tag_loc(ar_tf_tree_quat, ar_tf_tree_trans, arm_pose_ids):
"""Returns the 3D coordinate of the center of the AR tag based on the
kinematic chain. Returns one center per unique arm_pose_ids."""
quat = ar_tf_tree_quat.astype(np.float64)
trans = ar_tf_tree_trans.astype(np.float64)
u_pts, ids = np.unique(arm_pose_ids, return_inverse=True)
pyt_quat = torch.from_numpy(quat[:, :, [3, 0, 1, 2]])
pyt_trans = torch.from_numpy(trans)
pts = np.zeros((quat.shape[0], 1, 3), dtype=np.float64)
opt_pyt_quat = [None for i in range(pyt_trans.shape[1])]
opt_pyt_trans = [None for i in range(pyt_trans.shape[1])]
t_pts = transform_points_to_base(pts, pyt_quat, pyt_trans, opt_pyt_quat,
opt_pyt_trans)
t_pts = t_pts.detach().numpy()
pts_out = []
for i in range(u_pts.size):
pts_out.append(np.mean(t_pts[ids == i, 0, :], 0))
pts_out = np.array(pts_out)
return pts_out
def get_correspondence(arm_pose_ids, pts):
"""Use the arm_pose_ids to determine which corners actually corresspond to
one another. Returns an expanded matrix, and a mask that determines which
points should be used for computing the loss."""
u_pts, ids = np.unique(arm_pose_ids, return_inverse=True)
n_pts = u_pts.size * 4
  mask = np.zeros((pts.shape[0], n_pts), dtype=bool)
pts_full = np.zeros((pts.shape[0], n_pts, 2), dtype=np.float32)
for i in range(u_pts.size):
pts_full[ids == i, i * 4:i * 4 + 4, :] = pts[ids == i, :, :]
if np.sum(ids == i) >= 3:
mask[ids == i, i * 4:i * 4 + 4] = True
return mask, pts_full
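# Illustrative sketch (not used by the pipeline): shows the shapes get_correspondence
# produces for a toy case with two arm poses and four tag corners per detection.
def _get_correspondence_toy_example():
  arm_pose_ids = np.array([0, 0, 0, 1])
  pts = np.zeros((4, 4, 2), dtype=np.float32)
  mask, pts_full = get_correspondence(arm_pose_ids, pts)
  # Pose 0 has three detections (>= 3), so its four corner slots are unmasked; the single
  # detection of pose 1 stays masked out.
  assert mask.shape == (4, 8) and pts_full.shape == (4, 8, 2)
  return mask, pts_full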
def main(_):
if FLAGS.calibration_output_file is None:
FLAGS.calibration_output_file = os.path.join(FLAGS.data_dir,
'calibrated.json')
if os.path.exists(FLAGS.calibration_output_file) and not FLAGS.overwrite:
print("""Calibration file already exists, please specify --overwrite to
overwrite it with new calibration parameters""")
sys.exit()
optimizer_opts = {'min_inlier_fraction': FLAGS.min_inlier_fraction,
'inlier_pixel_threshold': FLAGS.inlier_pixel_threshold,
'n_iters': FLAGS.n_iters}
print('optimizer_opts: ', optimizer_opts)
print('')
  print('transforms to optimize: ', FLAGS.to_optimize)
print('')
dir_name = FLAGS.data_dir
tag_detect_dir_name = os.path.join(dir_name, 'tag_detect')
mkdir_if_missing(tag_detect_dir_name)
dt, imgs = load_data(dir_name)
dt['ar_img_loc'] = np.array(dt['ar_img_loc'])
ar_img_loc = dt['ar_img_loc']
pts, inds = get_tag_corners(imgs, ar_img_loc, tag_detect_dir_name)
# Remove ones that did not get detected.
imgs, dt = filter_data(imgs, dt, inds)
if FLAGS.typ == 'camera_only':
# Set up correspondence between them based on the pose id for the arm.
mask, pts = get_correspondence(dt['arm_pose_ids'], pts)
to_optimize_trans = FLAGS.to_optimize
to_optimize_quat = FLAGS.to_optimize
calibration_output_file = FLAGS.calibration_output_file
setup_camera_solver(dt, imgs, mask, pts, to_optimize_trans,
to_optimize_quat,
calibration_output_file=calibration_output_file)
else:
mask = []
to_optimize = FLAGS.to_optimize
to_optimize_trans = to_optimize
to_optimize_quat = to_optimize
calibration_output_file = FLAGS.calibration_output_file
setup_camera_arm_solver(dt, imgs, mask, pts, to_optimize_trans,
to_optimize_quat, optimizer_opts,
calibration_output_file)
if __name__ == '__main__':
app.run(main)
|
[
"numpy.sum",
"torch.sqrt",
"torch.cat",
"numpy.isnan",
"numpy.mean",
"absl.flags.DEFINE_boolean",
"numpy.tile",
"torch.device",
"absl.flags.DEFINE_list",
"os.path.join",
"numpy.unique",
"cv2.line",
"torch.ones",
"os.path.exists",
"numpy.transpose",
"absl.flags.DEFINE_integer",
"absl.flags.DEFINE_float",
"torch.zeros",
"torch.matmul",
"numpy.repeat",
"torch.mean",
"json.dump",
"torch.manual_seed",
"numpy.mod",
"torch.optim.Adam",
"torch.max",
"torch.unsqueeze",
"torch.min",
"sys.exit",
"torch.from_numpy",
"torch.stack",
"os.makedirs",
"numpy.zeros",
"absl.flags.DEFINE_string",
"absl.app.run",
"numpy.array"
] |
[((544, 620), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data_dir"""', 'None', '"""Directory with images and pkl files"""'], {}), "('data_dir', None, 'Directory with images and pkl files')\n", (563, 620), False, 'from absl import app, flags\n'), ((641, 743), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""calibration_output_file"""', 'None', '"""File where to write calibration parameters"""'], {}), "('calibration_output_file', None,\n 'File where to write calibration parameters')\n", (660, 743), False, 'from absl import app, flags\n'), ((760, 847), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""overwrite"""', '(False)', '"""Whether to overwrite output file or not"""'], {}), "('overwrite', False,\n 'Whether to overwrite output file or not')\n", (780, 847), False, 'from absl import app, flags\n'), ((865, 968), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""inlier_pixel_threshold"""', '(20.0)', '"""Max. reprojection error to consider an inlier"""'], {}), "('inlier_pixel_threshold', 20.0,\n 'Max. reprojection error to consider an inlier')\n", (883, 968), False, 'from absl import app, flags\n'), ((983, 1083), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""min_inlier_fraction"""', '(0.2)', '"""Fraction of pts to atleast consider as inliers"""'], {}), "('min_inlier_fraction', 0.2,\n 'Fraction of pts to atleast consider as inliers')\n", (1001, 1083), False, 'from absl import app, flags\n'), ((1099, 1161), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_iters"""', '(10001)', '"""Number of iterations"""'], {}), "('n_iters', 10001, 'Number of iterations')\n", (1119, 1161), False, 'from absl import app, flags\n'), ((1183, 1277), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""typ"""', '"""camera_arm"""', '"""Calibration to run: camera_only | [camera_arm]"""'], {}), "('typ', 'camera_arm',\n 'Calibration to run: camera_only | [camera_arm]')\n", (1202, 1277), False, 'from absl import app, flags\n'), ((1294, 1380), 'absl.flags.DEFINE_list', 'flags.DEFINE_list', (['"""to_optimize"""', "['camera_link']", '"""Static transforms to optimize"""'], {}), "('to_optimize', ['camera_link'],\n 'Static transforms to optimize')\n", (1311, 1380), False, 'from absl import app, flags\n'), ((3996, 4009), 'numpy.array', 'np.array', (['pts'], {}), '(pts)\n', (4004, 4009), True, 'import numpy as np\n'), ((4026, 4045), 'numpy.array', 'np.array', (['good_imgs'], {}), '(good_imgs)\n', (4034, 4045), True, 'import numpy as np\n'), ((4863, 4924), 'torch.stack', 'torch.stack', (['[q_mult_0, q_mult_1, q_mult_2, q_mult_3]'], {'dim': '(-1)'}), '([q_mult_0, q_mult_1, q_mult_2, q_mult_3], dim=-1)\n', (4874, 4924), False, 'import torch\n'), ((5324, 5376), 'torch.cat', 'torch.cat', (['[q[:, :, [0]], -1 * q[:, :, 1:4]]'], {'dim': '(-1)'}), '([q[:, :, [0]], -1 * q[:, :, 1:4]], dim=-1)\n', (5333, 5376), False, 'import torch\n'), ((5385, 5425), 'torch.cat', 'torch.cat', (['[X[:, :, [0]] * 0, X]'], {'dim': '(-1)'}), '([X[:, :, [0]] * 0, X], dim=-1)\n', (5394, 5425), False, 'import torch\n'), ((7607, 7644), 'torch.max', 'torch.max', (['topk[-1]', 'in_px_thresh_pyt'], {}), '(topk[-1], in_px_thresh_pyt)\n', (7616, 7644), False, 'import torch\n'), ((7666, 7690), 'torch.min', 'torch.min', (['topk', 'err_all'], {}), '(topk, err_all)\n', (7675, 7690), False, 'import torch\n'), ((7872, 7891), 'torch.sqrt', 'torch.sqrt', (['err_all'], {}), '(err_all)\n', (7882, 7891), False, 'import torch\n'), ((8362, 8399), 'torch.optim.Adam', 'optim.Adam', (['vars_to_optimize'], {'lr': 
'(0.01)'}), '(vars_to_optimize, lr=0.01)\n', (8372, 8399), True, 'import torch.optim as optim\n'), ((10808, 10827), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (10820, 10827), False, 'import torch\n'), ((10862, 10882), 'torch.manual_seed', 'torch.manual_seed', (['(4)'], {}), '(4)\n', (10879, 10882), False, 'import torch\n'), ((11815, 11928), 'numpy.array', 'np.array', (['[[AR_SZ, AR_SZ, 0], [AR_SZ, -AR_SZ, 0], [-AR_SZ, -AR_SZ, 0], [-AR_SZ, AR_SZ, 0]\n ]'], {'dtype': 'np.float64'}), '([[AR_SZ, AR_SZ, 0], [AR_SZ, -AR_SZ, 0], [-AR_SZ, -AR_SZ, 0], [-\n AR_SZ, AR_SZ, 0]], dtype=np.float64)\n', (11823, 11928), True, 'import numpy as np\n'), ((12014, 12062), 'numpy.tile', 'np.tile', (['ar_tag_points', '[ar_quat.shape[0], 1, 1]'], {}), '(ar_tag_points, [ar_quat.shape[0], 1, 1])\n', (12021, 12062), True, 'import numpy as np\n'), ((16717, 16738), 'numpy.repeat', 'np.repeat', (['init', '(4)', '(2)'], {}), '(init, 4, 2)\n', (16726, 16738), True, 'import numpy as np\n'), ((16744, 16764), 'torch.manual_seed', 'torch.manual_seed', (['(4)'], {}), '(4)\n', (16761, 16764), False, 'import torch\n'), ((16825, 16844), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (16837, 16844), False, 'import torch\n'), ((16875, 16897), 'torch.from_numpy', 'torch.from_numpy', (['init'], {}), '(init)\n', (16891, 16897), False, 'import torch\n'), ((17669, 17691), 'torch.from_numpy', 'torch.from_numpy', (['quat'], {}), '(quat)\n', (17685, 17691), False, 'import torch\n'), ((17708, 17731), 'torch.from_numpy', 'torch.from_numpy', (['trans'], {}), '(trans)\n', (17724, 17731), False, 'import torch\n'), ((22502, 22546), 'numpy.unique', 'np.unique', (['arm_pose_ids'], {'return_inverse': '(True)'}), '(arm_pose_ids, return_inverse=True)\n', (22511, 22546), True, 'import numpy as np\n'), ((22562, 22604), 'torch.from_numpy', 'torch.from_numpy', (['quat[:, :, [3, 0, 1, 2]]'], {}), '(quat[:, :, [3, 0, 1, 2]])\n', (22578, 22604), False, 'import torch\n'), ((22621, 22644), 'torch.from_numpy', 'torch.from_numpy', (['trans'], {}), '(trans)\n', (22637, 22644), False, 'import torch\n'), ((22655, 22704), 'numpy.zeros', 'np.zeros', (['(quat.shape[0], 1, 3)'], {'dtype': 'np.float64'}), '((quat.shape[0], 1, 3), dtype=np.float64)\n', (22663, 22704), True, 'import numpy as np\n'), ((23113, 23130), 'numpy.array', 'np.array', (['pts_out'], {}), '(pts_out)\n', (23121, 23130), True, 'import numpy as np\n'), ((23422, 23466), 'numpy.unique', 'np.unique', (['arm_pose_ids'], {'return_inverse': '(True)'}), '(arm_pose_ids, return_inverse=True)\n', (23431, 23466), True, 'import numpy as np\n'), ((23505, 23551), 'numpy.zeros', 'np.zeros', (['(pts.shape[0], n_pts)'], {'dtype': 'np.bool'}), '((pts.shape[0], n_pts), dtype=np.bool)\n', (23513, 23551), True, 'import numpy as np\n'), ((23567, 23619), 'numpy.zeros', 'np.zeros', (['(pts.shape[0], n_pts, 2)'], {'dtype': 'np.float32'}), '((pts.shape[0], n_pts, 2), dtype=np.float32)\n', (23575, 23619), True, 'import numpy as np\n'), ((24660, 24696), 'os.path.join', 'os.path.join', (['dir_name', '"""tag_detect"""'], {}), "(dir_name, 'tag_detect')\n", (24672, 24696), False, 'import os\n'), ((24797, 24823), 'numpy.array', 'np.array', (["dt['ar_img_loc']"], {}), "(dt['ar_img_loc'])\n", (24805, 24823), True, 'import numpy as np\n'), ((25980, 25993), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (25987, 25993), False, 'from absl import app, flags\n'), ((1460, 1486), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (1474, 1486), False, 'import os\n'), 
((5279, 5300), 'torch.unsqueeze', 'torch.unsqueeze', (['q', '(1)'], {}), '(q, 1)\n', (5294, 5300), False, 'import torch\n'), ((7508, 7547), 'numpy.array', 'np.array', (['[inlier_pixel_threshold ** 2]'], {}), '([inlier_pixel_threshold ** 2])\n', (7516, 7547), True, 'import numpy as np\n'), ((13637, 13669), 'numpy.zeros', 'np.zeros', (['(3,)'], {'dtype': 'np.float64'}), '((3,), dtype=np.float64)\n', (13645, 13669), True, 'import numpy as np\n'), ((13689, 13721), 'numpy.zeros', 'np.zeros', (['(4,)'], {'dtype': 'np.float64'}), '((4,), dtype=np.float64)\n', (13697, 13721), True, 'import numpy as np\n'), ((18435, 18473), 'torch.matmul', 'torch.matmul', (['intrinsics_pyt', 't_pts_3d'], {}), '(intrinsics_pyt, t_pts_3d)\n', (18447, 18473), False, 'import torch\n'), ((19280, 19317), 'torch.optim.Adam', 'optim.Adam', (['vars_to_optimize'], {'lr': '(0.01)'}), '(vars_to_optimize, lr=0.01)\n', (19290, 19317), True, 'import torch.optim as optim\n'), ((23933, 23980), 'os.path.join', 'os.path.join', (['FLAGS.data_dir', '"""calibrated.json"""'], {}), "(FLAGS.data_dir, 'calibrated.json')\n", (23945, 23980), False, 'import os\n'), ((24042, 24087), 'os.path.exists', 'os.path.exists', (['FLAGS.calibration_output_file'], {}), '(FLAGS.calibration_output_file)\n', (24056, 24087), False, 'import os\n'), ((24262, 24272), 'sys.exit', 'sys.exit', ([], {}), '()\n', (24270, 24272), False, 'import sys\n'), ((1513, 1536), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (1524, 1536), False, 'import os\n'), ((1939, 1975), 'os.path.join', 'os.path.join', (['dir_name', '"""states.pkl"""'], {}), "(dir_name, 'states.pkl')\n", (1951, 1975), False, 'import os\n'), ((6014, 6086), 'torch.zeros', 'torch.zeros', (['(1)', '(3)'], {'device': 'device', 'dtype': 'torch.double', 'requires_grad': '(True)'}), '(1, 3, device=device, dtype=torch.double, requires_grad=True)\n', (6025, 6086), False, 'import torch\n'), ((6179, 6251), 'torch.zeros', 'torch.zeros', (['(1)', '(3)'], {'device': 'device', 'dtype': 'torch.double', 'requires_grad': '(True)'}), '(1, 3, device=device, dtype=torch.double, requires_grad=True)\n', (6190, 6251), False, 'import torch\n'), ((6300, 6371), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {'device': 'device', 'dtype': 'torch.double', 'requires_grad': '(True)'}), '(1, 1, device=device, dtype=torch.double, requires_grad=True)\n', (6310, 6371), False, 'import torch\n'), ((11422, 11446), 'torch.from_numpy', 'torch.from_numpy', (['pts_2d'], {}), '(pts_2d)\n', (11438, 11446), False, 'import torch\n'), ((12083, 12114), 'torch.from_numpy', 'torch.from_numpy', (['ar_tag_points'], {}), '(ar_tag_points)\n', (12099, 12114), False, 'import torch\n'), ((16011, 16078), 'json.dump', 'json.dump', (['out', 'f'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), "(out, f, sort_keys=True, indent=4, separators=(',', ': '))\n", (16020, 16078), False, 'import json\n'), ((17148, 17220), 'torch.zeros', 'torch.zeros', (['(1)', '(3)'], {'device': 'device', 'dtype': 'torch.double', 'requires_grad': '(True)'}), '(1, 3, device=device, dtype=torch.double, requires_grad=True)\n', (17159, 17220), False, 'import torch\n'), ((17313, 17385), 'torch.zeros', 'torch.zeros', (['(1)', '(3)'], {'device': 'device', 'dtype': 'torch.double', 'requires_grad': '(True)'}), '(1, 3, device=device, dtype=torch.double, requires_grad=True)\n', (17324, 17385), False, 'import torch\n'), ((17434, 17505), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {'device': 'device', 'dtype': 'torch.double', 'requires_grad': '(True)'}), '(1, 
1, device=device, dtype=torch.double, requires_grad=True)\n', (17444, 17505), False, 'import torch\n'), ((21183, 21208), 'torch.cat', 'torch.cat', (['opt_quat[i]', '(1)'], {}), '(opt_quat[i], 1)\n', (21192, 21208), False, 'import torch\n'), ((21804, 21829), 'torch.cat', 'torch.cat', (['opt_quat[j]', '(1)'], {}), '(opt_quat[j], 1)\n', (21813, 21829), False, 'import torch\n'), ((23064, 23097), 'numpy.mean', 'np.mean', (['t_pts[ids == i, 0, :]', '(0)'], {}), '(t_pts[ids == i, 0, :], 0)\n', (23071, 23097), True, 'import numpy as np\n'), ((23732, 23748), 'numpy.sum', 'np.sum', (['(ids == i)'], {}), '(ids == i)\n', (23738, 23748), True, 'import numpy as np\n'), ((3353, 3381), 'numpy.isnan', 'np.isnan', (['ar_img_loc[i, ...]'], {}), '(ar_img_loc[i, ...])\n', (3361, 3381), True, 'import numpy as np\n'), ((9382, 9397), 'numpy.mod', 'np.mod', (['i', '(1000)'], {}), '(i, 1000)\n', (9388, 9397), True, 'import numpy as np\n'), ((9520, 9562), 'numpy.sum', 'np.sum', (['(np_err_all <= robust_err_threshold)'], {}), '(np_err_all <= robust_err_threshold)\n', (9526, 9562), True, 'import numpy as np\n'), ((10027, 10046), 'numpy.mean', 'np.mean', (['np_err_all'], {}), '(np_err_all)\n', (10034, 10046), True, 'import numpy as np\n'), ((15285, 15303), 'torch.cat', 'torch.cat', (['quat', '(1)'], {}), '(quat, 1)\n', (15294, 15303), False, 'import torch\n'), ((17780, 17811), 'numpy.transpose', 'np.transpose', (['pts_2d', '[0, 2, 1]'], {}), '(pts_2d, [0, 2, 1])\n', (17792, 17811), True, 'import numpy as np\n'), ((19715, 19730), 'numpy.mod', 'np.mod', (['i', '(1000)'], {}), '(i, 1000)\n', (19721, 19730), True, 'import numpy as np\n'), ((3659, 3766), 'cv2.line', 'cv2.line', (['_img', '(pt_int[j, 0], pt_int[j, 1])', '(pt_int[j + 1, 0], pt_int[j + 1, 1])', '(255, 0, 0)', '(j + 1)'], {}), '(_img, (pt_int[j, 0], pt_int[j, 1]), (pt_int[j + 1, 0], pt_int[j + \n 1, 1]), (255, 0, 0), j + 1)\n', (3667, 3766), False, 'import cv2\n'), ((11321, 11362), 'torch.from_numpy', 'torch.from_numpy', (["dt['intrinsics'][:, :3]"], {}), "(dt['intrinsics'][:, :3])\n", (11337, 11362), False, 'import torch\n'), ((14005, 14058), 'torch.mean', 'torch.mean', (['rgb_trans[:, id_ - 1, :]', '(0)'], {'keepdim': '(True)'}), '(rgb_trans[:, id_ - 1, :], 0, keepdim=True)\n', (14015, 14058), False, 'import torch\n'), ((14326, 14378), 'torch.mean', 'torch.mean', (['ar_trans[:, id_ - 1, :]', '(0)'], {'keepdim': '(True)'}), '(ar_trans[:, id_ - 1, :], 0, keepdim=True)\n', (14336, 14378), False, 'import torch\n'), ((17900, 17935), 'torch.from_numpy', 'torch.from_numpy', (['intrinsics[:, :3]'], {}), '(intrinsics[:, :3])\n', (17916, 17935), False, 'import torch\n')]
|
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
from math import sqrt
parser = argparse.ArgumentParser(description='Code from AKAZE local features matching tutorial.')
parser.add_argument('--input1', help='Path to input image 1.', default='graf1.png')
parser.add_argument('--input2', help='Path to input image 2.', default='graf3.png')
parser.add_argument('--homography', help='Path to the homography matrix.', default='H1to3p.xml')
args = parser.parse_args()
img1 = cv.imread(args.input1, cv.IMREAD_GRAYSCALE)
img2 = cv.imread(args.input2, cv.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
print('Could not open or find the images!')
exit(0)
fs = cv.FileStorage(args.homography, cv.FILE_STORAGE_READ)
homography = fs.getFirstTopLevelNode().mat()
detector = cv.ORB_create(10000)
# descriptor = cv.ORB_create()
descriptor = cv.xfeatures2d.BEBLID_create(0.75)
kpts1 = detector.detect(img1, None)
kpts2 = detector.detect(img2, None)
kpts1, desc1 = descriptor.compute(img1, kpts1)
kpts2, desc2 = descriptor.compute(img2, kpts2)
matcher = cv.DescriptorMatcher_create(cv.DescriptorMatcher_BRUTEFORCE_HAMMING)
nn_matches = matcher.knnMatch(desc1, desc2, 2)
matched1 = []
matched2 = []
nn_match_ratio = 0.8 # Nearest neighbor matching ratio
for m, n in nn_matches:
if m.distance < nn_match_ratio * n.distance:
matched1.append(kpts1[m.queryIdx])
matched2.append(kpts2[m.trainIdx])
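# Lowe's ratio test: a match is kept only when the best neighbour is clearly better than the
# second best (distance ratio < 0.8), discarding most ambiguous descriptor matches before
# the homography-based inlier check below.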
inliers1 = []
inliers2 = []
good_matches = []
inlier_threshold = 2.5 # Distance threshold to identify inliers with homography check
for i, m in enumerate(matched1):
# Create the homogeneous point
col = np.ones((3, 1), dtype=np.float64)
col[0:2, 0] = m.pt
# Project from image 1 to image 2
col = np.dot(homography, col)
col /= col[2, 0]
# Calculate euclidean distance
dist = sqrt(pow(col[0, 0] - matched2[i].pt[0], 2) + \
pow(col[1, 0] - matched2[i].pt[1], 2))
if dist < inlier_threshold:
good_matches.append(cv.DMatch(len(inliers1), len(inliers2), 0))
inliers1.append(matched1[i])
inliers2.append(matched2[i])
res = np.empty((max(img1.shape[0], img2.shape[0]), img1.shape[1] + img2.shape[1], 3), dtype=np.uint8)
cv.drawMatches(img1, inliers1, img2, inliers2, good_matches, res)
cv.imwrite("matching_result.png", res)
inlier_ratio = len(inliers1) / float(len(matched1))
print('Matching Results')
print('*******************************')
print('# Keypoints 1: \t', len(kpts1))
print('# Keypoints 2: \t', len(kpts2))
print('# Matches: \t', len(matched1))
print('# Inliers: \t', len(inliers1))
print('# Inliers Ratio: \t', inlier_ratio)
cv.imshow('result', res)
cv.waitKey()
|
[
"argparse.ArgumentParser",
"cv2.xfeatures2d.BEBLID_create",
"cv2.drawMatches",
"cv2.waitKey",
"cv2.imwrite",
"numpy.ones",
"cv2.imread",
"cv2.FileStorage",
"cv2.ORB_create",
"cv2.DescriptorMatcher_create",
"numpy.dot",
"cv2.imshow"
] |
[((122, 215), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Code from AKAZE local features matching tutorial."""'}), "(description=\n 'Code from AKAZE local features matching tutorial.')\n", (145, 215), False, 'import argparse\n'), ((511, 554), 'cv2.imread', 'cv.imread', (['args.input1', 'cv.IMREAD_GRAYSCALE'], {}), '(args.input1, cv.IMREAD_GRAYSCALE)\n', (520, 554), True, 'import cv2 as cv\n'), ((562, 605), 'cv2.imread', 'cv.imread', (['args.input2', 'cv.IMREAD_GRAYSCALE'], {}), '(args.input2, cv.IMREAD_GRAYSCALE)\n', (571, 605), True, 'import cv2 as cv\n'), ((706, 759), 'cv2.FileStorage', 'cv.FileStorage', (['args.homography', 'cv.FILE_STORAGE_READ'], {}), '(args.homography, cv.FILE_STORAGE_READ)\n', (720, 759), True, 'import cv2 as cv\n'), ((817, 837), 'cv2.ORB_create', 'cv.ORB_create', (['(10000)'], {}), '(10000)\n', (830, 837), True, 'import cv2 as cv\n'), ((882, 916), 'cv2.xfeatures2d.BEBLID_create', 'cv.xfeatures2d.BEBLID_create', (['(0.75)'], {}), '(0.75)\n', (910, 916), True, 'import cv2 as cv\n'), ((1094, 1162), 'cv2.DescriptorMatcher_create', 'cv.DescriptorMatcher_create', (['cv.DescriptorMatcher_BRUTEFORCE_HAMMING'], {}), '(cv.DescriptorMatcher_BRUTEFORCE_HAMMING)\n', (1121, 1162), True, 'import cv2 as cv\n'), ((2243, 2308), 'cv2.drawMatches', 'cv.drawMatches', (['img1', 'inliers1', 'img2', 'inliers2', 'good_matches', 'res'], {}), '(img1, inliers1, img2, inliers2, good_matches, res)\n', (2257, 2308), True, 'import cv2 as cv\n'), ((2309, 2347), 'cv2.imwrite', 'cv.imwrite', (['"""matching_result.png"""', 'res'], {}), "('matching_result.png', res)\n", (2319, 2347), True, 'import cv2 as cv\n'), ((2785, 2809), 'cv2.imshow', 'cv.imshow', (['"""result"""', 'res'], {}), "('result', res)\n", (2794, 2809), True, 'import cv2 as cv\n'), ((2810, 2822), 'cv2.waitKey', 'cv.waitKey', ([], {}), '()\n', (2820, 2822), True, 'import cv2 as cv\n'), ((1664, 1697), 'numpy.ones', 'np.ones', (['(3, 1)'], {'dtype': 'np.float64'}), '((3, 1), dtype=np.float64)\n', (1671, 1697), True, 'import numpy as np\n'), ((1769, 1792), 'numpy.dot', 'np.dot', (['homography', 'col'], {}), '(homography, col)\n', (1775, 1792), True, 'import numpy as np\n')]
|
from fdfdpy.derivatives import createDws
from fdfdpy.constants import DEFAULT_MATRIX_FORMAT
import scipy.sparse as sp
import numpy as np
import scipy.sparse.linalg as la
from scipy.sparse.linalg import spsolve as bslash
matrix_format = DEFAULT_MATRIX_FORMAT
matrix_format = 'csc'  # override: spsolve (bslash) expects CSC/CSR input
def grid_average(center_array, w):
# computes values at cell edges
xy = {'x': 0, 'y': 1}
center_shifted = np.roll(center_array, 1, axis=xy[w])
avg_array = (center_shifted+center_array)/2
return avg_array
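# Small self-contained check (not used by the solver): averaging a linear ramp along x gives
# midpoint values, except at the wrap-around edge introduced by np.roll.
def _grid_average_example():
    ramp = np.arange(4.0).reshape(4, 1) * np.ones((1, 3))
    return grid_average(ramp, 'x')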
def eigenTE_Kx_Ky(wvlen, eps_r, dL, Ky, num_modes = 10):
    '''
    The eigenvalues should be bounded due to periodicity, i.e. the system should have some
    degeneracy with respect to the Bloch wavevectors.
    :param wvlen: operating wavelength
    :param eps_r: relative permittivity grid
    :param dL: grid spacing(s) passed to createDws
    :param Ky: Bloch wavevector component along y
    :param num_modes: number of eigenpairs to compute
    :return: eigenvalues k, eigenvectors (modes), and the matrices A, B of the linearized problem
    '''
L0 = 1e-6;
eps0 = 8.854e-12 * L0;
mu0 = np.pi * 4e-7 * L0;
c0 = 1 / np.sqrt(eps0 * mu0);
#print(c0, mu0, eps0)
omega = 2*np.pi*c0/wvlen;
N = eps_r.shape;
M = np.prod(N);
Dxf = createDws('x', 'f', dL, N, matrix_format=matrix_format);
Dxb = createDws('x', 'b', dL, N, matrix_format=matrix_format);
Dyf = createDws('y', 'f', dL, N, matrix_format=matrix_format);
Dyb = createDws('y', 'b', dL, N, matrix_format=matrix_format);
# Epxx = grid_average(eps_r, 'x');
# Epyy = grid_average(eps_r, 'y');
#Tez = sp.spdiags(np.diag(eps0*eps_r), 0, M,M, format = matrix_format)
invTepzz = sp.spdiags(1 / eps_r.flatten(), 0, M,M, format=matrix_format)
I = sp.identity(M, format = matrix_format);
K = invTepzz@(-Dxf @ Dxb - Dyf @ Dyb - 1j*((Dyf + Dyb))*Ky + Ky**2*I) - omega ** 2*eps0 *mu0*I ;
M = invTepzz;
C = -invTepzz@(1j * (Dxf + Dxb)); #% lambda
A = sp.bmat([[M, None], [None, I]], format = matrix_format); #A should just be the identity
B = sp.bmat([[C, K], [-I, None]], format = matrix_format);
D= bslash(A,B);
neff = np.sqrt(np.max(np.real(eps_r)));
beta_est = abs(2*np.pi*neff / wvlen);
    sigma = beta_est;
    sigma = 0;  # shift disabled; set sigma = beta_est to target eigenvalues near the estimated beta
#get eigenvalues
k, modes = sp.linalg.eigs(D, k=num_modes, sigma = sigma)
return k, modes, A,B;
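# Example invocation (illustrative values only; eps_r and dL must follow the fdfdpy
# conventions used by createDws above):
# k, modes, A, B = eigenTE_Kx_Ky(wvlen=1.55, eps_r=np.ones((80, 80)), dL=[0.05, 0.05],
#                                Ky=0.0, num_modes=10)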
|
[
"fdfdpy.derivatives.createDws",
"numpy.roll",
"scipy.sparse.bmat",
"scipy.sparse.identity",
"scipy.sparse.linalg.spsolve",
"numpy.real",
"scipy.sparse.linalg.eigs",
"numpy.prod",
"numpy.sqrt"
] |
[((399, 435), 'numpy.roll', 'np.roll', (['center_array', '(1)'], {'axis': 'xy[w]'}), '(center_array, 1, axis=xy[w])\n', (406, 435), True, 'import numpy as np\n'), ((996, 1006), 'numpy.prod', 'np.prod', (['N'], {}), '(N)\n', (1003, 1006), True, 'import numpy as np\n'), ((1018, 1073), 'fdfdpy.derivatives.createDws', 'createDws', (['"""x"""', '"""f"""', 'dL', 'N'], {'matrix_format': 'matrix_format'}), "('x', 'f', dL, N, matrix_format=matrix_format)\n", (1027, 1073), False, 'from fdfdpy.derivatives import createDws\n'), ((1085, 1140), 'fdfdpy.derivatives.createDws', 'createDws', (['"""x"""', '"""b"""', 'dL', 'N'], {'matrix_format': 'matrix_format'}), "('x', 'b', dL, N, matrix_format=matrix_format)\n", (1094, 1140), False, 'from fdfdpy.derivatives import createDws\n'), ((1152, 1207), 'fdfdpy.derivatives.createDws', 'createDws', (['"""y"""', '"""f"""', 'dL', 'N'], {'matrix_format': 'matrix_format'}), "('y', 'f', dL, N, matrix_format=matrix_format)\n", (1161, 1207), False, 'from fdfdpy.derivatives import createDws\n'), ((1219, 1274), 'fdfdpy.derivatives.createDws', 'createDws', (['"""y"""', '"""b"""', 'dL', 'N'], {'matrix_format': 'matrix_format'}), "('y', 'b', dL, N, matrix_format=matrix_format)\n", (1228, 1274), False, 'from fdfdpy.derivatives import createDws\n'), ((1516, 1552), 'scipy.sparse.identity', 'sp.identity', (['M'], {'format': 'matrix_format'}), '(M, format=matrix_format)\n', (1527, 1552), True, 'import scipy.sparse as sp\n'), ((1733, 1786), 'scipy.sparse.bmat', 'sp.bmat', (['[[M, None], [None, I]]'], {'format': 'matrix_format'}), '([[M, None], [None, I]], format=matrix_format)\n', (1740, 1786), True, 'import scipy.sparse as sp\n'), ((1829, 1880), 'scipy.sparse.bmat', 'sp.bmat', (['[[C, K], [-I, None]]'], {'format': 'matrix_format'}), '([[C, K], [-I, None]], format=matrix_format)\n', (1836, 1880), True, 'import scipy.sparse as sp\n'), ((1892, 1904), 'scipy.sparse.linalg.spsolve', 'bslash', (['A', 'B'], {}), '(A, B)\n', (1898, 1904), True, 'from scipy.sparse.linalg import spsolve as bslash\n'), ((2064, 2107), 'scipy.sparse.linalg.eigs', 'sp.linalg.eigs', (['D'], {'k': 'num_modes', 'sigma': 'sigma'}), '(D, k=num_modes, sigma=sigma)\n', (2078, 2107), True, 'import scipy.sparse as sp\n'), ((889, 908), 'numpy.sqrt', 'np.sqrt', (['(eps0 * mu0)'], {}), '(eps0 * mu0)\n', (896, 908), True, 'import numpy as np\n'), ((1931, 1945), 'numpy.real', 'np.real', (['eps_r'], {}), '(eps_r)\n', (1938, 1945), True, 'import numpy as np\n')]
|
import graph_tool.all as gt
import json
import numpy as np
from collections import Counter
from subprocess import Popen, PIPE
from HierarchicalPartitioningTree import PartitionTree, PartitionNode
"""Helpers
This module provides helper functions.
"""
def statistics(G):
"""Provides general graph statistics.
Args:
G (graph_tool.Graph): The graph instance.
Returns:
        A dict describing many statistical properties of the graph (or a message string
        if no graph is loaded).
"""
if not G:
return 'No Graph Loaded'
float_formatter = lambda x: '{:.2f}'.format(x)
if G.get_vertex_filter()[0] is not None:
vfilt = G.get_vertex_filter()[0]
v_idx = np.where(vfilt.a == 1)[0]
else:
v_idx = np.arange(G.num_vertices())
deg_counts, deg_bins = gt.vertex_hist(G, 'out', float_count=False)
incl_idx = np.where(deg_counts != 0)[0]
deg_bins = list(deg_bins[incl_idx])
deg_counts = list(deg_counts[incl_idx])
comp, cc_hist = gt.label_components(G)
cc_size_counts = sorted(Counter(cc_hist).items())
cc_sizes = [csc[0] for csc in cc_size_counts]
cc_counts = [csc[1] for csc in cc_size_counts]
num_cc = len(np.unique(comp.a))
if deg_bins[0] == 0:
num_singletons = deg_counts[0]
else:
num_singletons = 0
if G.get_vertex_filter()[0] or G.get_edge_filter()[0]:
# Always correct, but much slower
peel_partition = kcore_decomposition(G)
peel_bins = sorted(peel_partition.keys())
peel_counts = [len(peel_partition[k]) for k in peel_bins]
else:
# NOTE:
# Very fast, but unstable (not always accurate) for graphs with filters
kcore = gt.kcore_decomposition(G)
C = Counter(kcore.a[v_idx])
peel_bins, peel_counts = [list(t) for t in zip(*C.items())]
vlogv = G.num_vertices() * np.log2(G.num_vertices())
return {
'num_vertices': G.num_vertices(),
'num_edges': G.num_edges(),
'num_cc': num_cc,
'num_singletons': num_singletons,
'vlogv': float_formatter(vlogv),
'deg_bins': deg_bins,
'deg_counts': deg_counts,
'cc_sizes': cc_sizes,
'cc_counts': cc_counts,
'peel_bins': peel_bins,
'peel_counts': peel_counts,
}
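# Minimal usage sketch (not called anywhere; assumes a working graph_tool install): build a
# tiny undirected graph and report its statistics.
def _statistics_toy_example():
    g = gt.Graph(directed=False)
    g.add_edge_list([(0, 1), (1, 2), (2, 0), (2, 3)])
    return statistics(g)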
def kcore_decomposition(G, vlist=[], elist=[]):
"""Peform kcore decomposition (aka graph vertex peeling) on subgraph of G
induced by input vertex and edge indices.
Args:
G (graph_tool.Graph): The graph instance.
vlist (list): List of vertex indices to induce upon.
elist (list): List of edge indices to induce upon.
Returns:
Dict with keys as kcore values and values as vertex IDs.
"""
# initiate filters, if necessary
if vlist or elist:
vp = G.new_vp('bool', vals=False)
ep = G.new_ep('bool', vals=False)
        if len(vlist) == 0:
            vlist = np.ones_like(vp.a)
        elif len(elist) == 0:
            elist = np.ones_like(ep.a)
vp.a[vlist] = True
ep.a[elist] = True
G.set_vertex_filter(vp)
G.set_edge_filter(ep)
cmd = './app/bin/graph_peeling.bin -t core -o core'
p = Popen([cmd], shell=True, stdout=PIPE, stdin=PIPE)
for e in G.edges():
p.stdin.write('{} {}\n'.format(e.source(), e.target()))
p.stdin.flush()
p.stdin.close()
peel_partition = {}
while True:
line = p.stdout.readline()
if line == '':
break
if not line.startswith('Core'):
continue
peel, vertices = line.split(' = ')
peel = int(peel.split('_')[-1])
vertices = vertices.strip().split()
peel_partition[peel] = vertices
return peel_partition
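# Note: kcore_decomposition above shells out to an external peeling binary; it streams
# "source target" pairs on stdin and parses stdout lines of the form "Core_<k> = v1 v2 ...",
# so ./app/bin/graph_peeling.bin must exist and follow that protocol.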
def traverse_tree(T, fully_qualified_label):
"""Traverse hierarchy to find PartitionNode with given label.
Args:
T (PartitionTree): The hierarchy tree instance.
fully_qualified_label (str): Full name of the PartitionNode.
Returns:
PartitionNode with given label.
"""
if fully_qualified_label.lower() == 'root':
return T.root
sub_ids = fully_qualified_label.split('|')
if sub_ids[0].lower() == 'root':
sub_ids = sub_ids[1:]
node = T.root
for s in xrange(len(sub_ids)):
idx = int(sub_ids[s].split('_')[-1])
node = node.children[idx]
return node
def get_indices(T, fully_qualified_label):
"""Traverse hierarchy to find PartitionNode with given label and return its
vertex and edge indices.
Args:
T (PartitionTree): The hierarchy tree instance.
fully_qualified_label (str): Full name of the PartitionNode.
Returns:
vlist, elist: vertex and edge lists, respectively.
"""
node = traverse_tree(T, fully_qualified_label)
if len(node.vertex_indices) != 0:
vlist = node.vertex_indices
elist = node.edge_indices
else:
vlist, elist = PartitionTree.collect_indices(node)
return vlist, elist
def save_adjacency(G, vlist, elist, filename):
"""Induce subgraph of G using input vertex and edge lists and save the
resulting adjacency list to a file.
Args:
G (graph_tool.Graph): The graph instance.
vlist (list): List of vertex indices to induce upon.
elist (list): List of edge indices to induce upon.
filename (str): Filepath to write adjacency.
Returns:
Confirmation or error message.
"""
# get proper indices
vp = G.new_vp('bool', vals=False)
ep = G.new_ep('bool', vals=False)
vp.a[vlist] = True
ep.a[elist] = True
G.set_vertex_filter(vp)
G.set_edge_filter(ep)
with open(filename, 'w') as f:
for v in G.vertices():
neighbors = [u for u in v.out_neighbours()]
# First column for v; following columns are neighbors.
# NOTE: Adjacency list is redundant for undirected graphs
out_str = ('{} ' +
('{} ' * len(neighbors))[:-1] +
'\n').format(v, *neighbors)
f.write(out_str)
return {'msg': 'Adjacency saved as {}'.format(filename)}
def to_vis_json(G, filename=None):
"""Produce Vis.js formatted network data (general).
Args:
G (graph_tool.Graph): The graph instance.
Returns:
Vis.js formatted network data.
"""
nodes = []
for v in G.vertices():
# v_id = G.vp['id'][v]
# label = v_id
v_id = G.vertex_index[v]
label = G.vp['id'][v]
group = 1
title = label
value = v.out_degree()
nodes.append({
'id': v_id,
'label': label,
'title': title,
'value': value,
'group': group,
})
edges = []
for e in G.edges():
# src = G.vp['id'][e.source()]
# tar = G.vp['id'][e.target()]
src = G.vertex_index[e.source()]
tar = G.vertex_index[e.target()]
edges.append({
'id': G.edge_index[e],
'from': src,
'to': tar,
# 'value': 1,
})
return {'nodes': nodes, 'edges': edges}
def to_vis_json_bcc_tree(G, filename=None):
"""Produce Vis.js formatted network data (for BCC trees).
Args:
G (graph_tool.Graph): The graph instance.
Returns:
Vis.js formatted network data.
"""
AP_GROUP = 0
BCC_METANODE_GROUP = 1
nodes = []
for v in G.vertices():
v_id = G.vertex_index[v]
count = G.vp['count'][v]
is_art = G.vp['is_articulation'][v]
if is_art:
label = G.vp['id'][v]
title = 'AP: {}'.format(label)
group = AP_GROUP
else:
title = 'BCC: {} | Count: {}'.format(v_id, count)
label = title
group = BCC_METANODE_GROUP
value = count
nodes.append({
'id': v_id,
'label': label,
'title': title,
'value': value,
'group': group,
})
edges = []
for e in G.edges():
src = G.vertex_index[e.source()]
tar = G.vertex_index[e.target()]
edges.append({
'id': G.edge_index[e],
'from': src,
'to': tar,
'value': G.ep['count'][e],
})
return {'nodes': nodes, 'edges': edges}
def to_vis_json_cluster_map(G,
cluster_assignment,
landmark_map,
spine,
branches):
"""Produce Vis.js formatted network data (for clusters).
Args:
G (graph_tool.Graph): The graph instance.
cluster_assignment (dict): Mapping of vertices to clusters.
landmark_map (set): Set containing landmark vertices.
spine (set): Set containing edges on spine.
branches (set): Set containing edges on spinal branches.
Returns:
Vis.js formatted network data.
"""
nodes = []
for v in G.vertices():
v_id = G.vertex_index[v]
label = G.vp['id'][v]
title = label
value = v.out_degree()
group = cluster_assignment[v_id]
if v_id in landmark_map:
shape = 'star'
border_width = 2
else:
shape = 'dot'
border_width = 1
nodes.append({
'id': v_id,
'label': label,
'title': title,
'value': value,
'group': group,
'shape': shape,
'borderWidth': border_width,
})
edges = []
for e in G.edges():
src = G.vertex_index[e.source()]
tar = G.vertex_index[e.target()]
if e in spine:
category = 'spine'
elif e in branches:
category = 'branch'
else:
category = 'none'
edges.append({
'id': G.edge_index[e],
'from': src,
'to': tar,
'category': category,
# 'value': 1,
})
return {'nodes': nodes, 'edges': edges}
def to_vis_json_metagraph(metanodes, cross_edges):
"""Produce Vis.js formatted network data (for children metagraphs).
Args:
metanodes (dict): Metanodes of the metagraph.
cross_edges (dict): Edges running between metanodes.
Returns:
Vis.js formatted network data.
"""
nodes = []
for mn_id, node_info in metanodes.iteritems():
long_label = node_info['fully_qualified_label']
title_html = \
'<p>{}<br>|V|: {}<br>|E|: {}'.format(long_label,
node_info['num_vertices'],
node_info['num_edges'])
nodes.append({
'id': mn_id,
'label': node_info['short_label'],
'title': title_html,
'value': node_info['num_vertices'],
})
edges = []
for idx, (e, edge_indices) in enumerate(cross_edges.iteritems()):
value = len(edge_indices)
edges.append({
'id': idx,
'from': e[0],
'to': e[1],
'value': value,
'title': 'meta-edge size: {}'.format(value),
})
return {'nodes': nodes, 'edges': edges}
|
[
"graph_tool.all.kcore_decomposition",
"subprocess.Popen",
"numpy.ones_like",
"graph_tool.all.label_components",
"HierarchicalPartitioningTree.PartitionTree.collect_indices",
"graph_tool.all.vertex_hist",
"numpy.where",
"collections.Counter",
"numpy.unique"
] |
[((783, 826), 'graph_tool.all.vertex_hist', 'gt.vertex_hist', (['G', '"""out"""'], {'float_count': '(False)'}), "(G, 'out', float_count=False)\n", (797, 826), True, 'import graph_tool.all as gt\n'), ((976, 998), 'graph_tool.all.label_components', 'gt.label_components', (['G'], {}), '(G)\n', (995, 998), True, 'import graph_tool.all as gt\n'), ((3165, 3214), 'subprocess.Popen', 'Popen', (['[cmd]'], {'shell': '(True)', 'stdout': 'PIPE', 'stdin': 'PIPE'}), '([cmd], shell=True, stdout=PIPE, stdin=PIPE)\n', (3170, 3214), False, 'from subprocess import Popen, PIPE\n'), ((842, 867), 'numpy.where', 'np.where', (['(deg_counts != 0)'], {}), '(deg_counts != 0)\n', (850, 867), True, 'import numpy as np\n'), ((1172, 1189), 'numpy.unique', 'np.unique', (['comp.a'], {}), '(comp.a)\n', (1181, 1189), True, 'import numpy as np\n'), ((1680, 1705), 'graph_tool.all.kcore_decomposition', 'gt.kcore_decomposition', (['G'], {}), '(G)\n', (1702, 1705), True, 'import graph_tool.all as gt\n'), ((1718, 1741), 'collections.Counter', 'Counter', (['kcore.a[v_idx]'], {}), '(kcore.a[v_idx])\n', (1725, 1741), False, 'from collections import Counter\n'), ((4927, 4962), 'HierarchicalPartitioningTree.PartitionTree.collect_indices', 'PartitionTree.collect_indices', (['node'], {}), '(node)\n', (4956, 4962), False, 'from HierarchicalPartitioningTree import PartitionTree, PartitionNode\n'), ((675, 697), 'numpy.where', 'np.where', (['(vfilt.a == 1)'], {}), '(vfilt.a == 1)\n', (683, 697), True, 'import numpy as np\n'), ((2900, 2918), 'numpy.ones_like', 'np.ones_like', (['vp.a'], {}), '(vp.a)\n', (2912, 2918), True, 'import numpy as np\n'), ((1027, 1043), 'collections.Counter', 'Counter', (['cc_hist'], {}), '(cc_hist)\n', (1034, 1043), False, 'from collections import Counter\n'), ((2965, 2983), 'numpy.ones_like', 'np.ones_like', (['ep.a'], {}), '(ep.a)\n', (2977, 2983), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 28 17:30:59 2019
@author: vasgaoweithu
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
from model.nms.nms_wrapper import nms
# from model.utils.cython_nms import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
from model.utils.blob import im_list_to_blob
import nn as mynn
from xml.etree import ElementTree as ET
from xml.dom import minidom
from torch.nn import functional as F
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
def _get_image_blob(im):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
im_shapes: the list of image shapes
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in cfg.TEST.SCALES:
im_scale = float(target_size) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im_list_to_blob([im]))
blob = processed_ims
return blob, np.array(im_scale_factors)
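# Note: the scale factor above is computed from the longest image side (im_size_max), so
# each entry of cfg.TEST.SCALES acts as a target size for the maximum dimension of the
# resized image.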
def _get_rois_blob(im_rois, im_scale_factors):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid
"""
rois_blob_real = []
for i in xrange(len(im_scale_factors)):
rois, levels = _project_im_rois(im_rois, np.array([im_scale_factors[i]]))
rois_blob = np.hstack((levels, rois))
rois_blob_real.append(rois_blob.astype(np.float32, copy=False))
return rois_blob_real
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (list): image pyramid levels used by each projected RoI
"""
    im_rois = im_rois.astype(np.float64, copy=False)
if len(scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
        levels = np.zeros((im_rois.shape[0], 1), dtype=int)
rois = im_rois * scales[levels]
return rois, levels
def _get_blobs(im, rois):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {'data' : None, 'rois' : None}
blobs['data'], im_scale_factors = _get_image_blob(im)
blobs['rois'] = _get_rois_blob(rois, im_scale_factors)
return blobs, im_scale_factors
def iou_other_self_mutual(bb, bbgt):
bb = bb.astype(np.float32)
bbgt = bbgt.astype(np.float32)
bi = np.concatenate([np.maximum(bb[:,0:1], bbgt[0:1]), np.maximum(bb[:,1:2], bbgt[1:2]),
np.minimum(bb[:,2:3], bbgt[2:3]), np.minimum(bb[:,3:4], bbgt[3:4])], axis=1)
iw = bi[:,2] - bi[:,0] + 1
ih = bi[:,3] - bi[:,1] + 1
other_area = (bb[:,2] - bb[:,0] + 1) * (bb[:,3] - bb[:,1] + 1)
self_area = (bbgt[2] - bbgt[0] + 1) * (bbgt[3] - bbgt[1] + 1)
mask = (np.greater(iw,0)*np.greater(ih,0)).astype(np.float32)
cross_area = iw * ih * mask
return cross_area/other_area, cross_area/self_area, cross_area/(other_area+self_area-cross_area)
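# Toy check (not called anywhere): identical boxes give 1.0 and a disjoint pair gives 0.0
# for all three ratios returned by iou_other_self_mutual.
def _iou_toy_example():
    boxes = np.array([[0, 0, 10, 10], [20, 20, 30, 30]])
    gt_box = np.array([0, 0, 10, 10])
    return iou_other_self_mutual(boxes, gt_box)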
def im_detect(data, rois, labels, model):
inputs = {'data': [Variable(torch.from_numpy(data))],
'rois': [Variable(torch.from_numpy(rois))],
'labels': [Variable(torch.from_numpy(labels))],
'seg_map': [Variable(torch.from_numpy(np.zeros((1,1))))]}
pcl_prob0, pcl_prob1, pcl_prob2 = model(**inputs)
scores = pcl_prob2
scores = scores.data.cpu().numpy()
return scores[:, 1:].copy()
def im_detect_cls(data, rois, labels, net):
data_tensor = Variable(torch.from_numpy(data)).cuda()
rois_tensor = Variable(torch.from_numpy(rois)).cuda()
base_feat = fasterRCNN.RCNN_base(data_tensor)
pooled_feat = fasterRCNN.RCNN_roi_pool(base_feat, rois_tensor.view(-1,5).type(base_feat.dtype))
fc_feat = fasterRCNN._head_to_tail(pooled_feat)
cls0_score0 = fasterRCNN.RCNN_cls0_score0(fc_feat)
cls0_score1 = fasterRCNN.RCNN_cls0_score1(fc_feat)
cls0_prob = F.softmax(cls0_score0,1)*F.softmax(cls0_score1,0)
cls1_score0 = fasterRCNN.RCNN_cls1_score0(fc_feat)
cls1_score1 = fasterRCNN.RCNN_cls1_score1(fc_feat)
cls1_prob = F.softmax(cls1_score0,1)*F.softmax(cls1_score1,0)
return cls0_prob.data.cpu().numpy(), cls1_prob.data.cpu().numpy()
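# im_detect_cls scores each proposal with two parallel heads; each head multiplies a softmax
# over classes (dim 1) by a softmax over proposals (dim 0), in the spirit of WSDDN-style
# weakly supervised scoring, and the per-proposal class probabilities of both heads are
# returned.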
def parse_xml_12(xml_file, re_xml_file, gt_truth, image_name):
tree = ET.parse(xml_file)
root = ET.Element('annotation')
ET.SubElement(root, 'folder').text = tree.find('folder').text
ET.SubElement(root, 'filename').text = tree.find('filename').text
source = ET.SubElement(root, 'source')
ET.SubElement(source, 'database').text = tree.find('source').find('database').text
ET.SubElement(source, 'annotation').text = tree.find('source').find('annotation').text
ET.SubElement(source, 'image').text = tree.find('source').find('image').text
size = ET.SubElement(root, 'size')
ET.SubElement(size, 'width').text = tree.find('size').find('width').text
ET.SubElement(size, 'height').text = tree.find('size').find('height').text
ET.SubElement(size, 'depth').text = tree.find('size').find('depth').text
ET.SubElement(root, 'segmented').text = tree.find('segmented').text
for obj in gt_truth:
obj_struct = ET.SubElement(root, 'object')
ET.SubElement(obj_struct, 'name').text = obj[0]
bndbox = ET.SubElement(obj_struct, 'bndbox')
ET.SubElement(bndbox, 'xmin').text = str(obj[1])
ET.SubElement(bndbox, 'ymin').text = str(obj[2])
ET.SubElement(bndbox, 'xmax').text = str(obj[3])
ET.SubElement(bndbox, 'ymax').text = str(obj[4])
xmltsr = minidom.parseString(ET.tostring(root)).toprettyxml(indent=6*' ')
open(re_xml_file, 'w').close()
with open(re_xml_file, 'w') as f:
f.write(xmltsr)
def parse_xml_07(xml_file, re_xml_file, gt_truth, image_name):
tree = ET.parse(xml_file)
root = ET.Element('annotation')
ET.SubElement(root, 'folder').text = tree.find('folder').text
ET.SubElement(root, 'filename').text = tree.find('filename').text
source = ET.SubElement(root, 'source')
ET.SubElement(source, 'database').text = tree.find('source').find('database').text
ET.SubElement(source, 'annotation').text = tree.find('source').find('annotation').text
ET.SubElement(source, 'image').text = tree.find('source').find('image').text
ET.SubElement(source, 'flickrid').text = tree.find('source').find('flickrid').text
owner = ET.SubElement(root, 'owner')
ET.SubElement(owner, 'flickrid').text = tree.find('owner').find('flickrid').text
ET.SubElement(owner, 'name').text = tree.find('owner').find('name').text
size = ET.SubElement(root, 'size')
ET.SubElement(size, 'width').text = tree.find('size').find('width').text
ET.SubElement(size, 'height').text = tree.find('size').find('height').text
ET.SubElement(size, 'depth').text = tree.find('size').find('depth').text
ET.SubElement(root, 'segmented').text = tree.find('segmented').text
for obj in gt_truth:
obj_struct = ET.SubElement(root, 'object')
ET.SubElement(obj_struct, 'name').text = obj[0]
bndbox = ET.SubElement(obj_struct, 'bndbox')
ET.SubElement(bndbox, 'xmin').text = str(obj[1])
ET.SubElement(bndbox, 'ymin').text = str(obj[2])
ET.SubElement(bndbox, 'xmax').text = str(obj[3])
ET.SubElement(bndbox, 'ymax').text = str(obj[4])
xmltsr = minidom.parseString(ET.tostring(root)).toprettyxml(indent=6*' ')
open(re_xml_file, 'w').close()
with open(re_xml_file, 'w') as f:
f.write(xmltsr)
def _get_seg_map(seg_map_path, im_scale_factors):
seg_map_path = seg_map_path.replace('JPEGImages','SEG_MAP')
seg_map_path = seg_map_path.replace('jpg','png')
seg_map = cv2.imread(seg_map_path)
seg_map = seg_map[:,:,0]
if(roidb[0]['flipped']):
seg_map = np.flip(seg_map, axis=1)
seg_maps = []
for im_scale in im_scale_factors:
        # Resize from the original map each time so multiple scales do not compound.
        scaled_map = cv2.resize(seg_map, None, None, fx=im_scale, fy=im_scale,
                                interpolation=cv2.INTER_NEAREST)
        seg_maps.append(scaled_map.astype('float32'))
return seg_maps
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--imdb', dest='imdbval_name',
                      help='testing imdb',
default='voc_2007_test', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/vgg16.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--gcn', dest='gcn',
                      help='whether to use a large image scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
args = parser.parse_args()
return args
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
args.cfg_file = "cfgs/{}_gcn.yml".format(args.net) if args.gcn else "cfgs/{}_scale.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = os.path.join(args.load_dir, args.net, args.dataset)
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
# initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
tmp_state_dict = checkpoint['model']
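# keys in the saved state dict carry a 'module.' prefix (the model was wrapped in
# nn.DataParallel when the checkpoint was written); map them back onto the bare model's keys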
correct_state_dict = {k:tmp_state_dict['module.'+k] for k in fasterRCNN.state_dict()}
fasterRCNN.load_state_dict(correct_state_dict)
# fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('loaded model successfully!')
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
#fasterRCNN = mynn.DataParallel(fasterRCNN, minibatch=True)
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.05
else:
thresh = 0.0
num_images = len(imdb.image_index)
_t = {'im_detect': time.time(), 'misc': time.time()}
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
all_proposal_num = 0.
consistent_proposal_num_1 = 0.
consistent_proposal_num_2 = 0.
num_1 = 0.
num_2 = 0.
for i in range(num_images):
image_name = imdb.image_path_at(i)
image_name = os.path.basename(image_name).split('.')[0]
im = cv2.imread(imdb.image_path_at(i))
boxes = roidb[i]['boxes']
labels = roidb[i]['labels']
det_tic = time.time()
blobs, unused_im_scale_factors = _get_blobs(im, boxes)
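# run detection once per image scale and accumulate the per-class scores;
# blobs['data'][j] and blobs['rois'][j] hold the image and its proposals at the j-th scale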
for j in range(len(blobs['data'])):
scores_tmp_1, scores_tmp_2 = im_detect_cls(blobs['data'][j], blobs['rois'][j], roidb[i]['labels'], fasterRCNN)
scores_tmp = im_detect(blobs['data'][j], blobs['rois'][j], roidb[i]['labels'], fasterRCNN)
if j == 0:
scores_1 = scores_tmp_1.copy()
scores_2 = scores_tmp_2.copy()
scores = scores_tmp.copy()
else:
scores_1 += scores_tmp_1
scores_2 += scores_tmp_2
scores += scores_tmp
pred_boxes = boxes.copy()
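# for every class actually present in the image, pick the top-scoring proposal under each
# score head (scores_1, scores_2) and under the combined scores, then count how often they
# agree with the combined choice: once by IoU > 0.8, once by exact proposal index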
for j in range(imdb.num_classes):
if labels[0,j] > 0:
all_proposal_num += 1
cls_name = imdb._classes[j]
ind_1 = np.argmax(scores_1[:, j])
ind_2 = np.argmax(scores_2[:, j])
ind = np.argmax(scores[:, j])
gt_box_1 = boxes[ind_1, :].copy()
gt_box_2 = boxes[ind_2, :].copy()
gt_box = boxes[ind, :].copy()
_, _, mutual_iou_1 = iou_other_self_mutual(np.expand_dims(gt_box_1,0), gt_box)
_, _, mutual_iou_2 = iou_other_self_mutual(np.expand_dims(gt_box_2,0), gt_box)
if mutual_iou_1[0] > 0.8:
consistent_proposal_num_1 += 1
if mutual_iou_2[0] > 0.8:
consistent_proposal_num_2 += 1
if ind_1 == ind:
num_1 += 1
if ind_2 == ind:
num_2 += 1
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
end = time.time()
rate_1 = consistent_proposal_num_1 / all_proposal_num
rate_2 = consistent_proposal_num_2 / all_proposal_num
if not os.path.exists('consistent_rate_0.8.txt'):
open('consistent_rate_0.8.txt', 'w').close()
with open('consistent_rate_0.8.txt', 'a+') as f:
f.write(str(rate_1)+'\t')
f.write(str(rate_2)+'\n')
if not os.path.exists('consistent_rate_1.txt'):
open('consistent_rate_1.txt', 'w').close()
with open('consistent_rate_1.txt', 'a+') as f:
f.write(str(num_1 / all_proposal_num)+'\t')
f.write(str(num_2 / all_proposal_num)+'\n')
print("test time: %0.4fs" % (end - start))
print('all proposal:{:.1f}, consistent proposal:{:.1f}, rate is {:.6f}'\
.format(all_proposal_num, consistent_proposal_num_1, rate_1))
print('all proposal:{:.1f}, consistent proposal:{:.1f}, rate is {:.6f}'\
.format(all_proposal_num, consistent_proposal_num_2, rate_2))
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.abs",
"numpy.maximum",
"numpy.argmax",
"numpy.greater",
"model.utils.config.cfg_from_file",
"sys.stdout.flush",
"pprint.pprint",
"xml.etree.ElementTree.SubElement",
"os.path.join",
"torch.load",
"xml.etree.ElementTree.Element",
"os.path.exists",
"numpy.max",
"roi_data_layer.roidb.combined_roidb",
"xml.etree.ElementTree.tostring",
"model.utils.blob.im_list_to_blob",
"cv2.resize",
"xml.etree.ElementTree.parse",
"numpy.minimum",
"os.path.basename",
"model.faster_rcnn.resnet.resnet",
"numpy.hstack",
"torch.cuda.is_available",
"model.utils.config.cfg_from_list",
"torch.from_numpy",
"numpy.flip",
"numpy.zeros",
"numpy.expand_dims",
"time.time",
"torch.nn.functional.softmax",
"cv2.imread",
"numpy.array",
"pdb.set_trace",
"model.faster_rcnn.vgg16.vgg16"
] |
[((1748, 1769), 'numpy.max', 'np.max', (['im_shape[0:2]'], {}), '(im_shape[0:2])\n', (1754, 1769), True, 'import numpy as np\n'), ((6126, 6144), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (6134, 6144), True, 'from xml.etree import ElementTree as ET\n'), ((6156, 6180), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""annotation"""'], {}), "('annotation')\n", (6166, 6180), True, 'from xml.etree import ElementTree as ET\n'), ((6335, 6364), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""source"""'], {}), "(root, 'source')\n", (6348, 6364), True, 'from xml.etree import ElementTree as ET\n'), ((6640, 6667), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""size"""'], {}), "(root, 'size')\n", (6653, 6667), True, 'from xml.etree import ElementTree as ET\n'), ((7656, 7674), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_file'], {}), '(xml_file)\n', (7664, 7674), True, 'from xml.etree import ElementTree as ET\n'), ((7686, 7710), 'xml.etree.ElementTree.Element', 'ET.Element', (['"""annotation"""'], {}), "('annotation')\n", (7696, 7710), True, 'from xml.etree import ElementTree as ET\n'), ((7865, 7894), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""source"""'], {}), "(root, 'source')\n", (7878, 7894), True, 'from xml.etree import ElementTree as ET\n'), ((8258, 8286), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""owner"""'], {}), "(root, 'owner')\n", (8271, 8286), True, 'from xml.etree import ElementTree as ET\n'), ((8465, 8492), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""size"""'], {}), "(root, 'size')\n", (8478, 8492), True, 'from xml.etree import ElementTree as ET\n'), ((9599, 9623), 'cv2.imread', 'cv2.imread', (['seg_map_path'], {}), '(seg_map_path)\n', (9609, 9623), False, 'import cv2\n'), ((10058, 10123), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train a Fast R-CNN network"""'}), "(description='Train a Fast R-CNN network')\n", (10081, 10123), False, 'import argparse\n'), ((12845, 12873), 'numpy.random.seed', 'np.random.seed', (['cfg.RNG_SEED'], {}), '(cfg.RNG_SEED)\n', (12859, 12873), True, 'import numpy as np\n'), ((13141, 13159), 'pprint.pprint', 'pprint.pprint', (['cfg'], {}), '(cfg)\n', (13154, 13159), False, 'import pprint\n'), ((13234, 13274), 'roi_data_layer.roidb.combined_roidb', 'combined_roidb', (['args.imdbval_name', '(False)'], {}), '(args.imdbval_name, False)\n', (13248, 13274), False, 'from roi_data_layer.roidb import combined_roidb\n'), ((13373, 13424), 'os.path.join', 'os.path.join', (['args.load_dir', 'args.net', 'args.dataset'], {}), '(args.load_dir, args.net, args.dataset)\n', (13385, 13424), False, 'import os\n'), ((14222, 14243), 'torch.load', 'torch.load', (['load_name'], {}), '(load_name)\n', (14232, 14243), False, 'import torch\n'), ((14752, 14763), 'time.time', 'time.time', ([], {}), '()\n', (14761, 14763), False, 'import time\n'), ((17283, 17294), 'time.time', 'time.time', ([], {}), '()\n', (17292, 17294), False, 'import time\n'), ((1932, 2026), 'cv2.resize', 'cv2.resize', (['im_orig', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_LINEAR'}), '(im_orig, None, None, fx=im_scale, fy=im_scale, interpolation=cv2\n .INTER_LINEAR)\n', (1942, 2026), False, 'import cv2\n'), ((2183, 2209), 'numpy.array', 'np.array', (['im_scale_factors'], {}), '(im_scale_factors)\n', (2191, 2209), True, 'import numpy as np\n'), ((2729, 2754), 'numpy.hstack', 'np.hstack', (['(levels, 
rois)'], {}), '((levels, rois))\n', (2738, 2754), True, 'import numpy as np\n'), ((3596, 3628), 'numpy.abs', 'np.abs', (['(scaled_areas - 224 * 224)'], {}), '(scaled_areas - 224 * 224)\n', (3602, 3628), True, 'import numpy as np\n'), ((3714, 3759), 'numpy.zeros', 'np.zeros', (['(im_rois.shape[0], 1)'], {'dtype': 'np.int'}), '((im_rois.shape[0], 1), dtype=np.int)\n', (3722, 3759), True, 'import numpy as np\n'), ((5745, 5770), 'torch.nn.functional.softmax', 'F.softmax', (['cls0_score0', '(1)'], {}), '(cls0_score0, 1)\n', (5754, 5770), True, 'from torch.nn import functional as F\n'), ((5770, 5795), 'torch.nn.functional.softmax', 'F.softmax', (['cls0_score1', '(0)'], {}), '(cls0_score1, 0)\n', (5779, 5795), True, 'from torch.nn import functional as F\n'), ((5926, 5951), 'torch.nn.functional.softmax', 'F.softmax', (['cls1_score0', '(1)'], {}), '(cls1_score0, 1)\n', (5935, 5951), True, 'from torch.nn import functional as F\n'), ((5951, 5976), 'torch.nn.functional.softmax', 'F.softmax', (['cls1_score1', '(0)'], {}), '(cls1_score1, 0)\n', (5960, 5976), True, 'from torch.nn import functional as F\n'), ((6185, 6214), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""folder"""'], {}), "(root, 'folder')\n", (6198, 6214), True, 'from xml.etree import ElementTree as ET\n'), ((6251, 6282), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""filename"""'], {}), "(root, 'filename')\n", (6264, 6282), True, 'from xml.etree import ElementTree as ET\n'), ((6369, 6402), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""database"""'], {}), "(source, 'database')\n", (6382, 6402), True, 'from xml.etree import ElementTree as ET\n'), ((6456, 6491), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""annotation"""'], {}), "(source, 'annotation')\n", (6469, 6491), True, 'from xml.etree import ElementTree as ET\n'), ((6547, 6577), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""image"""'], {}), "(source, 'image')\n", (6560, 6577), True, 'from xml.etree import ElementTree as ET\n'), ((6672, 6700), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['size', '"""width"""'], {}), "(size, 'width')\n", (6685, 6700), True, 'from xml.etree import ElementTree as ET\n'), ((6749, 6778), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['size', '"""height"""'], {}), "(size, 'height')\n", (6762, 6778), True, 'from xml.etree import ElementTree as ET\n'), ((6828, 6856), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['size', '"""depth"""'], {}), "(size, 'depth')\n", (6841, 6856), True, 'from xml.etree import ElementTree as ET\n'), ((6910, 6942), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""segmented"""'], {}), "(root, 'segmented')\n", (6923, 6942), True, 'from xml.etree import ElementTree as ET\n'), ((7029, 7058), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""object"""'], {}), "(root, 'object')\n", (7042, 7058), True, 'from xml.etree import ElementTree as ET\n'), ((7132, 7167), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['obj_struct', '"""bndbox"""'], {}), "(obj_struct, 'bndbox')\n", (7145, 7167), True, 'from xml.etree import ElementTree as ET\n'), ((7715, 7744), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""folder"""'], {}), "(root, 'folder')\n", (7728, 7744), True, 'from xml.etree import ElementTree as ET\n'), ((7781, 7812), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""filename"""'], {}), "(root, 'filename')\n", (7794, 7812), True, 
'from xml.etree import ElementTree as ET\n'), ((7899, 7932), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""database"""'], {}), "(source, 'database')\n", (7912, 7932), True, 'from xml.etree import ElementTree as ET\n'), ((7986, 8021), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""annotation"""'], {}), "(source, 'annotation')\n", (7999, 8021), True, 'from xml.etree import ElementTree as ET\n'), ((8077, 8107), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""image"""'], {}), "(source, 'image')\n", (8090, 8107), True, 'from xml.etree import ElementTree as ET\n'), ((8158, 8191), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['source', '"""flickrid"""'], {}), "(source, 'flickrid')\n", (8171, 8191), True, 'from xml.etree import ElementTree as ET\n'), ((8291, 8323), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['owner', '"""flickrid"""'], {}), "(owner, 'flickrid')\n", (8304, 8323), True, 'from xml.etree import ElementTree as ET\n'), ((8376, 8404), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['owner', '"""name"""'], {}), "(owner, 'name')\n", (8389, 8404), True, 'from xml.etree import ElementTree as ET\n'), ((8497, 8525), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['size', '"""width"""'], {}), "(size, 'width')\n", (8510, 8525), True, 'from xml.etree import ElementTree as ET\n'), ((8574, 8603), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['size', '"""height"""'], {}), "(size, 'height')\n", (8587, 8603), True, 'from xml.etree import ElementTree as ET\n'), ((8653, 8681), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['size', '"""depth"""'], {}), "(size, 'depth')\n", (8666, 8681), True, 'from xml.etree import ElementTree as ET\n'), ((8735, 8767), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""segmented"""'], {}), "(root, 'segmented')\n", (8748, 8767), True, 'from xml.etree import ElementTree as ET\n'), ((8854, 8883), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['root', '"""object"""'], {}), "(root, 'object')\n", (8867, 8883), True, 'from xml.etree import ElementTree as ET\n'), ((8957, 8992), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['obj_struct', '"""bndbox"""'], {}), "(obj_struct, 'bndbox')\n", (8970, 8992), True, 'from xml.etree import ElementTree as ET\n'), ((9700, 9724), 'numpy.flip', 'np.flip', (['seg_map'], {'axis': '(1)'}), '(seg_map, axis=1)\n', (9707, 9724), True, 'import numpy as np\n'), ((9800, 9895), 'cv2.resize', 'cv2.resize', (['seg_map', 'None', 'None'], {'fx': 'im_scale', 'fy': 'im_scale', 'interpolation': 'cv2.INTER_NEAREST'}), '(seg_map, None, None, fx=im_scale, fy=im_scale, interpolation=cv2\n .INTER_NEAREST)\n', (9810, 9895), False, 'import cv2\n'), ((12712, 12737), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12735, 12737), False, 'import torch\n'), ((13019, 13047), 'model.utils.config.cfg_from_file', 'cfg_from_file', (['args.cfg_file'], {}), '(args.cfg_file)\n', (13032, 13047), False, 'from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\n'), ((13084, 13112), 'model.utils.config.cfg_from_list', 'cfg_from_list', (['args.set_cfgs'], {}), '(args.set_cfgs)\n', (13097, 13112), False, 'from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir\n'), ((13434, 13459), 'os.path.exists', 'os.path.exists', (['input_dir'], {}), '(input_dir)\n', (13448, 13459), False, 'import os\n'), ((13756, 13793), 'model.faster_rcnn.vgg16.vgg16', 'vgg16', 
(['imdb.classes'], {'pretrained': '(False)'}), '(imdb.classes, pretrained=False)\n', (13761, 13793), False, 'from model.faster_rcnn.vgg16 import vgg16\n'), ((14918, 14929), 'time.time', 'time.time', ([], {}), '()\n', (14927, 14929), False, 'import time\n'), ((14939, 14950), 'time.time', 'time.time', ([], {}), '()\n', (14948, 14950), False, 'import time\n'), ((15002, 15032), 'numpy.array', 'np.array', (['[[], [], [], [], []]'], {}), '([[], [], [], [], []])\n', (15010, 15032), True, 'import numpy as np\n'), ((15416, 15427), 'time.time', 'time.time', ([], {}), '()\n', (15425, 15427), False, 'import time\n'), ((16973, 16984), 'time.time', 'time.time', ([], {}), '()\n', (16982, 16984), False, 'import time\n'), ((17040, 17051), 'time.time', 'time.time', ([], {}), '()\n', (17049, 17051), False, 'import time\n'), ((17069, 17080), 'time.time', 'time.time', ([], {}), '()\n', (17078, 17080), False, 'import time\n'), ((17255, 17273), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (17271, 17273), False, 'import sys\n'), ((17416, 17457), 'os.path.exists', 'os.path.exists', (['"""consistent_rate_0.8.txt"""'], {}), "('consistent_rate_0.8.txt')\n", (17430, 17457), False, 'import os\n'), ((17634, 17673), 'os.path.exists', 'os.path.exists', (['"""consistent_rate_1.txt"""'], {}), "('consistent_rate_1.txt')\n", (17648, 17673), False, 'import os\n'), ((2117, 2138), 'model.utils.blob.im_list_to_blob', 'im_list_to_blob', (['[im]'], {}), '([im])\n', (2132, 2138), False, 'from model.utils.blob import im_list_to_blob\n'), ((2676, 2707), 'numpy.array', 'np.array', (['[im_scale_factors[i]]'], {}), '([im_scale_factors[i]])\n', (2684, 2707), True, 'import numpy as np\n'), ((4253, 4286), 'numpy.maximum', 'np.maximum', (['bb[:, 0:1]', 'bbgt[0:1]'], {}), '(bb[:, 0:1], bbgt[0:1])\n', (4263, 4286), True, 'import numpy as np\n'), ((4287, 4320), 'numpy.maximum', 'np.maximum', (['bb[:, 1:2]', 'bbgt[1:2]'], {}), '(bb[:, 1:2], bbgt[1:2])\n', (4297, 4320), True, 'import numpy as np\n'), ((4346, 4379), 'numpy.minimum', 'np.minimum', (['bb[:, 2:3]', 'bbgt[2:3]'], {}), '(bb[:, 2:3], bbgt[2:3])\n', (4356, 4379), True, 'import numpy as np\n'), ((4380, 4413), 'numpy.minimum', 'np.minimum', (['bb[:, 3:4]', 'bbgt[3:4]'], {}), '(bb[:, 3:4], bbgt[3:4])\n', (4390, 4413), True, 'import numpy as np\n'), ((7067, 7100), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['obj_struct', '"""name"""'], {}), "(obj_struct, 'name')\n", (7080, 7100), True, 'from xml.etree import ElementTree as ET\n'), ((7176, 7205), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""xmin"""'], {}), "(bndbox, 'xmin')\n", (7189, 7205), True, 'from xml.etree import ElementTree as ET\n'), ((7233, 7262), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""ymin"""'], {}), "(bndbox, 'ymin')\n", (7246, 7262), True, 'from xml.etree import ElementTree as ET\n'), ((7290, 7319), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""xmax"""'], {}), "(bndbox, 'xmax')\n", (7303, 7319), True, 'from xml.etree import ElementTree as ET\n'), ((7347, 7376), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""ymax"""'], {}), "(bndbox, 'ymax')\n", (7360, 7376), True, 'from xml.etree import ElementTree as ET\n'), ((8892, 8925), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['obj_struct', '"""name"""'], {}), "(obj_struct, 'name')\n", (8905, 8925), True, 'from xml.etree import ElementTree as ET\n'), ((9001, 9030), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""xmin"""'], {}), "(bndbox, 
'xmin')\n", (9014, 9030), True, 'from xml.etree import ElementTree as ET\n'), ((9058, 9087), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""ymin"""'], {}), "(bndbox, 'ymin')\n", (9071, 9087), True, 'from xml.etree import ElementTree as ET\n'), ((9115, 9144), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""xmax"""'], {}), "(bndbox, 'xmax')\n", (9128, 9144), True, 'from xml.etree import ElementTree as ET\n'), ((9172, 9201), 'xml.etree.ElementTree.SubElement', 'ET.SubElement', (['bndbox', '"""ymax"""'], {}), "(bndbox, 'ymax')\n", (9185, 9201), True, 'from xml.etree import ElementTree as ET\n'), ((13840, 13883), 'model.faster_rcnn.resnet.resnet', 'resnet', (['imdb.classes', '(101)'], {'pretrained': '(False)'}), '(imdb.classes, 101, pretrained=False)\n', (13846, 13883), False, 'from model.faster_rcnn.resnet import resnet\n'), ((4630, 4647), 'numpy.greater', 'np.greater', (['iw', '(0)'], {}), '(iw, 0)\n', (4640, 4647), True, 'import numpy as np\n'), ((4647, 4664), 'numpy.greater', 'np.greater', (['ih', '(0)'], {}), '(ih, 0)\n', (4657, 4664), True, 'import numpy as np\n'), ((4890, 4912), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (4906, 4912), False, 'import torch\n'), ((4946, 4968), 'torch.from_numpy', 'torch.from_numpy', (['rois'], {}), '(rois)\n', (4962, 4968), False, 'import torch\n'), ((5004, 5028), 'torch.from_numpy', 'torch.from_numpy', (['labels'], {}), '(labels)\n', (5020, 5028), False, 'import torch\n'), ((5318, 5340), 'torch.from_numpy', 'torch.from_numpy', (['data'], {}), '(data)\n', (5334, 5340), False, 'import torch\n'), ((5376, 5398), 'torch.from_numpy', 'torch.from_numpy', (['rois'], {}), '(rois)\n', (5392, 5398), False, 'import torch\n'), ((7429, 7446), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['root'], {}), '(root)\n', (7440, 7446), True, 'from xml.etree import ElementTree as ET\n'), ((9254, 9271), 'xml.etree.ElementTree.tostring', 'ET.tostring', (['root'], {}), '(root)\n', (9265, 9271), True, 'from xml.etree import ElementTree as ET\n'), ((13929, 13971), 'model.faster_rcnn.resnet.resnet', 'resnet', (['imdb.classes', '(50)'], {'pretrained': '(False)'}), '(imdb.classes, 50, pretrained=False)\n', (13935, 13971), False, 'from model.faster_rcnn.resnet import resnet\n'), ((16200, 16225), 'numpy.argmax', 'np.argmax', (['scores_1[:, j]'], {}), '(scores_1[:, j])\n', (16209, 16225), True, 'import numpy as np\n'), ((16248, 16273), 'numpy.argmax', 'np.argmax', (['scores_2[:, j]'], {}), '(scores_2[:, j])\n', (16257, 16273), True, 'import numpy as np\n'), ((16294, 16317), 'numpy.argmax', 'np.argmax', (['scores[:, j]'], {}), '(scores[:, j])\n', (16303, 16317), True, 'import numpy as np\n'), ((5082, 5098), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (5090, 5098), True, 'import numpy as np\n'), ((14018, 14061), 'model.faster_rcnn.resnet.resnet', 'resnet', (['imdb.classes', '(152)'], {'pretrained': '(False)'}), '(imdb.classes, 152, pretrained=False)\n', (14024, 14061), False, 'from model.faster_rcnn.resnet import resnet\n'), ((14110, 14125), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (14123, 14125), False, 'import pdb\n'), ((15246, 15274), 'os.path.basename', 'os.path.basename', (['image_name'], {}), '(image_name)\n', (15262, 15274), False, 'import os\n'), ((16530, 16557), 'numpy.expand_dims', 'np.expand_dims', (['gt_box_1', '(0)'], {}), '(gt_box_1, 0)\n', (16544, 16557), True, 'import numpy as np\n'), ((16623, 16650), 'numpy.expand_dims', 'np.expand_dims', (['gt_box_2', '(0)'], {}), '(gt_box_2, 
0)\n', (16637, 16650), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
from tensorflow import keras
# GRADED FUNCTION: house_model
def house_model(y_new):
xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
ys = np.array([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)
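# the labels follow y = 0.5 + 0.5 * x, i.e. a 50k base price plus 50k per bedroom,
# expressed in units of 100k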
# alternate solution
# xs = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)
# ys = 0.5 * (xs + 1)
model = tf.keras.Sequential([keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(xs, ys, epochs=1000)
return model.predict(y_new)[0]
if __name__ == "__main__":
prediction = house_model([7.0])
print(prediction)
|
[
"numpy.array",
"tensorflow.keras.layers.Dense"
] |
[((138, 191), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 4.0, 5.0, 6.0]'], {'dtype': 'float'}), '([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], dtype=float)\n', (146, 191), True, 'import numpy as np\n'), ((201, 254), 'numpy.array', 'np.array', (['[1.0, 1.5, 2.0, 2.5, 3.0, 3.5]'], {'dtype': 'float'}), '([1.0, 1.5, 2.0, 2.5, 3.0, 3.5], dtype=float)\n', (209, 254), True, 'import numpy as np\n'), ((406, 450), 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '[1]'}), '(units=1, input_shape=[1])\n', (424, 450), False, 'from tensorflow import keras\n')]
|
#! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from scipy.signal import lfilter
from scipy.signal.windows import get_window
def _pre_emphasize_data(data, emphasize_value=0.97):
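# first-order pre-emphasis FIR filter y[n] = x[n] - emphasize_value * x[n-1];
# boosts high frequencies before the spectral features are computed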
filter_window = np.asarray([1, -emphasize_value])
pre_emphasized_data = lfilter(filter_window, 1, data)
return pre_emphasized_data
def get_length_in_samp(sampling_rate_in_hz, length_in_s):
return int(sampling_rate_in_hz * length_in_s)
def get_group_delay(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type):
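# group delay is computed with the standard identity tau(w) = (X_r*Y_r + X_i*Y_i) / |X(w)|^2,
# where X is the STFT of x[n] and Y is the STFT of n*x[n] (obtained below via the
# 'group_delay' window transformation)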
X_stft_transform = _get_stft(raw_data, sampling_rate_in_hz,
window_length_in_s, window_shift_in_s,
num_fft_points, window_type=window_type)
Y_stft_transform = _get_stft(raw_data, sampling_rate_in_hz,
window_length_in_s, window_shift_in_s,
num_fft_points, window_type=window_type,
data_transformation='group_delay')
X_stft_transform_real = np.real(X_stft_transform)
X_stft_transform_imag = np.imag(X_stft_transform)
Y_stft_transform_real = np.real(Y_stft_transform)
Y_stft_transform_imag = np.imag(Y_stft_transform)
nominator = np.multiply(X_stft_transform_real,
Y_stft_transform_real) + np.multiply(
X_stft_transform_imag, Y_stft_transform_imag)
denominator = np.square(np.abs(X_stft_transform))
group_delay = np.divide(nominator, denominator + 1e-10)
assert not np.isnan(
group_delay).any(), 'There are NaN values in group delay'
return np.transpose(group_delay)
def get_phase_stft_magnitude(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type):
stft = _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type=window_type)
abs_stft = np.abs(stft)
phase = np.angle(stft)
stft_phase = np.concatenate((phase, abs_stft), axis=1)
return np.transpose(stft_phase)
def get_stft_magnitude(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type):
stft = _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type=window_type)
stft_magnitude = np.abs(stft)
return np.transpose(stft_magnitude)
def _get_stft(raw_data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points, window_type,
data_transformation=None):
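# pipeline: pre-emphasise the raw signal, take a windowed FFT over shifted frames,
# then keep only the non-redundant half of the symmetric spectrum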
pre_emphasized_data = _pre_emphasize_data(raw_data)
stft = _short_time_fourier_transform(pre_emphasized_data,
sampling_rate_in_hz,
window_length_in_s, window_shift_in_s,
num_fft_points, window_type,
data_transformation)
non_symmetric_stft = get_non_symmetric_data(stft)
return non_symmetric_stft
def _short_time_fourier_transform(data, sampling_rate_in_hz, window_length_in_s,
window_shift_in_s, num_fft_points,
window_type, data_transformation=None):
window_length_in_samp = get_length_in_samp(window_length_in_s,
sampling_rate_in_hz)
window_shift_in_samp = get_length_in_samp(window_shift_in_s,
sampling_rate_in_hz)
if (num_fft_points < window_length_in_samp):
num_fft_points = window_length_in_samp
preprocessed_data_matrix = _preprocess_to_padded_matrix(data,
window_length_in_samp,
window_shift_in_samp)
weighted_data_matrix = _weight_data_matrix(
preprocessed_data_matrix,
window_type,
data_transformation=data_transformation
)
fft = np.fft.fft(weighted_data_matrix, n=num_fft_points)
return fft
def _preprocess_to_padded_matrix(data, window_length_in_samp,
window_shift_in_samp):
num_input = data.shape[0]
num_output = get_num_output_padded_to_fit_input(num_input,
window_length_in_samp,
window_shift_in_samp)
zero_padded_matrix = np.zeros((num_output, window_length_in_samp),
dtype=np.float64)  # np.float was removed from recent NumPy; float64 matches the old default
for num_output_idx in range(num_output):
start_idx = window_shift_in_samp * num_output_idx
is_last_output = num_output_idx == num_output - 1
end_idx = start_idx + window_length_in_samp if not is_last_output else num_input
end_padded_idx = window_length_in_samp if not is_last_output else end_idx - start_idx
zero_padded_matrix[num_output_idx, :end_padded_idx] = data[
start_idx:end_idx]
return zero_padded_matrix
def get_num_output_padded_to_fit_input(num_input, window_length_in_samp,
window_shift_in_samp):
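# ceil() below ensures a trailing, partially filled window still gets its own zero-padded row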
num_output_valid = (num_input - window_length_in_samp) / float(
window_shift_in_samp) + 1
return int(np.ceil(num_output_valid))
def _weight_data_matrix(data_matrix, window_type, data_transformation=None):
window_length_in_samp = data_matrix[0].shape[0]
window = get_window(window_type, window_length_in_samp, fftbins=False)
if (data_transformation == 'group_delay'):
window *= np.arange(window_length_in_samp)
return data_matrix * window
def get_non_symmetric_length(symmetric_length):
return int(symmetric_length / 2) + 1
def get_non_symmetric_data(data):
num_fft_points = data.shape[-1]
num_ess_fft_points = get_non_symmetric_length(num_fft_points)
return data[:, :num_ess_fft_points]
def get_max_length_stft_based(length_in_samp, window_length_in_s,
window_shift_in_s, sampling_rate_in_hz):
window_length_in_samp = get_length_in_samp(window_length_in_s,
sampling_rate_in_hz)
window_shift_in_samp = get_length_in_samp(window_shift_in_s,
sampling_rate_in_hz)
return get_num_output_padded_to_fit_input(length_in_samp,
window_length_in_samp,
window_shift_in_samp)
def calculate_incr_var(var_prev, mean_prev, mean, length):
return var_prev + (length - mean_prev) * (length - mean)
def calculate_incr_mean(count, mean, length):
return mean + (length - mean) / float(count)
|
[
"numpy.divide",
"numpy.abs",
"scipy.signal.windows.get_window",
"numpy.multiply",
"numpy.ceil",
"scipy.signal.lfilter",
"numpy.angle",
"numpy.asarray",
"numpy.fft.fft",
"numpy.transpose",
"numpy.zeros",
"numpy.isnan",
"numpy.imag",
"numpy.arange",
"numpy.real",
"numpy.concatenate"
] |
[((881, 914), 'numpy.asarray', 'np.asarray', (['[1, -emphasize_value]'], {}), '([1, -emphasize_value])\n', (891, 914), True, 'import numpy as np\n'), ((941, 972), 'scipy.signal.lfilter', 'lfilter', (['filter_window', '(1)', 'data'], {}), '(filter_window, 1, data)\n', (948, 972), False, 'from scipy.signal import lfilter\n'), ((1772, 1797), 'numpy.real', 'np.real', (['X_stft_transform'], {}), '(X_stft_transform)\n', (1779, 1797), True, 'import numpy as np\n'), ((1826, 1851), 'numpy.imag', 'np.imag', (['X_stft_transform'], {}), '(X_stft_transform)\n', (1833, 1851), True, 'import numpy as np\n'), ((1880, 1905), 'numpy.real', 'np.real', (['Y_stft_transform'], {}), '(Y_stft_transform)\n', (1887, 1905), True, 'import numpy as np\n'), ((1934, 1959), 'numpy.imag', 'np.imag', (['Y_stft_transform'], {}), '(Y_stft_transform)\n', (1941, 1959), True, 'import numpy as np\n'), ((2203, 2244), 'numpy.divide', 'np.divide', (['nominator', '(denominator + 1e-10)'], {}), '(nominator, denominator + 1e-10)\n', (2212, 2244), True, 'import numpy as np\n'), ((2347, 2372), 'numpy.transpose', 'np.transpose', (['group_delay'], {}), '(group_delay)\n', (2359, 2372), True, 'import numpy as np\n'), ((2701, 2713), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (2707, 2713), True, 'import numpy as np\n'), ((2726, 2740), 'numpy.angle', 'np.angle', (['stft'], {}), '(stft)\n', (2734, 2740), True, 'import numpy as np\n'), ((2758, 2799), 'numpy.concatenate', 'np.concatenate', (['(phase, abs_stft)'], {'axis': '(1)'}), '((phase, abs_stft), axis=1)\n', (2772, 2799), True, 'import numpy as np\n'), ((2811, 2835), 'numpy.transpose', 'np.transpose', (['stft_phase'], {}), '(stft_phase)\n', (2823, 2835), True, 'import numpy as np\n'), ((3158, 3170), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (3164, 3170), True, 'import numpy as np\n'), ((3182, 3210), 'numpy.transpose', 'np.transpose', (['stft_magnitude'], {}), '(stft_magnitude)\n', (3194, 3210), True, 'import numpy as np\n'), ((4844, 4894), 'numpy.fft.fft', 'np.fft.fft', (['weighted_data_matrix'], {'n': 'num_fft_points'}), '(weighted_data_matrix, n=num_fft_points)\n', (4854, 4894), True, 'import numpy as np\n'), ((5297, 5358), 'numpy.zeros', 'np.zeros', (['(num_output, window_length_in_samp)'], {'dtype': 'np.float'}), '((num_output, window_length_in_samp), dtype=np.float)\n', (5305, 5358), True, 'import numpy as np\n'), ((6341, 6402), 'scipy.signal.windows.get_window', 'get_window', (['window_type', 'window_length_in_samp'], {'fftbins': '(False)'}), '(window_type, window_length_in_samp, fftbins=False)\n', (6351, 6402), False, 'from scipy.signal.windows import get_window\n'), ((1976, 2033), 'numpy.multiply', 'np.multiply', (['X_stft_transform_real', 'Y_stft_transform_real'], {}), '(X_stft_transform_real, Y_stft_transform_real)\n', (1987, 2033), True, 'import numpy as np\n'), ((2064, 2121), 'numpy.multiply', 'np.multiply', (['X_stft_transform_imag', 'Y_stft_transform_imag'], {}), '(X_stft_transform_imag, Y_stft_transform_imag)\n', (2075, 2121), True, 'import numpy as np\n'), ((2159, 2183), 'numpy.abs', 'np.abs', (['X_stft_transform'], {}), '(X_stft_transform)\n', (2165, 2183), True, 'import numpy as np\n'), ((6170, 6195), 'numpy.ceil', 'np.ceil', (['num_output_valid'], {}), '(num_output_valid)\n', (6177, 6195), True, 'import numpy as np\n'), ((6468, 6500), 'numpy.arange', 'np.arange', (['window_length_in_samp'], {}), '(window_length_in_samp)\n', (6477, 6500), True, 'import numpy as np\n'), ((2260, 2281), 'numpy.isnan', 'np.isnan', (['group_delay'], {}), '(group_delay)\n', (2268, 
2281), True, 'import numpy as np\n')]
|
"""
Module for managing the face_recognition recognition method.
To install this method follow instructions at https://github.com/ageitgey/face_recognition#installation
"""
import face_recognition
import numpy as np
import dlib
class FaceRecognition:
def __init__(self):
pass
def predict(self, image, normalize=True):
"""
Get encoding of the face.
:param np.array image: Face image
:param bool normalize: Return normalized vector
:return: Face encoding
"""
bb = (0, image.shape[1], image.shape[0], 0)
encoding = face_recognition.face_encodings(
image, known_face_locations=[bb])
if normalize:
return encoding[0].astype(np.float64) / np.linalg.norm(encoding[0])
else:
return encoding[0].astype(np.float64)
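# Minimal usage sketch (not part of the original module; the file name and the
# BGR -> RGB flip are illustrative assumptions):
#
#   import cv2
#   face = cv2.imread('face.jpg')[:, :, ::-1]     # hypothetical pre-cropped face image
#   embedding = FaceRecognition().predict(face)   # 128-d vector, unit-norm when normalize=True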
|
[
"face_recognition.face_encodings",
"numpy.linalg.norm"
] |
[((598, 663), 'face_recognition.face_encodings', 'face_recognition.face_encodings', (['image'], {'known_face_locations': '[bb]'}), '(image, known_face_locations=[bb])\n', (629, 663), False, 'import face_recognition\n'), ((752, 779), 'numpy.linalg.norm', 'np.linalg.norm', (['encoding[0]'], {}), '(encoding[0])\n', (766, 779), True, 'import numpy as np\n')]
|
"""Human3.6M dataset."""
import copy
import json
import os
import pickle as pk
import numpy as np
import scipy.misc
import torch.utils.data as data
from hybrik.utils.bbox import bbox_clip_xyxy, bbox_xywh_to_xyxy
from hybrik.utils.pose_utils import cam2pixel, pixel2cam, reconstruction_error
from hybrik.utils.presets import SimpleTransform3DSMPL
class H36mSMPL(data.Dataset):
""" Human3.6M smpl dataset. 17 Human3.6M joints + 29 SMPL joints
Parameters
----------
ann_file: str,
Path to the annotation json file.
root: str, default './data/h36m'
Path to the h36m dataset.
train: bool, default is True
If true, will set as training mode.
skip_empty: bool, default is False
Whether skip entire image if no valid label is found.
"""
CLASSES = ['person']
EVAL_JOINTS = [6, 5, 4, 1, 2, 3, 16, 15, 14, 11, 12, 13, 8, 10]
num_joints = 17 + 29
num_thetas = 24
bbox_3d_shape = (2000, 2000, 2000)
joints_name_17 = (
'Pelvis', # 0
'L_Hip', 'L_Knee', 'L_Ankle', # 3
'R_Hip', 'R_Knee', 'R_Ankle', # 6
'Torso', 'Neck', # 8
'Nose', 'Head', # 10
'L_Shoulder', 'L_Elbow', 'L_Wrist', # 13
'R_Shoulder', 'R_Elbow', 'R_Wrist', # 16
)
joints_name_29 = (
'pelvis', 'left_hip', 'right_hip', # 2
'spine1', 'left_knee', 'right_knee', # 5
'spine2', 'left_ankle', 'right_ankle', # 8
'spine3', 'left_foot', 'right_foot', # 11
'neck', 'left_collar', 'right_collar', # 14
'jaw', # 15
'left_shoulder', 'right_shoulder', # 17
'left_elbow', 'right_elbow', # 19
'left_wrist', 'right_wrist', # 21
'left_thumb', 'right_thumb', # 23
'head', 'left_middle', 'right_middle', # 26
'left_bigtoe', 'right_bigtoe' # 28
)
joints_name_14 = (
'R_Ankle', 'R_Knee', 'R_Hip', # 2
'L_Hip', 'L_Knee', 'L_Ankle', # 5
'R_Wrist', 'R_Elbow', 'R_Shoulder', # 8
'L_Shoulder', 'L_Elbow', 'L_Wrist', # 11
'Neck', 'Head'
)
action_name = ['Directions', 'Discussion', 'Eating', 'Greeting', 'Phoning', 'Posing', 'Purchases',
'Sitting', 'SittingDown', 'Smoking', 'Photo', 'Waiting', 'Walking', 'WalkDog', 'WalkTogether']
block_list = ['s_09_act_05_subact_02_ca', 's_09_act_10_subact_02_ca', 's_09_act_13_subact_01_ca']
def __init__(self,
cfg,
ann_file,
root='./data/h36m',
train=True,
skip_empty=True,
dpg=False,
lazy_import=False):
self._cfg = cfg
self.protocol = cfg.DATASET.PROTOCOL
self._ann_file = os.path.join(
root, 'annotations', ann_file + f'_protocol_{self.protocol}.json')
self._lazy_import = lazy_import
self._root = root
self._skip_empty = skip_empty
self._train = train
self._dpg = dpg
self._det_bbox_file = getattr(cfg.DATASET.SET_LIST[0], 'DET_BOX', None)
self._scale_factor = cfg.DATASET.SCALE_FACTOR
self._color_factor = cfg.DATASET.COLOR_FACTOR
self._rot = cfg.DATASET.ROT_FACTOR
self._input_size = cfg.MODEL.IMAGE_SIZE
self._output_size = cfg.MODEL.HEATMAP_SIZE
self._occlusion = cfg.DATASET.OCCLUSION
self._crop = cfg.MODEL.EXTRA.CROP
self._sigma = cfg.MODEL.EXTRA.SIGMA
self._depth_dim = getattr(cfg.MODEL.EXTRA, 'DEPTH_DIM', None)
self._check_centers = False
self.num_class = len(self.CLASSES)
self.num_joints = cfg.MODEL.NUM_JOINTS
self.num_joints_half_body = cfg.DATASET.NUM_JOINTS_HALF_BODY
self.prob_half_body = cfg.DATASET.PROB_HALF_BODY
self.augment = cfg.MODEL.EXTRA.AUGMENT
self.dz_factor = cfg.MODEL.EXTRA.get('FACTOR', None)
self._loss_type = cfg.LOSS['TYPE']
self.upper_body_ids = (7, 8, 9, 10, 11, 12, 13, 14, 15, 16)
self.lower_body_ids = (0, 1, 2, 3, 4, 5, 6)
self.kinematic = cfg.MODEL.EXTRA.get('KINEMATIC', False)
self.classfier = cfg.MODEL.EXTRA.get('WITHCLASSFIER', False)
self.root_idx_17 = self.joints_name_17.index('Pelvis')
self.lshoulder_idx_17 = self.joints_name_17.index('L_Shoulder')
self.rshoulder_idx_17 = self.joints_name_17.index('R_Shoulder')
self.root_idx_smpl = self.joints_name_29.index('pelvis')
self.lshoulder_idx_29 = self.joints_name_29.index('left_shoulder')
self.rshoulder_idx_29 = self.joints_name_29.index('right_shoulder')
self._items, self._labels = self._lazy_load_json()
if cfg.MODEL.EXTRA.PRESET == 'simple_smpl_3d':
self.transformation = SimpleTransform3DSMPL(
self, scale_factor=self._scale_factor,
color_factor=self._color_factor,
occlusion=self._occlusion,
input_size=self._input_size,
output_size=self._output_size,
depth_dim=self._depth_dim,
bbox_3d_shape=self.bbox_3d_shape,
rot=self._rot, sigma=self._sigma,
train=self._train, add_dpg=self._dpg,
loss_type=self._loss_type, scale_mult=1)
def __getitem__(self, idx):
# get image id
img_path = self._items[idx]
img_id = int(self._labels[idx]['img_id'])
# load ground truth, including bbox, keypoints, image size
label = copy.deepcopy(self._labels[idx])
img = scipy.misc.imread(img_path, mode='RGB')
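# note: scipy.misc.imread was removed in SciPy 1.2; on newer installs switch to one of the
# readers sketched in the comments below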
# img = load_image(img_path)
# img = cv2.imread(img_path, cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
# transform ground truth into training label and apply data augmentation
target = self.transformation(img, label)
img = target.pop('image')
bbox = target.pop('bbox')
return img, target, img_id, bbox
def __len__(self):
return len(self._items)
def _lazy_load_json(self):
if os.path.exists(self._ann_file + '_smpl_annot_keypoint.pkl') and self._lazy_import:
print('Lazy load annot...')
with open(self._ann_file + '_smpl_annot_keypoint.pkl', 'rb') as fid:
items, labels = pk.load(fid)
else:
items, labels = self._load_jsons()
try:
with open(self._ann_file + '_smpl_annot_keypoint.pkl', 'wb') as fid:
pk.dump((items, labels), fid, pk.HIGHEST_PROTOCOL)
except Exception as e:
print(e)
print('Skip writing to .pkl file.')
return items, labels
def _load_jsons(self):
"""Load all image paths and labels from JSON annotation files into buffer."""
items = []
labels = []
with open(self._ann_file, 'r') as fid:
database = json.load(fid)
# iterate through the annotations
bbox_scale_list = []
det_bbox_set = {}
if self._det_bbox_file is not None:
bbox_list = json.load(open(os.path.join(
self._root, 'annotations', self._det_bbox_file + f'_protocol_{self.protocol}.json'), 'r'))
for item in bbox_list:
image_id = item['image_id']
det_bbox_set[image_id] = item['bbox']
for ann_image, ann_annotations in zip(database['images'], database['annotations']):
ann = dict()
for k, v in ann_image.items():
assert k not in ann.keys()
ann[k] = v
for k, v in ann_annotations.items():
ann[k] = v
skip = False
for name in self.block_list:
if name in ann['file_name']:
skip = True
if skip:
continue
image_id = ann['image_id']
width, height = ann['width'], ann['height']
if self._det_bbox_file is not None:
xmin, ymin, xmax, ymax = bbox_clip_xyxy(
bbox_xywh_to_xyxy(det_bbox_set[ann['file_name']]), width, height)
else:
xmin, ymin, xmax, ymax = bbox_clip_xyxy(
bbox_xywh_to_xyxy(ann['bbox']), width, height)
f, c = np.array(ann['cam_param']['f'], dtype=np.float32), np.array(
ann['cam_param']['c'], dtype=np.float32)
joint_cam_17 = np.array(ann['h36m_joints']).reshape(17, 3)
joint_cam = np.array(ann['smpl_joints'])
if joint_cam.size == 24 * 3:
joint_cam_29 = np.zeros((29, 3))
joint_cam_29[:24, :] = joint_cam.reshape(24, 3)
else:
joint_cam_29 = joint_cam.reshape(29, 3)
beta = np.array(ann['betas'])
theta = np.array(ann['thetas']).reshape(self.num_thetas, 3)
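# project the camera-space joints into the image plane and store the depth channel
# relative to the root joint, for both the 17-joint and 29-joint sets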
joint_img_17 = cam2pixel(joint_cam_17, f, c)
joint_img_17[:, 2] = joint_img_17[:, 2] - joint_cam_17[self.root_idx_17, 2]
joint_relative_17 = joint_cam_17 - joint_cam_17[self.root_idx_17, :]
joint_img_29 = cam2pixel(joint_cam_29, f, c)
joint_img_29[:, 2] = joint_img_29[:, 2] - joint_cam_29[self.root_idx_smpl, 2]
joint_vis_17 = np.ones((17, 3))
joint_vis_29 = np.ones((29, 3))
root_cam = np.array(ann['root_coord'])
abs_path = os.path.join(self._root, 'images', ann['file_name'])
if 'angle_twist' in ann.keys():
twist = ann['angle_twist']
angle = np.array(twist['angle'])
cos = np.array(twist['cos'])
sin = np.array(twist['sin'])
assert (np.cos(angle) - cos < 1e-6).all(), np.cos(angle) - cos
assert (np.sin(angle) - sin < 1e-6).all(), np.sin(angle) - sin
phi = np.stack((cos, sin), axis=1)
# phi_weight = np.ones_like(phi)
phi_weight = (angle > -10) * 1.0 # invalid angles are set to be -999
phi_weight = np.stack([phi_weight, phi_weight], axis=1)
else:
phi = np.zeros((23, 2))
phi_weight = np.zeros_like(phi)
items.append(abs_path)
labels.append({
'bbox': (xmin, ymin, xmax, ymax),
'img_id': image_id,
'img_path': abs_path,
'width': width,
'height': height,
'joint_img_17': joint_img_17,
'joint_vis_17': joint_vis_17,
'joint_cam_17': joint_cam_17,
'joint_relative_17': joint_relative_17,
'joint_img_29': joint_img_29,
'joint_vis_29': joint_vis_29,
'joint_cam_29': joint_cam_29,
'twist_phi': phi,
'twist_weight': phi_weight,
'beta': beta,
'theta': theta,
'root_cam': root_cam,
'f': f,
'c': c
})
bbox_scale_list.append(max(xmax - xmin, ymax - ymin))
return items, labels
@property
def joint_pairs_17(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return ((1, 4), (2, 5), (3, 6), (11, 14), (12, 15), (13, 16))
@property
def joint_pairs_24(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23))
@property
def joint_pairs_29(self):
"""Joint pairs which defines the pairs of joint to be swapped
when the image is flipped horizontally."""
return ((1, 2), (4, 5), (7, 8), (10, 11), (13, 14), (16, 17), (18, 19), (20, 21), (22, 23), (25, 26), (27, 28))
@property
def bone_pairs(self):
"""Bone pairs which defines the pairs of bone to be swapped
when the image is flipped horizontally."""
return ((0, 3), (1, 4), (2, 5), (10, 13), (11, 14), (12, 15))
def _get_box_center_area(self, bbox):
"""Get bbox center"""
c = np.array([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])
area = (bbox[3] - bbox[1]) * (bbox[2] - bbox[0])
return c, area
def _get_keypoints_center_count(self, keypoints):
"""Get geometric center of all keypoints"""
keypoint_x = np.sum(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))
keypoint_y = np.sum(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))
num = float(np.sum(keypoints[:, 0, 1]))
return np.array([keypoint_x / num, keypoint_y / num]), num
def evaluate_uvd_24(self, preds, result_dir):
print('Evaluation start...')
gts = self._labels
assert len(gts) == len(preds)
sample_num = len(gts)
pred_save = []
error = np.zeros((sample_num, 24)) # joint error
error_x = np.zeros((sample_num, 24)) # joint error
error_y = np.zeros((sample_num, 24)) # joint error
error_z = np.zeros((sample_num, 24)) # joint error
# error for each sequence
error_action = [[] for _ in range(len(self.action_name))]
for n in range(sample_num):
gt = gts[n]
image_id = gt['img_id']
f = gt['f']
c = gt['c']
bbox = gt['bbox']
gt_3d_root = gt['root_cam'].copy()
gt_3d_kpt = gt['joint_cam_29'][:24].copy()
# restore coordinates to original space
pred_2d_kpt = preds[image_id]['uvd_jts'][:24].copy()
# pred_2d_kpt[:, 0] = pred_2d_kpt[:, 0] / self._output_size[1] * bbox[2] + bbox[0]
# pred_2d_kpt[:, 1] = pred_2d_kpt[:, 1] / self._output_size[0] * bbox[3] + bbox[1]
pred_2d_kpt[:, 2] = pred_2d_kpt[:, 2] * self.bbox_3d_shape[2] + gt_3d_root[2]
# back project to camera coordinate system
pred_3d_kpt = pixel2cam(pred_2d_kpt, f, c)
# root joint alignment
pred_3d_kpt = pred_3d_kpt - pred_3d_kpt[self.root_idx_smpl]
gt_3d_kpt = gt_3d_kpt - gt_3d_kpt[self.root_idx_smpl]
if self.protocol == 1:
# rigid alignment for PA MPJPE (protocol #1)
pred_3d_kpt = reconstruction_error(pred_3d_kpt, gt_3d_kpt)
# error calculate
error[n] = np.sqrt(np.sum((pred_3d_kpt - gt_3d_kpt)**2, 1))
error_x[n] = np.abs(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])
error_y[n] = np.abs(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])
error_z[n] = np.abs(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])
img_name = gt['img_path']
action_idx = int(img_name[img_name.find(
'act') + 4:img_name.find('act') + 6]) - 2
error_action[action_idx].append(error[n].copy())
# prediction save
pred_save.append({'image_id': image_id, 'joint_cam': pred_3d_kpt.tolist(
), 'bbox': bbox, 'root_cam': gt_3d_root.tolist()}) # joint_cam is root-relative coordinate
# total error
tot_err = np.mean(error)
tot_err_x = np.mean(error_x)
tot_err_y = np.mean(error_y)
tot_err_z = np.mean(error_z)
metric = 'PA MPJPE' if self.protocol == 1 else 'MPJPE'
eval_summary = f'UVD_24 Protocol {self.protocol} error ({metric}) >> tot: {tot_err:.2f}, x: {tot_err_x:.2f}, y: {tot_err_y:.2f}, z: {tot_err_z:.2f}\n'
# error for each action
for i in range(len(error_action)):
err = np.mean(np.array(error_action[i]))
eval_summary += (self.action_name[i] + ': %.2f ' % err)
print(eval_summary)
# prediction save
with open(result_dir, 'w') as f:
json.dump(pred_save, f)
print("Test result is saved at " + result_dir)
return tot_err
def evaluate_xyz_24(self, preds, result_dir):
print('Evaluation start...')
gts = self._labels
assert len(gts) == len(preds)
sample_num = len(gts)
pred_save = []
error = np.zeros((sample_num, 24)) # joint error
error_align = np.zeros((sample_num, 24)) # joint error
error_x = np.zeros((sample_num, 24)) # joint error
error_y = np.zeros((sample_num, 24)) # joint error
error_z = np.zeros((sample_num, 24)) # joint error
# error for each sequence
error_action = [[] for _ in range(len(self.action_name))]
for n in range(sample_num):
gt = gts[n]
image_id = gt['img_id']
bbox = gt['bbox']
gt_3d_root = gt['root_cam'].copy()
gt_3d_kpt = gt['joint_cam_29'][:24].copy()
# gt_vis = gt['joint_vis']
# restore coordinates to original space
pred_3d_kpt = preds[image_id]['xyz_24'].copy() * self.bbox_3d_shape[2]
# root joint alignment
pred_3d_kpt = pred_3d_kpt - pred_3d_kpt[self.root_idx_smpl]
gt_3d_kpt = gt_3d_kpt - gt_3d_kpt[self.root_idx_smpl]
# rigid alignment for PA MPJPE
pred_3d_kpt_align = reconstruction_error(pred_3d_kpt.copy(), gt_3d_kpt.copy())
# error calculate
error[n] = np.sqrt(np.sum((pred_3d_kpt - gt_3d_kpt)**2, 1))
error_align[n] = np.sqrt(np.sum((pred_3d_kpt_align - gt_3d_kpt)**2, 1))
error_x[n] = np.abs(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])
error_y[n] = np.abs(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])
error_z[n] = np.abs(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])
img_name = gt['img_path']
action_idx = int(img_name[img_name.find(
'act') + 4:img_name.find('act') + 6]) - 2
error_action[action_idx].append(error[n].copy())
# prediction save
pred_save.append({'image_id': image_id, 'joint_cam': pred_3d_kpt.tolist(
), 'bbox': bbox, 'root_cam': gt_3d_root.tolist()}) # joint_cam is root-relative coordinate
# total error
tot_err = np.mean(error)
tot_err_align = np.mean(error_align)
tot_err_x = np.mean(error_x)
tot_err_y = np.mean(error_y)
tot_err_z = np.mean(error_z)
metric = 'PA MPJPE' if self.protocol == 1 else 'MPJPE'
eval_summary = f'XYZ_24 Protocol {self.protocol} error ({metric}) >> tot: {tot_err:.2f}, tot_pa: {tot_err_align:.2f}, x: {tot_err_x:.2f}, y: {tot_err_y:.2f}, z: {tot_err_z:.2f}\n'
# error for each action
for i in range(len(error_action)):
err = np.mean(np.array(error_action[i]))
eval_summary += (self.action_name[i] + ': %.2f ' % err)
print(eval_summary)
# prediction save
with open(result_dir, 'w') as f:
json.dump(pred_save, f)
print("Test result is saved at " + result_dir)
return tot_err
def evaluate_xyz_17(self, preds, result_dir):
print('Evaluation start...')
gts = self._labels
assert len(gts) == len(preds), (len(gts), len(preds))
sample_num = len(gts)
pred_save = []
error = np.zeros((sample_num, len(self.EVAL_JOINTS))) # joint error
error_align = np.zeros((sample_num, len(self.EVAL_JOINTS))) # joint error
error_x = np.zeros((sample_num, len(self.EVAL_JOINTS))) # joint error
error_y = np.zeros((sample_num, len(self.EVAL_JOINTS))) # joint error
error_z = np.zeros((sample_num, len(self.EVAL_JOINTS))) # joint error
# error for each sequence
error_action = [[] for _ in range(len(self.action_name))]
for n in range(sample_num):
gt = gts[n]
image_id = gt['img_id']
bbox = gt['bbox']
gt_3d_root = gt['root_cam'].copy()
gt_3d_kpt = gt['joint_relative_17'].copy()
# gt_vis = gt['joint_vis']
# restore coordinates to original space
pred_3d_kpt = preds[image_id]['xyz_17'].copy() * self.bbox_3d_shape[2]
# root joint alignment
pred_3d_kpt = pred_3d_kpt - pred_3d_kpt[self.root_idx_17]
gt_3d_kpt = gt_3d_kpt - gt_3d_kpt[self.root_idx_17]
# rigid alignment for PA MPJPE
# pred_3d_kpt_align = rigid_align(pred_3d_kpt.copy(), gt_3d_kpt.copy())
pred_3d_kpt_align = reconstruction_error(pred_3d_kpt.copy(), gt_3d_kpt.copy())
# pred_3d_kpt_align = pred_3d_kpt_align - pred_3d_kpt_align[self.root_idx_17]
# select eval 14 joints
pred_3d_kpt = np.take(pred_3d_kpt, self.EVAL_JOINTS, axis=0)
gt_3d_kpt = np.take(gt_3d_kpt, self.EVAL_JOINTS, axis=0)
pred_3d_kpt_align = np.take(pred_3d_kpt_align, self.EVAL_JOINTS, axis=0)
# error calculate
error[n] = np.sqrt(np.sum((pred_3d_kpt - gt_3d_kpt)**2, 1))
error_align[n] = np.sqrt(np.sum((pred_3d_kpt_align - gt_3d_kpt)**2, 1))
error_x[n] = np.abs(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])
error_y[n] = np.abs(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])
error_z[n] = np.abs(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])
img_name = gt['img_path']
action_idx = int(img_name[img_name.find(
'act') + 4:img_name.find('act') + 6]) - 2
error_action[action_idx].append(error[n].copy())
# prediction save
pred_save.append({'image_id': image_id, 'joint_cam': pred_3d_kpt.tolist(
), 'bbox': bbox, 'root_cam': gt_3d_root.tolist()}) # joint_cam is root-relative coordinate
# total error
tot_err = np.mean(error)
tot_err_align = np.mean(error_align)
tot_err_x = np.mean(error_x)
tot_err_y = np.mean(error_y)
tot_err_z = np.mean(error_z)
metric = 'PA MPJPE' if self.protocol == 1 else 'MPJPE'
eval_summary = f'XYZ_17 Protocol {self.protocol} error ({metric}) >> tot: {tot_err:.2f}, tot_pa: {tot_err_align:.2f}, x: {tot_err_x:.2f}, y: {tot_err_y:.2f}, z: {tot_err_z:.2f}\n'
# error for each action
for i in range(len(error_action)):
err = np.mean(np.array(error_action[i]))
eval_summary += (self.action_name[i] + ': %.2f ' % err)
print(eval_summary)
# prediction save
with open(result_dir, 'w') as f:
json.dump(pred_save, f)
print("Test result is saved at " + result_dir)
return tot_err_align
|
[
"pickle.dump",
"numpy.sum",
"numpy.abs",
"hybrik.utils.bbox.bbox_xywh_to_xyxy",
"numpy.ones",
"numpy.mean",
"pickle.load",
"numpy.sin",
"os.path.join",
"numpy.zeros_like",
"os.path.exists",
"hybrik.utils.pose_utils.pixel2cam",
"json.dump",
"copy.deepcopy",
"numpy.stack",
"hybrik.utils.pose_utils.reconstruction_error",
"hybrik.utils.presets.SimpleTransform3DSMPL",
"hybrik.utils.pose_utils.cam2pixel",
"numpy.cos",
"json.load",
"numpy.zeros",
"numpy.array",
"numpy.take"
] |
[((2948, 3027), 'os.path.join', 'os.path.join', (['root', '"""annotations"""', "(ann_file + f'_protocol_{self.protocol}.json')"], {}), "(root, 'annotations', ann_file + f'_protocol_{self.protocol}.json')\n", (2960, 3027), False, 'import os\n'), ((5715, 5747), 'copy.deepcopy', 'copy.deepcopy', (['self._labels[idx]'], {}), '(self._labels[idx])\n', (5728, 5747), False, 'import copy\n'), ((12461, 12525), 'numpy.array', 'np.array', (['[(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0]'], {}), '([(bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0])\n', (12469, 12525), True, 'import numpy as np\n'), ((12734, 12787), 'numpy.sum', 'np.sum', (['(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))'], {}), '(keypoints[:, 0, 0] * (keypoints[:, 0, 1] > 0))\n', (12740, 12787), True, 'import numpy as np\n'), ((12809, 12862), 'numpy.sum', 'np.sum', (['(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))'], {}), '(keypoints[:, 1, 0] * (keypoints[:, 1, 1] > 0))\n', (12815, 12862), True, 'import numpy as np\n'), ((13201, 13227), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (13209, 13227), True, 'import numpy as np\n'), ((13265, 13291), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (13273, 13291), True, 'import numpy as np\n'), ((13327, 13353), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (13335, 13353), True, 'import numpy as np\n'), ((13389, 13415), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (13397, 13415), True, 'import numpy as np\n'), ((15445, 15459), 'numpy.mean', 'np.mean', (['error'], {}), '(error)\n', (15452, 15459), True, 'import numpy as np\n'), ((15480, 15496), 'numpy.mean', 'np.mean', (['error_x'], {}), '(error_x)\n', (15487, 15496), True, 'import numpy as np\n'), ((15517, 15533), 'numpy.mean', 'np.mean', (['error_y'], {}), '(error_y)\n', (15524, 15533), True, 'import numpy as np\n'), ((15554, 15570), 'numpy.mean', 'np.mean', (['error_z'], {}), '(error_z)\n', (15561, 15570), True, 'import numpy as np\n'), ((16422, 16448), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (16430, 16448), True, 'import numpy as np\n'), ((16486, 16512), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (16494, 16512), True, 'import numpy as np\n'), ((16546, 16572), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (16554, 16572), True, 'import numpy as np\n'), ((16606, 16632), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (16614, 16632), True, 'import numpy as np\n'), ((16666, 16692), 'numpy.zeros', 'np.zeros', (['(sample_num, 24)'], {}), '((sample_num, 24))\n', (16674, 16692), True, 'import numpy as np\n'), ((18386, 18400), 'numpy.mean', 'np.mean', (['error'], {}), '(error)\n', (18393, 18400), True, 'import numpy as np\n'), ((18425, 18445), 'numpy.mean', 'np.mean', (['error_align'], {}), '(error_align)\n', (18432, 18445), True, 'import numpy as np\n'), ((18466, 18482), 'numpy.mean', 'np.mean', (['error_x'], {}), '(error_x)\n', (18473, 18482), True, 'import numpy as np\n'), ((18503, 18519), 'numpy.mean', 'np.mean', (['error_y'], {}), '(error_y)\n', (18510, 18519), True, 'import numpy as np\n'), ((18540, 18556), 'numpy.mean', 'np.mean', (['error_z'], {}), '(error_z)\n', (18547, 18556), True, 'import numpy as np\n'), ((21953, 21967), 'numpy.mean', 'np.mean', (['error'], {}), '(error)\n', (21960, 21967), True, 'import numpy as np\n'), ((21992, 22012), 'numpy.mean', 
'np.mean', (['error_align'], {}), '(error_align)\n', (21999, 22012), True, 'import numpy as np\n'), ((22033, 22049), 'numpy.mean', 'np.mean', (['error_x'], {}), '(error_x)\n', (22040, 22049), True, 'import numpy as np\n'), ((22070, 22086), 'numpy.mean', 'np.mean', (['error_y'], {}), '(error_y)\n', (22077, 22086), True, 'import numpy as np\n'), ((22107, 22123), 'numpy.mean', 'np.mean', (['error_z'], {}), '(error_z)\n', (22114, 22123), True, 'import numpy as np\n'), ((4973, 5349), 'hybrik.utils.presets.SimpleTransform3DSMPL', 'SimpleTransform3DSMPL', (['self'], {'scale_factor': 'self._scale_factor', 'color_factor': 'self._color_factor', 'occlusion': 'self._occlusion', 'input_size': 'self._input_size', 'output_size': 'self._output_size', 'depth_dim': 'self._depth_dim', 'bbox_3d_shape': 'self.bbox_3d_shape', 'rot': 'self._rot', 'sigma': 'self._sigma', 'train': 'self._train', 'add_dpg': 'self._dpg', 'loss_type': 'self._loss_type', 'scale_mult': '(1)'}), '(self, scale_factor=self._scale_factor, color_factor=\n self._color_factor, occlusion=self._occlusion, input_size=self.\n _input_size, output_size=self._output_size, depth_dim=self._depth_dim,\n bbox_3d_shape=self.bbox_3d_shape, rot=self._rot, sigma=self._sigma,\n train=self._train, add_dpg=self._dpg, loss_type=self._loss_type,\n scale_mult=1)\n', (4994, 5349), False, 'from hybrik.utils.presets import SimpleTransform3DSMPL\n'), ((6267, 6326), 'os.path.exists', 'os.path.exists', (["(self._ann_file + '_smpl_annot_keypoint.pkl')"], {}), "(self._ann_file + '_smpl_annot_keypoint.pkl')\n", (6281, 6326), False, 'import os\n'), ((7116, 7130), 'json.load', 'json.load', (['fid'], {}), '(fid)\n', (7125, 7130), False, 'import json\n'), ((8725, 8753), 'numpy.array', 'np.array', (["ann['smpl_joints']"], {}), "(ann['smpl_joints'])\n", (8733, 8753), True, 'import numpy as np\n'), ((9001, 9023), 'numpy.array', 'np.array', (["ann['betas']"], {}), "(ann['betas'])\n", (9009, 9023), True, 'import numpy as np\n'), ((9124, 9153), 'hybrik.utils.pose_utils.cam2pixel', 'cam2pixel', (['joint_cam_17', 'f', 'c'], {}), '(joint_cam_17, f, c)\n', (9133, 9153), False, 'from hybrik.utils.pose_utils import cam2pixel, pixel2cam, reconstruction_error\n'), ((9351, 9380), 'hybrik.utils.pose_utils.cam2pixel', 'cam2pixel', (['joint_cam_29', 'f', 'c'], {}), '(joint_cam_29, f, c)\n', (9360, 9380), False, 'from hybrik.utils.pose_utils import cam2pixel, pixel2cam, reconstruction_error\n'), ((9498, 9514), 'numpy.ones', 'np.ones', (['(17, 3)'], {}), '((17, 3))\n', (9505, 9514), True, 'import numpy as np\n'), ((9542, 9558), 'numpy.ones', 'np.ones', (['(29, 3)'], {}), '((29, 3))\n', (9549, 9558), True, 'import numpy as np\n'), ((9583, 9610), 'numpy.array', 'np.array', (["ann['root_coord']"], {}), "(ann['root_coord'])\n", (9591, 9610), True, 'import numpy as np\n'), ((9635, 9687), 'os.path.join', 'os.path.join', (['self._root', '"""images"""', "ann['file_name']"], {}), "(self._root, 'images', ann['file_name'])\n", (9647, 9687), False, 'import os\n'), ((12883, 12909), 'numpy.sum', 'np.sum', (['keypoints[:, 0, 1]'], {}), '(keypoints[:, 0, 1])\n', (12889, 12909), True, 'import numpy as np\n'), ((12926, 12972), 'numpy.array', 'np.array', (['[keypoint_x / num, keypoint_y / num]'], {}), '([keypoint_x / num, keypoint_y / num])\n', (12934, 12972), True, 'import numpy as np\n'), ((14289, 14317), 'hybrik.utils.pose_utils.pixel2cam', 'pixel2cam', (['pred_2d_kpt', 'f', 'c'], {}), '(pred_2d_kpt, f, c)\n', (14298, 14317), False, 'from hybrik.utils.pose_utils import cam2pixel, pixel2cam, 
reconstruction_error\n'), ((14792, 14835), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])'], {}), '(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])\n', (14798, 14835), True, 'import numpy as np\n'), ((14861, 14904), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])'], {}), '(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])\n', (14867, 14904), True, 'import numpy as np\n'), ((14930, 14973), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])'], {}), '(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])\n', (14936, 14973), True, 'import numpy as np\n'), ((16097, 16120), 'json.dump', 'json.dump', (['pred_save', 'f'], {}), '(pred_save, f)\n', (16106, 16120), False, 'import json\n'), ((17733, 17776), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])'], {}), '(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])\n', (17739, 17776), True, 'import numpy as np\n'), ((17802, 17845), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])'], {}), '(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])\n', (17808, 17845), True, 'import numpy as np\n'), ((17871, 17914), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])'], {}), '(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])\n', (17877, 17914), True, 'import numpy as np\n'), ((19111, 19134), 'json.dump', 'json.dump', (['pred_save', 'f'], {}), '(pred_save, f)\n', (19120, 19134), False, 'import json\n'), ((20887, 20933), 'numpy.take', 'np.take', (['pred_3d_kpt', 'self.EVAL_JOINTS'], {'axis': '(0)'}), '(pred_3d_kpt, self.EVAL_JOINTS, axis=0)\n', (20894, 20933), True, 'import numpy as np\n'), ((20958, 21002), 'numpy.take', 'np.take', (['gt_3d_kpt', 'self.EVAL_JOINTS'], {'axis': '(0)'}), '(gt_3d_kpt, self.EVAL_JOINTS, axis=0)\n', (20965, 21002), True, 'import numpy as np\n'), ((21035, 21087), 'numpy.take', 'np.take', (['pred_3d_kpt_align', 'self.EVAL_JOINTS'], {'axis': '(0)'}), '(pred_3d_kpt_align, self.EVAL_JOINTS, axis=0)\n', (21042, 21087), True, 'import numpy as np\n'), ((21300, 21343), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])'], {}), '(pred_3d_kpt[:, 0] - gt_3d_kpt[:, 0])\n', (21306, 21343), True, 'import numpy as np\n'), ((21369, 21412), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])'], {}), '(pred_3d_kpt[:, 1] - gt_3d_kpt[:, 1])\n', (21375, 21412), True, 'import numpy as np\n'), ((21438, 21481), 'numpy.abs', 'np.abs', (['(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])'], {}), '(pred_3d_kpt[:, 2] - gt_3d_kpt[:, 2])\n', (21444, 21481), True, 'import numpy as np\n'), ((22678, 22701), 'json.dump', 'json.dump', (['pred_save', 'f'], {}), '(pred_save, f)\n', (22687, 22701), False, 'import json\n'), ((6503, 6515), 'pickle.load', 'pk.load', (['fid'], {}), '(fid)\n', (6510, 6515), True, 'import pickle as pk\n'), ((8511, 8560), 'numpy.array', 'np.array', (["ann['cam_param']['f']"], {'dtype': 'np.float32'}), "(ann['cam_param']['f'], dtype=np.float32)\n", (8519, 8560), True, 'import numpy as np\n'), ((8562, 8611), 'numpy.array', 'np.array', (["ann['cam_param']['c']"], {'dtype': 'np.float32'}), "(ann['cam_param']['c'], dtype=np.float32)\n", (8570, 8611), True, 'import numpy as np\n'), ((8826, 8843), 'numpy.zeros', 'np.zeros', (['(29, 3)'], {}), '((29, 3))\n', (8834, 8843), True, 'import numpy as np\n'), ((9800, 9824), 'numpy.array', 'np.array', (["twist['angle']"], {}), "(twist['angle'])\n", (9808, 9824), True, 'import numpy as np\n'), ((9847, 9869), 'numpy.array', 'np.array', (["twist['cos']"], {}), "(twist['cos'])\n", (9855, 9869), True, 'import numpy as np\n'), ((9892, 9914), 'numpy.array', 'np.array', (["twist['sin']"], {}), 
"(twist['sin'])\n", (9900, 9914), True, 'import numpy as np\n'), ((10095, 10123), 'numpy.stack', 'np.stack', (['(cos, sin)'], {'axis': '(1)'}), '((cos, sin), axis=1)\n', (10103, 10123), True, 'import numpy as np\n'), ((10287, 10329), 'numpy.stack', 'np.stack', (['[phi_weight, phi_weight]'], {'axis': '(1)'}), '([phi_weight, phi_weight], axis=1)\n', (10295, 10329), True, 'import numpy as np\n'), ((10370, 10387), 'numpy.zeros', 'np.zeros', (['(23, 2)'], {}), '((23, 2))\n', (10378, 10387), True, 'import numpy as np\n'), ((10417, 10435), 'numpy.zeros_like', 'np.zeros_like', (['phi'], {}), '(phi)\n', (10430, 10435), True, 'import numpy as np\n'), ((14619, 14663), 'hybrik.utils.pose_utils.reconstruction_error', 'reconstruction_error', (['pred_3d_kpt', 'gt_3d_kpt'], {}), '(pred_3d_kpt, gt_3d_kpt)\n', (14639, 14663), False, 'from hybrik.utils.pose_utils import cam2pixel, pixel2cam, reconstruction_error\n'), ((14726, 14767), 'numpy.sum', 'np.sum', (['((pred_3d_kpt - gt_3d_kpt) ** 2)', '(1)'], {}), '((pred_3d_kpt - gt_3d_kpt) ** 2, 1)\n', (14732, 14767), True, 'import numpy as np\n'), ((15893, 15918), 'numpy.array', 'np.array', (['error_action[i]'], {}), '(error_action[i])\n', (15901, 15918), True, 'import numpy as np\n'), ((17583, 17624), 'numpy.sum', 'np.sum', (['((pred_3d_kpt - gt_3d_kpt) ** 2)', '(1)'], {}), '((pred_3d_kpt - gt_3d_kpt) ** 2, 1)\n', (17589, 17624), True, 'import numpy as np\n'), ((17661, 17708), 'numpy.sum', 'np.sum', (['((pred_3d_kpt_align - gt_3d_kpt) ** 2)', '(1)'], {}), '((pred_3d_kpt_align - gt_3d_kpt) ** 2, 1)\n', (17667, 17708), True, 'import numpy as np\n'), ((18907, 18932), 'numpy.array', 'np.array', (['error_action[i]'], {}), '(error_action[i])\n', (18915, 18932), True, 'import numpy as np\n'), ((21150, 21191), 'numpy.sum', 'np.sum', (['((pred_3d_kpt - gt_3d_kpt) ** 2)', '(1)'], {}), '((pred_3d_kpt - gt_3d_kpt) ** 2, 1)\n', (21156, 21191), True, 'import numpy as np\n'), ((21228, 21275), 'numpy.sum', 'np.sum', (['((pred_3d_kpt_align - gt_3d_kpt) ** 2)', '(1)'], {}), '((pred_3d_kpt_align - gt_3d_kpt) ** 2, 1)\n', (21234, 21275), True, 'import numpy as np\n'), ((22474, 22499), 'numpy.array', 'np.array', (['error_action[i]'], {}), '(error_action[i])\n', (22482, 22499), True, 'import numpy as np\n'), ((6699, 6749), 'pickle.dump', 'pk.dump', (['(items, labels)', 'fid', 'pk.HIGHEST_PROTOCOL'], {}), '((items, labels), fid, pk.HIGHEST_PROTOCOL)\n', (6706, 6749), True, 'import pickle as pk\n'), ((7311, 7411), 'os.path.join', 'os.path.join', (['self._root', '"""annotations"""', "(self._det_bbox_file + f'_protocol_{self.protocol}.json')"], {}), "(self._root, 'annotations', self._det_bbox_file +\n f'_protocol_{self.protocol}.json')\n", (7323, 7411), False, 'import os\n'), ((8283, 8332), 'hybrik.utils.bbox.bbox_xywh_to_xyxy', 'bbox_xywh_to_xyxy', (["det_bbox_set[ann['file_name']]"], {}), "(det_bbox_set[ann['file_name']])\n", (8300, 8332), False, 'from hybrik.utils.bbox import bbox_clip_xyxy, bbox_xywh_to_xyxy\n'), ((8444, 8474), 'hybrik.utils.bbox.bbox_xywh_to_xyxy', 'bbox_xywh_to_xyxy', (["ann['bbox']"], {}), "(ann['bbox'])\n", (8461, 8474), False, 'from hybrik.utils.bbox import bbox_clip_xyxy, bbox_xywh_to_xyxy\n'), ((8657, 8685), 'numpy.array', 'np.array', (["ann['h36m_joints']"], {}), "(ann['h36m_joints'])\n", (8665, 8685), True, 'import numpy as np\n'), ((9044, 9067), 'numpy.array', 'np.array', (["ann['thetas']"], {}), "(ann['thetas'])\n", (9052, 9067), True, 'import numpy as np\n'), ((9974, 9987), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9980, 9987), True, 'import 
numpy as np\n'), ((10053, 10066), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (10059, 10066), True, 'import numpy as np\n'), ((9939, 9952), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (9945, 9952), True, 'import numpy as np\n'), ((10018, 10031), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (10024, 10031), True, 'import numpy as np\n')]
|
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import numpy as np
from ._cutils import is_symmetric_and_hollow_cy
from ._cutils import distmat_reorder_cy, distmat_reorder_condensed_cy
def is_symmetric_and_hollow(mat):
"""
Check if a Distance Matrix is symmetric and hollow.
Equivalent to [not (mat.T != mat).any(), np.trace(mat) == 0]
Parameters
----------
mat : 2D array_like
Distance matrix.
    Returns
    -------
is_symmetric: Boolean
not (mat.T != mat).any()
is_hollow: Boolean
np.trace(mat) == 0
"""
# is_symmetric_and_hollow_cy is optimized
    # for the common case of c_contiguous input.
# For all other cases, make a copy.
if not mat.flags.c_contiguous:
mat = np.asarray(mat, order='C')
return is_symmetric_and_hollow_cy(mat)
def is_symmetric(mat):
"""
Check if a Distance Matrix is symmetric.
Equivalent to not (mat.T != mat).any()
Parameters
----------
mat : 2D array_like
Distance matrix.
    Returns
    -------
is_symmetric: Boolean
not (mat.T != mat).any()
"""
# the is_hollow check is really cheap,
# so can reuse is_symmetric_and_hollow
return is_symmetric_and_hollow(mat)[0]
def is_hollow(mat):
"""
Check if a Distance Matrix is hollow.
Equivalent to np.trace(mat) == 0
Parameters
----------
mat : 2D array_like
Distance matrix.
Result:
-------
is_hollow: Boolean
np.trace(mat) == 0
"""
# is_symmetric_and_hollow_cy spends most
    # of its time in the symmetry check, so just use numpy here.
return (np.trace(mat) == 0)
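# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Assuming the compiled Cython helpers from ._cutils are importable, the three
# checks above can be exercised on a small float64, C-contiguous matrix:
#
#   import numpy as np
#   dm = np.array([[0., 1., 2.],
#                  [1., 0., 3.],
#                  [2., 3., 0.]])
#   is_symmetric_and_hollow(dm)   # pair equivalent to (True, True)
#   is_symmetric(dm)              # True
#   is_hollow(dm)                 # True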
def distmat_reorder_buf(in_mat, reorder_vec, out_mat, validate=False):
"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ [0, 1, 5, 4] ,
[1, 0, 3, 2] ,
[5, 3, 0, 6] ,
[4, 2, 6, 0] ]
Parameters
----------
in_mat : 2D array_like
Distance matrix
reorder_vec : 1D_array_like
List of permutation indexes
out_mat : 2D array_like
        Output distance matrix; must be in C order and the same size as
        reorder_vec.
validate: boolean
        Optional; if True, validate reorder_vec content. Defaults to False.
"""
np_reorder = np.asarray(reorder_vec, dtype=np.long)
if validate:
maxsize = in_mat.shape[0]
        bad_cnt = np.where((np_reorder < 0) | (np_reorder >= maxsize))[0].size
if bad_cnt > 0:
raise ValueError("Invalid reorder_vec")
if not in_mat.flags.c_contiguous:
in_mat = np.asarray(in_mat, order='C')
distmat_reorder_cy(in_mat, np_reorder, out_mat)
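# Illustrative usage of distmat_reorder_buf (editor's sketch, not part of the
# original module): the caller preallocates a C-ordered output buffer of shape
# (len(reorder_vec), len(reorder_vec)) and the reordered matrix is written into it:
#
#   dm = np.array([[0., 1., 2., 3.],
#                  [1., 0., 4., 5.],
#                  [2., 4., 0., 6.],
#                  [3., 5., 6., 0.]])
#   out = np.empty((4, 4), dm.dtype)
#   distmat_reorder_buf(dm, [1, 0, 3, 2], out)
#   # out now holds the reordered matrix from the docstring example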
def distmat_reorder(in_mat, reorder_vec, validate=False):
"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ [0, 1, 5, 4] ,
[1, 0, 3, 2] ,
[5, 3, 0, 6] ,
[4, 2, 6, 0] ]
Parameters
----------
in_mat : 2D array_like
Distance matrix, must be in c_order
reorder_vec : 1D_array_like
List of permutation indexes
validate: boolean
        Optional; if True, validate reorder_vec content. Defaults to False.
Returns
-------
out_mat : 2D array_like
Distance matrix
"""
np_reorder = np.asarray(reorder_vec, dtype=np.long)
if validate:
maxsize = in_mat.shape[0]
        bad_cnt = np.where((np_reorder < 0) | (np_reorder >= maxsize))[0].size
if bad_cnt > 0:
raise ValueError("Invalid reorder_vec")
if not in_mat.flags.c_contiguous:
in_mat = np.asarray(in_mat, order='C')
out_mat = np.empty([np_reorder.size, np_reorder.size], in_mat.dtype)
distmat_reorder_cy(in_mat, np_reorder, out_mat)
return out_mat
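# Illustrative usage of distmat_reorder (editor's sketch, not part of the
# original module), reproducing the docstring example; a shorter reorder
# vector such as [1, 3] would select and reorder only those rows/columns:
#
#   dm = np.array([[0., 1., 2., 3.],
#                  [1., 0., 4., 5.],
#                  [2., 4., 0., 6.],
#                  [3., 5., 6., 0.]])
#   distmat_reorder(dm, [1, 0, 3, 2])
#   # -> [[0., 1., 5., 4.],
#   #     [1., 0., 3., 2.],
#   #     [5., 3., 0., 6.],
#   #     [4., 2., 6., 0.]]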
def distmat_reorder_condensed(in_mat, reorder_vec, validate=False):
"""
Reorder the rows and columns of a distance matrix
given a reorder vector.
Not all of the columns need to be used.
For example:
[ [0, 1, 2, 3] ,
[1, 0, 4, 5] ,
[2, 4, 0, 6] ,
[3, 5, 6, 0] ]
with
[1,0,3,2]
will result in
[ 1, 5, 4 , 3, 2, 6 ]
Parameters
----------
in_mat : 2D array_like
Distance matrix, must be in c_order
reorder_vec : 1D_array_like
List of permutation indexes
validate: boolean
        Optional; if True, validate reorder_vec content. Defaults to False.
Returns
-------
out_mat_condensed : 1D array_like
Condensed distance matrix
"""
np_reorder = np.asarray(reorder_vec, dtype=np.long)
if validate:
maxsize = in_mat.shape[0]
        bad_cnt = np.where((np_reorder < 0) | (np_reorder >= maxsize))[0].size
if bad_cnt > 0:
raise ValueError("Invalid reorder_vec")
if not in_mat.flags.c_contiguous:
in_mat = np.asarray(in_mat, order='C')
csize = np.long(((np_reorder.size-1)*np_reorder.size)/2)
out_mat_condensed = np.empty([csize], in_mat.dtype)
distmat_reorder_condensed_cy(in_mat, np_reorder, out_mat_condensed)
return out_mat_condensed
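# Illustrative usage of distmat_reorder_condensed (editor's sketch, not part of
# the original module). The result is the upper triangle of the reordered
# matrix in row-major order, i.e. the condensed layout used by
# scipy.spatial.distance.squareform:
#
#   dm = np.array([[0., 1., 2., 3.],
#                  [1., 0., 4., 5.],
#                  [2., 4., 0., 6.],
#                  [3., 5., 6., 0.]])
#   distmat_reorder_condensed(dm, [1, 0, 3, 2])
#   # -> [1., 5., 4., 3., 2., 6.]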
|
[
"numpy.trace",
"numpy.empty",
"numpy.asarray",
"numpy.where",
"numpy.long"
] |
[((2784, 2822), 'numpy.asarray', 'np.asarray', (['reorder_vec'], {'dtype': 'np.long'}), '(reorder_vec, dtype=np.long)\n', (2794, 2822), True, 'import numpy as np\n'), ((3971, 4009), 'numpy.asarray', 'np.asarray', (['reorder_vec'], {'dtype': 'np.long'}), '(reorder_vec, dtype=np.long)\n', (3981, 4009), True, 'import numpy as np\n'), ((4318, 4376), 'numpy.empty', 'np.empty', (['[np_reorder.size, np_reorder.size]', 'in_mat.dtype'], {}), '([np_reorder.size, np_reorder.size], in_mat.dtype)\n', (4326, 4376), True, 'import numpy as np\n'), ((5219, 5257), 'numpy.asarray', 'np.asarray', (['reorder_vec'], {'dtype': 'np.long'}), '(reorder_vec, dtype=np.long)\n', (5229, 5257), True, 'import numpy as np\n'), ((5564, 5616), 'numpy.long', 'np.long', (['((np_reorder.size - 1) * np_reorder.size / 2)'], {}), '((np_reorder.size - 1) * np_reorder.size / 2)\n', (5571, 5616), True, 'import numpy as np\n'), ((5637, 5668), 'numpy.empty', 'np.empty', (['[csize]', 'in_mat.dtype'], {}), '([csize], in_mat.dtype)\n', (5645, 5668), True, 'import numpy as np\n'), ((1056, 1082), 'numpy.asarray', 'np.asarray', (['mat'], {'order': '"""C"""'}), "(mat, order='C')\n", (1066, 1082), True, 'import numpy as np\n'), ((1929, 1942), 'numpy.trace', 'np.trace', (['mat'], {}), '(mat)\n', (1937, 1942), True, 'import numpy as np\n'), ((3086, 3115), 'numpy.asarray', 'np.asarray', (['in_mat'], {'order': '"""C"""'}), "(in_mat, order='C')\n", (3096, 3115), True, 'import numpy as np\n'), ((4273, 4302), 'numpy.asarray', 'np.asarray', (['in_mat'], {'order': '"""C"""'}), "(in_mat, order='C')\n", (4283, 4302), True, 'import numpy as np\n'), ((5521, 5550), 'numpy.asarray', 'np.asarray', (['in_mat'], {'order': '"""C"""'}), "(in_mat, order='C')\n", (5531, 5550), True, 'import numpy as np\n'), ((2892, 2941), 'numpy.where', 'np.where', (['(np_reorder < 0 or np_reorder >= maxsize)'], {}), '(np_reorder < 0 or np_reorder >= maxsize)\n', (2900, 2941), True, 'import numpy as np\n'), ((4079, 4128), 'numpy.where', 'np.where', (['(np_reorder < 0 or np_reorder >= maxsize)'], {}), '(np_reorder < 0 or np_reorder >= maxsize)\n', (4087, 4128), True, 'import numpy as np\n'), ((5327, 5376), 'numpy.where', 'np.where', (['(np_reorder < 0 or np_reorder >= maxsize)'], {}), '(np_reorder < 0 or np_reorder >= maxsize)\n', (5335, 5376), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.patches as patches
import math
def plot(ctx):
"Delegation status over time"
switches = [opts['_switch'] for _, opts in ctx.topo.graph.nodes(data=True) if opts.get('_switch')]
fig, ax = plt.subplots(figsize=(16, 6))
tvalues = []
for switch in switches:
if switch.label == 'DS':
for t, report in enumerate(switch.reports):
#print(t, report.cnt_port_delegation_status)
for p, status in enumerate(report.cnt_port_delegation_status):
if status == 1:
tvalues.append(t)
                        # mark delegated ports (status == 1) with an orange cell
                        rect = patches.Rectangle((t - 1, p - 0.5), 1, 1, color='orange', alpha=0.3)
                        ax.add_patch(rect)
                    #else:
                    #    rect = patches.Rectangle((t - 1, p - 0.5), 1, 1, color='grey', alpha=0.1)
                    #    ax.add_patch(rect)
    # light horizontal separators between port rows
    for p in range(1, 21):
ax.hlines(p-0.5, 0, len(tvalues), color='grey', linestyle='-', linewidth=1, alpha=0.3)
    # snap the x-axis limits to multiples of 25 seconds around the observed times
    x1 = (int(math.ceil(min(tvalues) / 25.0)) * 25) - 25
x2 = int(math.ceil(max(tvalues) / 25.0)) * 25
ax.set_xlim(x1,x2)
ax.set_ylim(-0.5,20.5)
ax.set_yticks(np.arange(21))
ax.set_xlabel('time (s)')
ax.set_ylabel('port number')
plt.gca().xaxis.grid(True, color='grey', linestyle='--', linewidth=1, alpha=0.3)
plt.show()
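# Editor's note (inferred from the code above, not part of the original): `ctx`
# is assumed to provide a topology graph whose nodes carry '_switch' objects;
# the switch labelled 'DS' holds one report per simulated second, each with a
# per-port delegation-status vector, e.g.:
#
#   ctx.topo.graph.nodes(data=True)             # [(node, {'_switch': sw}), ...]
#   sw.label                                    # 'DS' selects the delegation switch
#   sw.reports[t].cnt_port_delegation_status    # sequence of 0/1, one entry per port
#
# Orange cells in the plot mark ports whose delegation status was 1 at time t.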
|
[
"matplotlib.pyplot.show",
"matplotlib.patches.Rectangle",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots"
] |
[((403, 432), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(16, 6)'}), '(figsize=(16, 6))\n', (415, 432), True, 'import matplotlib.pyplot as plt\n'), ((1572, 1582), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1580, 1582), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1415), 'numpy.arange', 'np.arange', (['(21)'], {}), '(21)\n', (1411, 1415), True, 'import numpy as np\n'), ((1487, 1496), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1494, 1496), True, 'import matplotlib.pyplot as plt\n'), ((813, 881), 'matplotlib.patches.Rectangle', 'patches.Rectangle', (['(t - 1, p - 0.5)', '(1)', '(1)'], {'color': '"""orange"""', 'alpha': '(0.3)'}), "((t - 1, p - 0.5), 1, 1, color='orange', alpha=0.3)\n", (830, 881), True, 'import matplotlib.patches as patches\n')]
|