"""
=============================================================================
Eindhoven University of Technology
==============================================================================
Source Name : inferenceToyCase.py
This file loads the weights of a pretrained model and runs inference
Author : <NAME>
Date : 09/08/2019
Reference : <NAME>, <NAME>, and <NAME>,
"Deep probabilistic subsampling for task-adaptive compressed sensing", 2019
==============================================================================
"""
import sys, os.path as path, os
import numpy as np
from keras import backend as K
from keras.callbacks import ReduceLROnPlateau, TensorBoard
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator
from matplotlib import pyplot as plt
import tensorflow as tf
from keras.utils import to_categorical
from keras.models import Model
from sklearn.model_selection import train_test_split
import myModel
#=============================================================================
versionName = "Toy_loupe_recon_fact32_lr0.0001_lrMult10_EM_0.0001-0.0005-0-50"
weightFile = "weights-833-0.03"
savedir = os.path.join(os.path.dirname(__file__),versionName)
indComp = versionName.find('fact')
try:
comp = int(versionName[indComp+4:indComp+6])
except ValueError:
comp = int(versionName[indComp+4:indComp+5])
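# Example of the fallback above: the two-character slice parses e.g. 'fact32' to
# comp = 32 directly, while a single-digit factor such as 'fact8_' makes
# int('8_') raise a ValueError, so the one-character slice then yields comp = 8.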
circle = False
DPSsamp = False
Bahadir = False
uniform = False
if versionName.find("GumbelTopK") > -1:
gumbelTopK = True
DPSsamp = True
else:
gumbelTopK = False
if versionName.find("DPS") > -1:
DPSsamp = True # If true, sub-sampling is learned by LASSY. If false, we use a fixed sub-sampling pattern (uniform or random)
elif versionName.find("loupe") > -1:
Bahadir = True
elif versionName.find("uniform") > -1:
uniform = True # In case DPSsamp is False, we use a non-trainable (fixed) sampling pattern which is either uniform, circular or random
elif versionName.find("lpf") > -1:
circle = True # In case DPSsamp is False, we use a non-trainable (fixed) sampling pattern which is either uniform, circular or random
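# For the versionName defined above, which contains 'loupe' and neither
# 'GumbelTopK' nor 'DPS', these checks resolve to Bahadir = True while
# gumbelTopK, DPSsamp, uniform and circle all remain False.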
#=============================================================================
#%%
"""
=============================================================================
Load testset
=============================================================================
"""
input_size = (32,32)
nr_examples = 1000
test_x = np.load('testSet.npy')
test_y = np.load('testSetY.npy')
#disp_example = 11
#plt.imshow(test_y[disp_example,:,:,0])
#
#%%
"""
=============================================================================
Parameter definitions
=============================================================================
"""
input_dim = [input_size[0],input_size[1],2] # Dimensions of the network input: a 2D grid of Fourier bins with IQ components as channels
target_dim = [input_size[0],input_size[1],2] # Dimensions of the network target: same shape as the input
mux_out = np.prod(input_dim[0:2])//comp # Multiplexer output dims: the amount of samples to be sampled from the input
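# Worked example of the sampling budget implied above (a sanity-check sketch,
# not part of the original script): with a 32x32 input and comp = 32 (parsed
# from 'fact32' in versionName), mux_out = (32*32)//32 = 32, i.e. 32 of the
# 1024 Fourier-domain samples are retained.
assert mux_out == np.prod(input_size) // comp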
"""
=============================================================================
Model definition
=============================================================================
"""
def SSIM(y_true, y_pred):
return tf.image.ssim(y_true, y_pred, max_val=1)
def PSNR(y_true, y_pred):
return tf.image.psnr(y_true, y_pred, max_val=1)
loss = 'mean_squared_error'
metrics = [SSIM, PSNR,'mean_squared_error']
if not Bahadir:
model = myModel.full_model(
input_dim,
target_dim,
comp,
mux_out,
2,
[],
DPSsamp,
Bahadir,
uniform,
circle,
1000,
32,
gumbelTopK)
## Load the pretrained weights and compile the model:
model.load_weights(os.path.join(savedir,weightFile+".h5"))
model.compile(optimizer='adam',loss=loss, metrics=metrics)
else:
import ThresholdingLOUPE
model = ThresholdingLOUPE.LoadModelLOUPE(comp,input_dim,savedir,weightFile)
sgd = keras.optimizers.SGD(lr=1e-4, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(optimizer=sgd,loss=loss, metrics=metrics)
model.summary()
#%%
"""
=============================================================================
Inference
=============================================================================
"""
pred = model.predict(test_x)
#%%
"""
=============================================================================
Evaluate
=============================================================================
"""
def SSIM(y_true, y_pred):
return tf.image.ssim(y_true, y_pred, max_val=10)
def PSNR(y_true, y_pred):
return tf.image.psnr(y_true, y_pred, max_val=10)
loss = 'mean_squared_error'
metrics = [SSIM, PSNR,'mean_squared_error']
model.compile('adam',loss,metrics)
loss,SSIM,PSNR,MSE = model.evaluate(test_x,test_y)
print("MSE across {} examples: {}".format(nr_examples,MSE))
print("PSNR across {} examples: {}".format(nr_examples,PSNR))
print("SSIM across {} examples: {}".format(nr_examples,SSIM))
with open(savedir+"\\results.txt", "w") as text_file:
print("MSE across {} examples: {} \n".format(nr_examples,MSE),file=text_file)
print("PSNR across {} examples: {} \n".format(nr_examples,PSNR),file=text_file)
print("SSIM across {} examples: {}".format(nr_examples,SSIM),file=text_file)
#%%
"""
=============================================================================
Display
=============================================================================
"""
savefigs = True
disp_examples = [18, 60]
for i in range(len(disp_examples)):
disp_example = disp_examples[i]
plt.figure()
spect = np.sqrt(test_x[disp_example,:,:,0]**2+test_x[disp_example,:,:,1]**2)
plt.imshow(20*np.log10(0.001+spect/np.max(spect)), cmap='jet',vmin=-40,vmax=0)
plt.axis('off')
if savefigs:
plt.savefig(savedir+'\\example_{}_input_40dB.png'.format(disp_example),bbox_inches='tight')
plt.savefig(savedir+'\\example_{}_input_40dB.svg'.format(disp_example),bbox_inches='tight')
plt.pause(.1)
plt.figure()
plt.imshow(test_y[disp_example,:,:,0], cmap='gray',vmin=0,vmax=np.max(test_y[disp_example]))
plt.axis('off')
plt.pause(.5)
if savefigs:
plt.savefig(savedir+'\\example_{}_target.png'.format(disp_example),bbox_inches='tight')
plt.savefig(savedir+'\\example_{}_target.svg'.format(disp_example),bbox_inches='tight')
plt.pause(.1)
plt.figure()
plt.imshow(pred[disp_example,:,:,0], cmap='gray',vmin=0,vmax=np.max(test_y[disp_example]))
plt.axis('off')
if savefigs:
plt.savefig(savedir+'\\example_{}_prediction.png'.format(disp_example),bbox_inches='tight')
plt.savefig(savedir+'\\example_{}_prediction.svg'.format(disp_example),bbox_inches='tight')
plt.pause(.1)
#%%
#% Display samples
n_MC = 1000
if DPSsamp:
model_sampling = Model(inputs = model.input, outputs = model.get_layer("AtranA_0").output)
patterns = model_sampling.predict_on_batch(tf.zeros((n_MC,32,32,2)))[:,:,:,0]
pattern = patterns[0]
model_distribution = Model(inputs = model.input, outputs = model.get_layer("CreateSampleMatrix").output)
logits = model_distribution.predict_on_batch(tf.zeros((1,32,32,2)))
unnormDist = np.exp(logits)
distribution = np.transpose(np.transpose(unnormDist) / np.sum(unnormDist,1))
distribution = np.reshape(np.sum(distribution,axis=0),(input_dim[0],input_dim[1]))
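# The logits from 'CreateSampleMatrix' hold one row per sample to be drawn; the
# two lines above apply a row-wise softmax (exp followed by row normalisation)
# and the sum over rows collapses the per-sample categorical distributions into
# a single 32x32 map of sampling probabilities.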
elif Bahadir:
model_sampling = Model(inputs = model.input, outputs = model.get_layer("HardSampleMask").output)
patterns = []
Mask = model_sampling.predict_on_batch(tf.zeros((1,input_dim[0],input_dim[1],input_dim[2])))
# Note: the order of the distribution and the pattern in the Mask variable is switched due to the fftshift function
pattern = Mask[0]
renormMask = Mask[1]
thresh = np.random.uniform(0.0,1.0,(input_dim[0],input_dim[1]))
sampleMask = ThresholdingLOUPE.sigmoid(12 * (renormMask-thresh))
plt.figure()
plt.imshow(sampleMask, cmap="hot_r")
plt.xticks([])
plt.yticks([])
plt.savefig(os.path.join(savedir,'NonHardSamplesTraining.svg'), bbox_inches="tight")
#plt.colorbar(shrink=0.5)
plt.pause(.1)
def MCsamplingLOUPE(renormMask):
MCsamples = []
# We explicitly re-run the thresholding code here; otherwise the same uniform noise would be reused for every MC sample
for i in range(n_MC):
thresh = np.random.uniform(0.0,1.0,(input_dim[0],input_dim[1]))
sampleMask = ThresholdingLOUPE.sigmoid(12 * (renormMask-thresh))
# Make sure to only select M hard samples
sampleCoord = ThresholdingLOUPE.largest_indices(sampleMask,mux_out)
hardSamples = np.zeros_like(sampleMask)
hardSamples[sampleCoord[0],sampleCoord[1]] = 1
MCsamples.append(hardSamples)
return np.stack(MCsamples,axis=0)
patterns = MCsamplingLOUPE(renormMask)
else:
model_sampling = Model(inputs = model.input, outputs = model.get_layer("CreateSampleMatrix").output)
pattern = np.expand_dims(model_sampling.predict_on_batch(tf.zeros((n_MC,32,32,2))),0)[0,:,:,0]
#%%
# Plot one realization
print('One realization')
plt.imshow(pattern,cmap='gray_r', vmin=0, vmax=1)
plt.axis('off')
if savefigs:
plt.savefig(os.path.join(savedir, 'hardSamples.png'), bbox_inches='tight')
plt.savefig(os.path.join(savedir, 'hardSamples.svg'), bbox_inches='tight')
plt.pause(.1)
# Plot MC plots
if DPSsamp or Bahadir:
print(str(n_MC)+' times MC sampling')
plt.figure()
plt.imshow(-np.mean(patterns,0),cmap='gray')
plt.axis('off')
if savefigs:
plt.savefig(os.path.join(savedir, 'hardSamples_MCdist.svg'), bbox_inches='tight')
plt.pause(.1)
# Plot trained distribution
if DPSsamp or Bahadir:
print('Trained distribution')
plt.figure()
plt.imshow(distribution,cmap='gray_r')
plt.axis('off')
if savefigs:
plt.savefig(os.path.join(savedir, 'hardSamples_dist.svg'), bbox_inches='tight')
plt.pause(.1)
#!/usr/bin/env python
#
# SPECCLIENT -- Client methods for the Spectroscopic Data Service
#
__authors__ = '<NAME> <<EMAIL>>'
__version__ = 'v1.2.0'
'''
Client methods for the Spectroscopic Data Service.
Spectro Client Interface
------------------------
client = getClient (context='<context>', profile='<profile>')
status = isAlive (svc_url=DEF_SERVICE_URL, timeout=2)
set_svc_url (svc_url)
svc_url = get_svc_url ()
set_context (context)
ctx = get_context ()
ctxs = list_contexts (context, fmt='text')
ctxs = list_contexts (context=None, fmt='text')
set_profile (profile)
prof = get_profile ()
profs = list_profiles (profile, fmt='text')
profs = list_profiles (profile=None, fmt='text')
catalogs = catalogs (context='default', profile='default')
QUERY INTERFACE:
id_list = query (<region> | <coord, size> | <ra, dec, size>,
constraint=<sql_where_clause>,
context=None, profile=None, **kw)
ACCESS INTERFACE:
list = getSpec (id_list, fmt='numpy',
out=None, align=False, cutout=None,
context=None, profile=None, **kw)
PLOT INTERFACE:
plot (spec, context=None, profile=None, out=None, **kw)
status = prospect (spec, context=context, profile=profile, **kw)
image = preview (id, context=context, profile=profile, **kw)
image = plotGrid (id_list, nx, ny, page=<N>,
context=context, profile=profile, **kw)
image = stackedImage (id_list, fmt='png|numpy',
align=False, yflip=False,
context=context, profile=profile, **kw)
UTILITY METHODS:
df = to_pandas (npy_data)
spec1d = to_Spectrum1D (npy_data)
tab = to_Table (npy_data)
Import via
.. code-block:: python
from dl import specClient
'''
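# A minimal end-to-end sketch of the interface summarized above (assumes a
# reachable service and the default dataset context; the coordinates are the
# ones used in the docstring examples):
#
#   from dl import specClient as spec
#   id_list = spec.query(0.125, 12.123, 0.1)      # query by position
#   data = spec.getSpec(id_list, fmt='numpy')     # retrieve the spectra
#   spec.plot(id_list[0])                         # quick-look plot of one spectrum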
import os
import sys
import socket
import json
import numpy as np
import pandas as pd
from io import BytesIO
from PIL import Image
# Turn off some annoying astropy warnings
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', AstropyWarning)
import logging
logging.disable(logging.WARNING)
logging.getLogger("specutils").setLevel(logging.CRITICAL)
from specutils import Spectrum1D
from specutils import SpectrumCollection
from astropy import units as u
#from astropy.nddata import StdDevUncertainty
from astropy.nddata import InverseVariance
from astropy.table import Table
from matplotlib import pyplot as plt # visualization libs
try:
import pycurl_requests as requests # faster 'requests' lib
except ImportError:
import requests
import pycurl # low-level interface
from urllib.parse import quote_plus # URL encoding
# Data Lab imports.
#from dl import queryClient
from dl import storeClient
from dl.Util import def_token
from dl.Util import multimethod
from dl.helpers.utils import convert
# Python version check.
is_py3 = sys.version_info.major == 3
# The URL of the service to access. This may be changed by passing a new
# URL into the set_svc_url() method before beginning.
DEF_SERVICE_ROOT = "https://datalab.noao.edu"
# Allow the service URL for dev/test systems to override the default.
THIS_HOST = socket.gethostname()
if THIS_HOST[:5] == 'dldev':
DEF_SERVICE_ROOT = "http://dldev.datalab.noao.edu"
elif THIS_HOST[:6] == 'dltest':
DEF_SERVICE_ROOT = "http://dltest.datalab.noao.edu"
elif THIS_HOST[:4] == 'gp12':
DEF_SERVICE_ROOT = "http://gp06.datalab.noao.edu:6998"
# Determine the IP address of this host by connecting a UDP socket toward a
# public address (no packets are actually sent for a connectionless socket).
sock = socket.socket(type=socket.SOCK_DGRAM) # host IP address
sock.connect(('8.8.8.8', 1)) # any routable address works here
THIS_IP, _ = sock.getsockname()
DEF_SERVICE_URL = DEF_SERVICE_ROOT + "/spec"
SM_SERVICE_URL = DEF_SERVICE_ROOT + "/storage"
QM_SERVICE_URL = DEF_SERVICE_ROOT + "/query"
# Use cURL for requests when possible.
USE_CURL = True
# The requested service "profile". A profile refers to the specific
# machines and services used by the service.
DEF_SERVICE_PROFILE = "default"
# The requested dataset "context". A context refers to the specific dataset
# being served. This determines what is allowed within certain methods.
DEF_SERVICE_CONTEXT = "default"
# Use /tmp/SPEC_DEBUG and /tmp/SPEC_VERBOSE files as a way to turn on debugging and verbose output in the client code.
DEBUG = os.path.isfile('/tmp/SPEC_DEBUG')
VERBOSE = os.path.isfile('/tmp/SPEC_VERBOSE')
# ######################################################################
#
# Spectroscopic Data Client Interface
#
# This API provides convenience methods that allow an application to
# import the Client class without having to explicitly instantiate a
# class object. The parameter descriptions and example usage is given
# in the comments for the class methods. Module methods have their
# docstrings patched below.
#
# ######################################################################
# ###################################
# Spectroscopic Data error class
# ###################################
class dlSpecError(Exception):
'''A throwable error class.
'''
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
# ###################################
# Py2/Py3 Compatibility Utilities
# ###################################
def spcToString(s):
'''spcToString -- Force a return value to be type 'string' for all
Python versions.
'''
if is_py3:
if isinstance(s, bytes):
strval = str(s.decode())
elif isinstance(s, str):
strval = s
else:
if isinstance(s, bytes) or isinstance(s, unicode):
strval = str(s)
else:
strval = s
return strval
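# e.g. under Python 3 both spcToString(b'OK') and spcToString('OK') return the
# str value 'OK'.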
# -----------------------------
# Utility Methods
# -----------------------------
# --------------------------------------------------------------------
# SET_SVC_URL -- Set the ServiceURL to call.
#
def set_svc_url(svc_url):
return sp_client.set_svc_url(svc_url.strip('/'))
# --------------------------------------------------------------------
# GET_SVC_URL -- Get the ServiceURL to call.
#
def get_svc_url():
return sp_client.get_svc_url()
# --------------------------------------------------------------------
# SET_PROFILE -- Set the service profile to use.
#
def set_profile(profile):
return sp_client.set_profile(profile)
# --------------------------------------------------------------------
# GET_PROFILE -- Get the service profile to use.
#
def get_profile():
return sp_client.get_profile()
# --------------------------------------------------------------------
# SET_CONTEXT -- Set the dataset context to use.
#
def set_context(context):
return sp_client.set_context(context)
# --------------------------------------------------------------------
# GET_CONTEXT -- Get the dataset context to use.
#
def get_context():
return sp_client.get_context()
# --------------------------------------------------------------------
# ISALIVE -- Ping the service to see if it responds.
#
def isAlive(svc_url=DEF_SERVICE_URL, timeout=5):
return sp_client.isAlive(svc_url=svc_url, timeout=timeout)
# --------------------------------------------------------------------
# LIST_PROFILES -- List the available service profiles.
#
@multimethod('spc', 1, False)
def list_profiles(profile, fmt='text'):
return sp_client._list_profiles(profile=profile, fmt=fmt)
@multimethod('spc', 0, False)
def list_profiles(profile=None, fmt='text'):
'''Retrieve the profiles supported by the spectro data service.
Usage:
list_profiles ([profile], fmt='text')
MultiMethod Usage:
------------------
specClient.list_profiles (profile)
specClient.list_profiles ()
Parameters
----------
profile: str
A specific profile configuration to list. If None, a list of
available profiles is returned.
fmt: str
Result format: One of 'text' or 'json'
Returns
-------
profiles: list/dict
A list of the names of the supported profiles or a dictionary of
the specific profile
Example
-------
.. code-block:: python
profiles = specClient.list_profiles(profile)
profiles = specClient.list_profiles()
'''
return sp_client._list_profiles(profile=profile, fmt=fmt)
# --------------------------------------------------------------------
# LIST_CONTEXTS -- List the available dataset contexts.
#
@multimethod('spc',1,False)
def list_contexts(context, fmt='text'):
return sp_client._list_contexts(context=context, fmt=fmt)
@multimethod('spc',0,False)
def list_contexts(context=None, fmt='text'):
'''Retrieve the contexts supported by the spectro data service.
Usage:
list_contexts ([context], fmt='text')
MultiMethod Usage:
------------------
specClient.list_contexts (context)
specClient.list_contexts ()
Parameters
----------
context: str
A specific context configuration to list. If None, a list of
available contexts is returned.
fmt: str
Result format: One of 'text' or 'json'
Returns
-------
contexts: list/dict
A list of the names of the supported contexts or a dictionary of
the specific contexts
Example
-------
.. code-block:: python
contexts = specClient.list_contexts(context)
contexts = specClient.list_contexts()
'''
return sp_client._list_contexts(context=context, fmt=fmt)
# --------------------------------------------------------------------
# CATALOGS -- List available catalogs for a given dataset context
#
def catalogs(context='default', profile='default', fmt='text'):
'''List available catalogs for a given dataset context
'''
return sp_client.catalogs(context=context, profile=profile, fmt=fmt)
# --------------------------------------------------------------------
# TO_SPECTRUM1D -- Utility method to convert a Numpy array to Spectrum1D
#
def to_Spectrum1D(npy_data):
'''Utility method to convert a Numpy array to Spectrum1D
'''
return sp_client.to_Spectrum1D(npy_data)
# --------------------------------------------------------------------
# TO_PANDAS -- Utility method to convert a Numpy array to a Pandas DataFrame
#
def to_pandas(npy_data):
'''Utility method to convert a Numpy array to a Pandas DataFrame
'''
return sp_client.to_pandas(npy_data)
# --------------------------------------------------------------------
# TO_TABLE -- Utility method to convert a Numpy array to an Astropy Table
#
def to_Table(npy_data):
'''Utility method to convert a Numpy array to an Astropy Table object.
'''
return sp_client.to_Table(npy_data)
#######################################
# Spectroscopic Data Client Methods
#######################################
# --------------------------------------------------------------------
# QUERY -- Query for spectra by position.
#
@multimethod('spc',3,False)
def query(ra, dec, size, constraint=None, out=None,
context=None, profile=None, **kw):
return sp_client._query(ra=ra, dec=dec, size=size,
pos=None,
region=None,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
@multimethod('spc',2,False)
def query(pos, size, constraint=None, out=None,
context=None, profile=None, **kw):
return sp_client._query(ra=None, dec=None, size=size,
pos=pos,
region=None,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
@multimethod('spc',1,False)
def query(region, constraint=None, out=None,
context=None, profile=None, **kw):
return sp_client._query(ra=None, dec=None, size=None,
pos=None,
region=region,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
@multimethod('spc',0,False)
def query(constraint=None, out=None, context=None, profile=None, **kw):
'''Query for a list of spectrum IDs that can then be retrieved from
the service.
Usage:
id_list = query(ra, dec, size, constraint=None, out=None,
context=None, profile=None, **kw)
id_list = query(pos, size, constraint=None, out=None,
context=None, profile=None, **kw)
id_list = query(region, constraint=None, out=None,
context=None, profile=None, **kw)
id_list = query(constraint=None, out=None,
context=None, profile=None, **kw)
Parameters
----------
ra: float
RA search center specified in degrees.
dec: float
Dec of search center specified in degrees.
size: float
Size of the search region, specified in degrees.
pos: Astropy Coord object
Coordinate of search center specified as an Astropy Coord object.
region: float
Array of polygon vertices (in degrees) defining a search region.
constraint: str
A valid SQL syntax that can be used as a WHERE constraint in the
search query.
out: str
Output filename to create. If None or an empty string the query
results are returned directly to the client. Otherwise, results
are written to the named file with one identifier per line. A
Data Lab 'vos://' prefix will save results to the named virtual
storage file.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
For context='sdss_dr16' | 'default':
fields:
specobjid # or 'bestobjid', etc
tuple # a plate/mjd/fiber tuple
Service will always return array of 'specobjid'
value, the p/m/f tuple is extracted from the
bitmask value by the client.
catalog:
<schema>.<table> # alternative catalog to query e.g. a
# VAC from earlier DR (must support an
# ra/dec search and return a specobjid-
# like value)
For all contexts:
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
result: array
An array of spectrum IDs appropriate for the dataset context.
Example
-------
1) Query by position:
.. code-block:: python
id_list = spec.query (0.125, 12.123, 0.1)
'''
return sp_client._query(ra=None, dec=None, size=None,
pos=None,
region=None,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
# --------------------------------------------------------------------
# GETSPEC -- Retrieve spectra for a list of objects.
#
def getSpec(id_list, fmt='numpy', out=None, align=False, cutout=None,
context=None, profile=None, **kw):
'''Get spectra for a list of object IDs.
Usage:
getSpec(id_list, fmt='numpy', out=None, align=False, cutout=None,
context=None, profile=None, **kw)
Parameters
----------
id_list: list object
List of object identifiers.
fmt: str
Return format of spectra
out:
Output file or return to caller if None
align:
Align spectra to common wavelength grid with zero-padding
cutout:
Wavelength cutout range (as "<start>-<end>")
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
values = None
Spectrum vectors to return.
token = None
Data Lab auth token.
id_col = None
Name of ID column in input table.
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
result: object or array of objects or 'OK' string
Example
-------
1) Retrieve spectra individually:
.. code-block:: python
id_list = spec.query (0.125, 12.123, 0.1)
for id in id_list:
spec = spec.getSpec (id)
.... do something
2) Retrieve spectra in bulk:
.. code-block:: python
spec = spec.getSpec (id_list, fmt='numpy')
.... 'spec' is an array of NumPy objects that may be
different sizes
'''
return sp_client.getSpec(id_list=id_list, fmt=fmt, out=out,
align=align, cutout=cutout,
context=context, profile=profile, **kw)
# --------------------------------------------------------------------
# PLOT -- Utility to batch plot a single spectrum, display plot directly.
#
def plot(spec, context=None, profile=None, out=None, **kw):
'''Utility to batch plot a single spectrum.
Usage:
spec.plot(id, context=None, profile=None, **kw)
Parameters
----------
spec: object ID or data array
Spectrum to be plotted. If 'spec' is a numpy array or a
Spectrum1D object the data are plotted directly, otherwise
the value is assumed to be an object ID that will be retrieved
from the service.
out: str
Output filename. If specified, plot saved as PNG.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
rest_frame - Whether or not to plot the spectra in the
rest-frame (def: True)
z - Redshift value
xlim - Set the xrange of the plot
ylim - Set the yrange of the plot
values - A comma-delimited string of which values to plot,
a combination of 'flux,model,sky,ivar'
mark_lines - Which lines to mark. No lines marked if None or
an empty string, otherwise one of 'em|abs|all|both'
grid - Plot grid lines (def: True)
dark - Dark-mode plot colors (def: True)
em_lines - List of emission lines to plot. If not given,
all the lines in the default list will be plotted.
abs_lines - List of absorption lines to plot. If not given,
all the lines in the default list will be plotted.
spec_args - Plotting kwargs for the spectrum
model_args - Plotting kwargs for the model
ivar_args - Plotting kwargs for the ivar
sky_args - Plotting kwargs for the sky
Returns
-------
Nothing
Example
-------
1) Plot a single spectrum, save to a virtual storage file
.. code-block:: python
spec.plot (specID, context='sdss_dr16', out='vos://spec.png')
'''
return sp_client.plot(spec, context=context, profile=profile,
out=None, **kw)
# --------------------------------------------------------------------
# PROSPECT -- Utility wrapper to launch the interactive PROSPECT tool.
#
def prospect(spec, context=None, profile=None, **kw):
'''Utility wrapper to launch the interactive PROSPECT tool.
Usage:
stat = prospect(spec, context=None, profile=None, **kw)
Parameters
----------
spec: object ID or data array
Spectrum to be plotted. If 'spec' is a numpy array or a
Spectrum1D object the data are plotted directly, otherwise
the value is assumed to be an object ID that will be retrieved
from the service.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
TBD
Returns
-------
result: str
Status 'OK' string or error message.
Example
-------
1) Plot ....
.. code-block:: python
stat = spec.prospect (specID)
'''
pass
# --------------------------------------------------------------------
# PREVIEW -- Get a preview plot of a spectrum
#
def preview(spec, context=None, profile=None, **kw):
'''Get a preview plot of a spectrum
Usage:
spec.preview(spec, context=None, profile=None, **kw)
Parameters
----------
spec: object ID
Identifier of the spectrum to preview.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
N/A
Returns
-------
image: A PNG image object
Example
-------
1) Display a preview plot a given spectrum:
.. code-block:: python
from IPython.display import display, Image
display(Image(spec.preview(id),
format='png', width=400, height=100, unconfined=True))
'''
return sp_client.preview(spec, context=context, profile=profile, **kw)
# --------------------------------------------------------------------
# PLOTGRID -- Get a grid of preview plots of a spectrum list.
#
def plotGrid(id_list, nx, ny, page=0, context=None, profile=None, **kw):
'''Get a grid of preview plots of a spectrum list.
Usage:
image = plotGrid(id_list, nx, ny, page=0,
context=None, profile=None, **kw):
Parameters
----------
id_list: list object
List of object identifiers.
nx: int
Number of plots in the X dimension
ny: int
Number of plots in the Y dimension
page: int
Page number of the plot grid to return.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
image: A PNG image object
Example
-------
1) Display a 5x5 grid of preview plots for a list:
.. code-block:: python
npages = np.round((len(id_list) / 25) + (25 / len(id_list)))
for pg in range(npages):
data = spec.plotGrid(id_list, 5, 5, page=pg)
display(Image(data, format='png',
width=400, height=100, unconfined=True))
'''
return sp_client.plotGrid(id_list, nx, ny, page=page,
context=context, profile=profile, **kw)
# --------------------------------------------------------------------
# STACKEDIMAGE -- Get a stacked image of a list of spectra.
#
def stackedImage(id_list, align=False, yflip=False,
context=None, profile=None, **kw):
'''Get a stacked image of a list of spectra.
Usage:
Parameters
----------
id_list: list object
List of spectrum identifiers.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
result: ....
Example
-------
1) Query ....
.. code-block:: python
id_list = spec.query (0.125, 12.123, 0.1)
'''
return sp_client.stackedImage(id_list, align=align, yflip=yflip,
context=context, profile=profile, **kw)
#######################################
# Spectroscopic Data Client Class
#######################################
class specClient(object):
'''
SPECCLIENT -- Client-side methods to access the Data Lab
Spectroscopic Data Service.
'''
def __init__(self, context='default', profile='default'):
'''Initialize the specClient class.
'''
self.svc_url = DEF_SERVICE_URL # service URL
self.qm_svc_url = QM_SERVICE_URL # Query Manager service URL
self.sm_svc_url = SM_SERVICE_URL # Storage Manager service URL
self.auth_token = def_token(None) # default auth token (not used)
self.svc_profile = profile # service profile
self.svc_context = context # dataset context
self.hostip = THIS_IP
self.hostname = THIS_HOST
self.debug = DEBUG # interface debug flag
self.verbose = VERBOSE # interface verbose flag
# Get the server-side config for the context. Note this must also
# be updated whenever we do a set_svc_url() or set_context().
self.context = self._list_contexts(context)
# Standard Data Lab service methods.
#
def set_svc_url(self, svc_url):
'''Set the URL of the Spectroscopic Data Service to be used.
Parameters
----------
svc_url: str
Spectroscopic Data service base URL to call.
Returns
-------
Nothing
Example
-------
.. code-block:: python
from dl import specClient
specClient.set_svc_url("http://localhost:7001/")
'''
self.svc_url = spcToString(svc_url.strip('/'))
self.context = self._list_contexts(context=self.svc_context)
def get_svc_url(self):
'''Return the currently-used Spectroscopic Data Service URL.
Parameters
----------
None
Returns
-------
service_url: str
The currently-used Spectroscopic Data Service URL.
Example
-------
.. code-block:: python
from dl import specClient
service_url = specClient.get_svc_url()
'''
return spcToString(self.svc_url)
def set_profile(self, profile):
'''Set the requested service profile.
Parameters
----------
profile: str
Requested service profile string.
Returns
-------
Nothing
Example
-------
.. code-block:: python
from dl import specClient
profile = specClient.client.set_profile("dev")
'''
url = self.svc_url + '/validate?what=profile&value=%s' % profile
if spcToString(self.curl_get(url)) == 'OK':
self.svc_profile = spcToString(profile)
return 'OK'
else:
raise Exception('Invalid profile "%s"' % profile)
return 'OK'
def get_profile(self):
'''Get the requested service profile.
Parameters
----------
None
Returns
-------
profile: str
The currently requested service profile.
Example
-------
.. code-block:: python
from dl import specClient
profile = specClient.client.get_profile()
'''
return spcToString(self.svc_profile)
def set_context(self, context):
'''Set the requested dataset context.
Parameters
----------
context: str
Requested dataset context string.
Returns
-------
Nothing
Example
-------
.. code-block:: python
from dl import specClient
context = specClient.client.set_context("dev")
'''
url = self.svc_url + '/validate?what=context&value=%s' % context
if spcToString(self.curl_get(url)) == 'OK':
self.svc_context = spcToString(context)
self.context = self._list_contexts(context=self.svc_context)
return 'OK'
else:
raise Exception('Invalid context "%s"' % context)
def get_context(self):
'''Get the requested dataset context.
Parameters
----------
None
Returns
-------
context: str
The currently requested dataset context.
Example
-------
.. code-block:: python
from dl import specClient
context = specClient.client.get_context()
'''
return spcToString(self.svc_context)
def isAlive(self, svc_url=DEF_SERVICE_URL, timeout=2):
'''Check whether the service at the given URL is alive and responding.
This is a simple call to the root service URL or ping() method.
Parameters
----------
service_url: str
The Query Service URL to ping.
Returns
-------
result: bool
True if service responds properly, False otherwise
Example
-------
.. code-block:: python
from dl import specClient
if specClient.isAlive():
print("Spec Server is alive")
'''
url = svc_url
try:
r = requests.get(url, timeout=timeout)
resp = r.text
if r.status_code != 200:
return False
elif resp is not None and r.text.lower()[:11] != "hello world":
return False
except Exception:
return False
return True
###################################################
# UTILITY METHODS
###################################################
@multimethod('_spc',1,True)
def list_profiles(self, profile, fmt='text'):
'''Usage: specClient.client.list_profiles (profile, ...)
'''
return self._list_profiles(profile=profile, fmt=fmt)
@multimethod('_spc',0,True)
def list_profiles(self, profile=None, fmt='text'):
'''Usage: specClient.client.list_profiles (...)
'''
return self._list_profiles(profile=profile, fmt=fmt)
def _list_profiles(self, profile=None, fmt='text'):
'''Implementation of the list_profiles() method.
'''
headers = self.getHeaders(def_token(None))
svc_url = '%s/profiles?' % self.svc_url
svc_url += "profile=%s&" % profile
svc_url += "format=%s" % fmt
r = requests.get(svc_url, headers=headers)
profiles = spcToString(r.content)
if '{' in profiles:
profiles = json.loads(profiles)
return profiles
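# The request above resolves to a URL of the form
# '<svc_url>/profiles?profile=<profile>&format=<fmt>'; plain-text responses are
# returned as-is and JSON responses (containing '{') are decoded into a dict.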
@multimethod('_spc',1,True)
def list_contexts(self, context, fmt='text'):
'''Usage: specClient.client.list_contexts (context, ...)
'''
return self._list_contexts(context=context, fmt=fmt)
@multimethod('_spc',0,True)
def list_contexts(self, context=None, fmt='text'):
'''Usage: specClient.client.list_contexts (...)
'''
return self._list_contexts(context=context, fmt=fmt)
def _list_contexts(self, context=None, fmt='text'):
'''Implementation of the list_contexts() method.
'''
headers = self.getHeaders(def_token(None))
svc_url = '%s/contexts?' % self.svc_url
svc_url += "context=%s&" % context
svc_url += "format=%s" % fmt
r = requests.get(svc_url, headers=headers)
contexts = spcToString(r.content)
if '{' in contexts:
contexts = json.loads(contexts)
return contexts
def catalogs(self, context='default', profile='default', fmt='text'):
'''Usage: specClient.client.catalogs (...)
'''
headers = self.getHeaders(None)
svc_url = '%s/catalogs?' % self.svc_url
svc_url += "context=%s&" % context
svc_url += "profile=%s&" % profile
svc_url += "format=%s" % fmt
r = requests.get(svc_url, headers=headers)
catalogs = spcToString(r.text)
if '{' in catalogs:
catalogs = json.loads(catalogs)
return spcToString(catalogs)
# --------------------------------------------------------------------
# TO_SPECTRUM1D -- Utility method to convert a Numpy array to Spectrum1D
#
def to_Spectrum1D(self, npy_data):
''' Convert a Numpy spectrum array to a Spectrum1D object.
'''
if npy_data.ndim == 2:
lamb = 10**npy_data['loglam'][0] * u.AA
else:
lamb = 10**npy_data['loglam'] * u.AA
flux = npy_data['flux'] * 10**-17 * u.Unit('erg cm-2 s-1 AA-1')
mask = npy_data['flux'] == 0
flux_unit = u.Unit('erg cm-2 s-1 AA-1')
uncertainty = InverseVariance(npy_data['ivar'] / flux_unit**2)
spec1d = Spectrum1D(spectral_axis=lamb, flux=flux,
uncertainty=uncertainty, mask=mask)
spec1d.meta['sky'] = npy_data['sky'] * 10**-17 * flux_unit
spec1d.meta['model'] = npy_data['model'] * 10**-17 * flux_unit
spec1d.meta['ivar'] = npy_data['ivar']
return spec1d
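# Hypothetical usage sketch: given a record array 'npy_data' returned by
# getSpec(..., fmt='numpy') with 'loglam', 'flux', 'ivar', 'sky' and 'model'
# columns, to_Spectrum1D(npy_data) yields a Spectrum1D with flux in
# erg cm-2 s-1 AA-1, the inverse variance attached as the uncertainty, and the
# sky/model/ivar vectors kept in spec1d.meta.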
# --------------------------------------------------------------------
# TO_PANDAS -- Utility method to convert a Numpy array to a Pandas DataFrame
#
def to_pandas(self, npy_data):
'''Utility method to convert a Numpy array to a Pandas DataFrame
'''
return pd.DataFrame(data=npy_data, columns=npy_data.dtype.names)
# --------------------------------------------------------------------
# TO_TABLE -- Utility method to convert a Numpy array to an Astropy Table
#
def to_Table(self, npy_data):
'''Utility method to convert a Numpy array to an Astropy Table object.
'''
return Table(data=npy_data, names=npy_data.dtype.names)
###################################################
# SERVICE METHODS
###################################################
# --------------------------------------------------------------------
# QUERY -- Query for spectra by position.
#
@multimethod('_spc',3,True)
def query(self, ra, dec, size, constraint=None, out=None,
context=None, profile=None, **kw):
return self._query(ra=ra, dec=dec, size=size,
pos=None,
region=None,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
@multimethod('_spc',2,True)
def query(self, pos, size, constraint=None, out=None,
context=None, profile=None, **kw):
return self._query(ra=None, dec=None, size=size,
pos=pos,
region=None,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
@multimethod('_spc',1,True)
def query(self, region, constraint=None, out=None,
context=None, profile=None, **kw):
return self._query(ra=None, dec=None, size=None,
pos=None,
region=region,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
@multimethod('_spc',0,True)
def query(self, constraint=None, out=None,
context=None, profile=None, **kw):
'''Query for a list of spectrum IDs that can then be retrieved from
the service.
Usage:
id_list = query(ra, dec, size, constraint=None, out=None,
context=None, profile=None, **kw)
id_list = query(pos, size, constraint=None, out=None,
context=None, profile=None, **kw)
id_list = query(region, constraint=None, out=None,
context=None, profile=None, **kw)
id_list = query(constraint=None, out=None,
context=None, profile=None, **kw)
Parameters
----------
ra: float
RA search center specified in degrees.
dec: float
Dec of search center specified in degrees.
size: float
Size of the search region, specified in degrees.
pos: Astropy Coord object
Coordinate of search center specified as an Astropy Coord object.
region: float
Array of polygon vertices (in degrees) defining a search region.
out: str
Save query results to output filename. May be a 'vos://' URI or
local filename. If set to an empty string, the ID list is
returned as an ascii string.
constraint: str
A valid SQL syntax that can be used as a WHERE constraint in the
search query.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
For context='sdss_dr16' | 'default':
fields:
specobjid # or 'bestobjid', etc
tuple # a plate/mjd/fiber/run2d tuple
Service will always return array of 'specobjid'
value, the p/m/f tuple is extracted from the
bitmask value by the client.
catalog:
<schema>.<table> # alternative catalog to query e.g. a
# VAC from earlier DR (must support an
# ra/dec search and return a specobjid-
# like value)
For all contexts:
timeout = 600 # Query timeout
token = None # User auth token
verbose = False # Print verbose output
debug = False # Print debug messages
Returns
-------
result: array
An array of spectrum IDs appropriate for the dataset context.
Example
-------
1) Query by position:
.. code-block:: python
id_list = spec.query (0.125, 12.123, 0.1)
'''
return self._query(ra=None, dec=None, size=None,
pos=None,
region=None,
constraint=constraint,
out=out,
context=context, profile=profile, **kw)
def _query(self,
ra=None, dec=None, size=None,
pos=None,
region=None,
constraint=None, out=None,
context=None, profile=None, **kw):
'''Implementation of the query() method.
'''
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
# Process optional keyword arguments.
ofields = kw['fields'] if 'fields' in kw else self.context['id_main']
catalog = kw['catalog'] if 'catalog' in kw else self.context['catalog']
if context == 'default' or context.startswith('sdss'):
if ofields == 'tuple':
ofields = 'plate,mjd,fiberid,run2d'
timeout = kw['timeout'] if 'timeout' in kw else 600
token = kw['token'] if 'token' in kw else self.auth_token
verbose = kw['verbose'] if 'verbose' in kw else self.verbose
debug = kw['debug'] if 'debug' in kw else self.debug
# Build the query URL constraint clause.
_size = size
if region is not None:
pquery = 'q3c_poly_query(ra,dec,ARRAY%s)' % region
elif pos is not None:
pquery = 'q3c_radial_query(ra,dec,%f,%f,%f)' % \
(pos.ra.degree, pos.dec.degree, _size)
elif ra is not None and dec is not None:
pquery = 'q3c_radial_query(ra,dec,%f,%f,%f)' % (ra, dec, _size)
else:
pquery = ''
# Create the query string for the IDs, adding any user-defined
# fields or constraints.
cond = pquery
if constraint not in [None, '']:
if constraint.strip()[:5].lower() in ['limit', 'order'] or pquery == '':
cond += ' %s' % constraint
else:
cond += ' AND %s' % constraint
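# Example of the assembled condition (coordinates from the docstring example,
# constraint hypothetical): query(0.125, 12.123, 0.1, constraint="z > 0.1")
# gives cond = "q3c_radial_query(ra,dec,0.125000,12.123000,0.100000) AND z > 0.1".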
# Set service call headers.
headers = self.getHeaders(None)
# Query for the ID/fields.
_svc_url = '%s/query?' % self.svc_url # base service URL
_svc_url += 'id=&' # no ID value
_svc_url += 'fields=%s&' % quote_plus(ofields) # fields to retrieve
_svc_url += 'catalog=%s&' % quote_plus(catalog) # catalog to query
_svc_url += 'cond=%s&' % quote_plus(cond) # WHERE condition
_svc_url += 'context=%s&' % context # dataset context
_svc_url += 'profile=%s&' % profile # service profile
_svc_url += 'debug=%s&' % debug # system debug flag
_svc_url += 'verbose=%s' % False # system verbose flag
r = requests.get(_svc_url, headers=headers)
_res = spcToString(r.content)
# Query result is in CSV, convert to a named table.
res = convert(_res, outfmt='table')
if out in [None, '']:
if ofields.count(',') > 0:
return res
else:
return np.array(res[self.context['id_main']])
else:
# Note: memory expensive for large lists .....
csv_text = _res
if out.startswith('vos://'):
return storeClient.saveAs(csv_text, out)[0]
else:
with open(out, "w") as fd:
fd.write(csv_text)
fd.write('\n')
return 'OK'
# --------------------------------------------------------------------
# GETSPEC -- Retrieve spectra for a list of objects.
#
def getSpec(self, id_list, fmt='numpy', out=None, align=False,
cutout=None, context=None, profile=None, **kw):
'''Get spectra for a list of object IDs.
Usage:
getSpec(id_list, fmt='numpy', out=None, align=False, cutout=None,
context=None, profile=None, **kw)
Parameters
----------
id_list: list object
List of object identifiers.
fmt:
Return format of spectra
out:
Output filename or return to caller if None
align:
Align spectra to common wavelength grid with zero-padding
cutout:
Wavelength cutout range (as "<start>-<end>")
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
values = None
Spectrum vectors to return.
token = None
Data Lab auth token.
id_col = None
Name of ID column in input table.
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
result: object or array of objects or 'OK' string
Example
-------
1) Retrieve spectra individually:
.. code-block:: python
id_list = spec.query (0.125, 12.123, 0.1)
for id in id_list:
spec = spec.getSpec (id)
.... do something
2) Retrieve spectra in bulk:
.. code-block:: python
spec = spec.getSpec (id_list, fmt='numpy')
.... 'spec' is an array of NumPy objects that may be
different sizes
'''
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
# Process optional parameters.
values = kw['values'] if 'values' in kw else 'all'
token = kw['token'] if 'token' in kw else self.auth_token
id_col = kw['id_col'] if 'id_col' in kw else None
verbose = kw['verbose'] if 'verbose' in kw else self.verbose
debug = kw['debug'] if 'debug' in kw else self.debug
# Set service call headers.
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'X-DL-ClientVersion': __version__,
'X-DL-OriginIP': self.hostip,
'X-DL-OriginHost': self.hostname,
'X-DL-AuthToken': def_token(None)}
if debug:
print('getSpec(): in ty id_list = ' + str(type(id_list)))
# Extract the ID list from the input value.
ids = self.extractIDList(id_list)
# Force alignment for SpectrumCollection format.
if fmt.lower() == 'spectrumcollection':
align = True
if debug:
print('ty ids: ' + str(type(ids)))
# Initialize the payload.
data = {'id_list': str(ids),
'values': values,
'format': fmt,
'align': align,
'cutout': str(cutout),
'w0': 0.0,
'w1': 0.0,
'context': context,
'profile': profile,
'debug': debug,
'verbose': verbose
}
# Get the limits of the collection
url = '%s/listSpan' % self.svc_url
resp = requests.post(url, data=data, headers=headers)
v = json.loads(resp.text)
data['w0'], data['w1'] = v['w0'], v['w1']
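# The listSpan call above returns the wavelength limits (w0, w1) of the collection;
# they are forwarded to getSpec so that aligned spectra can share a common grid.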
url = '%s/getSpec' % self.svc_url
if align:
# If we're aligning columns, the server will pad the values
# and return a common array size.
resp = requests.post(url, data=data, headers=headers)
_data = np.load(BytesIO(resp.content), allow_pickle=False)
else:
# If not aligning columns, request each spectrum individually
# so we can return a list object.
_data = []
for id in ids:
data['id_list'] = str(id)
resp = requests.post(url, data=data, headers=headers)
if fmt.lower() == 'fits':
_data.append(resp.content)
else:
np_data = np.load(BytesIO(resp.content), allow_pickle=False)
_data.append(np_data)
if fmt.lower() != 'fits':
_data = np.array(_data)
if fmt.lower() == 'fits':
# Note: assumes a single file is requested.
if len(_data) == 1:
return _data[0]
else:
return _data
else:
if fmt.lower()[:5] == 'numpy': # NUMPY array
if len(_data) == 1:
return _data[0]
else:
return _data
elif fmt.lower()[:6] == 'pandas': # Pandas DataFrame
if len(_data) == 1:
return self.to_pandas(_data[0])
else:
pd_data = []
for d in _data:
pd_data.append(self.to_pandas(d))
return pd_data
elif fmt.lower()[:6] == 'tables': # Astropy Table
if len(_data) == 1:
return self.to_Table(_data[0])
else:
tb_data = []
for d in _data:
tb_data.append(self.to_Table(d))
return tb_data
elif fmt.lower()[:8] == 'spectrum': # Spectrum1D
if len(_data) == 1:
return self.to_Spectrum1D(_data[0])
elif align:
return self.to_Spectrum1D(_data)
else:
sp_data = []
for i in range(len(_data)):
sp_data.append(self.to_Spectrum1D(_data[i]))
# Convert to a SpectrumCollection object if requested.
if fmt.lower() == 'spectrumcollection':
return SpectrumCollection.from_spectra(sp_data)
else:
return sp_data
else:
raise Exception("Unknown return format '%s'" % fmt)
# --------------------------------------------------------------------
# PLOT -- Utility to batch plot a single spectrum, display plot directly.
#
def plot(self, spec, context=None, profile=None, out=None, **kw):
'''Utility to batch plot a single spectrum.
Usage:
spec.plot(id, context=None, profile=None, **kw)
Parameters
----------
spec: object ID or data array
Spectrum to be plotted. If 'spec' is a numpy array or a
Spectrum1D object the data are plotted directly, otherwise
the value is assumed to be an object ID that will be retrieved
from the service.
out: str
Output filename. If specified, plot saved as PNG.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
rest_frame - Whether or not to plot the spectra in the
rest-frame. If True, the wavelengths are assumed
to have been already shifted and no 'z' value is
required. If False, wavelengths are assumed to
be in observed frame and a 'z' value is required
to overplot absorption/emission lines. (def: True)
z - Redshift value (def: None)
xlim - Set the xrange of the plot
ylim - Set the yrange of the plot
title - Plot title (def: object ID)
xlabel - Plot x-axis label (def: wavelength)
ylabel - Plot y-axis label (def: flux units)
out - Saved output filename.
values - A comma-delimited string of which values to plot,
a combination of 'flux,model,sky,ivar'
mark_lines - Which lines to mark. No lines marked if None or
an empty string, otherwise one of 'em|abs|all|both'
grid - Plot grid lines (def: True)
dark - Dark-mode plot colors (def: True)
em_lines - List of emission lines to plot. If not given,
all the lines in the default list will be plotted.
abs_lines - List of absorption lines to plot. If not given,
all the lines in the default list will be plotted.
spec_args - Plotting kwargs for the spectrum
model_args - Plotting kwargs for the model
ivar_args - Plotting kwargs for the ivar
sky_args - Plotting kwargs for the sky
Returns
-------
Nothing
Example
-------
1) Plot a single spectrum, save to a virtual storage file
.. code-block:: python
spec.plot (specID, context='sdss_dr16', out='vos://spec.png')
'''
verbose = kw['verbose'] if 'verbose' in kw else self.verbose
debug = kw['debug'] if 'debug' in kw else self.debug
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
# See whether we've been passed a spectrum ID or a data.
_id = None
if isinstance(spec, int) or \
isinstance(spec, np.int64) or isinstance(spec, np.uint64) or \
isinstance(spec, tuple) or \
isinstance(spec, str):
_id = spec
dlist = sp_client.getSpec(spec, context=context, profile=profile)
data = dlist
wavelength = 10.0**data['loglam']
flux = data['flux']
model = data['model']
sky = data['sky']
ivar = data['ivar']
else:
if isinstance(spec, np.ndarray) or \
isinstance(spec, pd.core.frame.DataFrame):
wavelength = 10.0**spec['loglam']
flux = spec['flux']
model = spec['model']
sky = spec['sky']
ivar = spec['ivar']
elif isinstance(spec, Spectrum1D):
wavelength = np.array(spec.spectral_axis.value)
flux = spec.flux
model = spec.meta['model']
sky = spec.meta['sky']
ivar = spec.meta['ivar']
# Get the wavelength frame and redshift for the plot.
if 'z' in kw:
z = float(kw['z']) # use supplied value
del(kw['z'])
else:
z = None
if _id is not None:
# Query for the redshift field of the catalog.
headers = self.getHeaders(None)
_svc_url = '%s/query?' % self.svc_url # base service URL
_svc_url += "id=%s&" % str(_id)
_svc_url += "fields=%s&" % self.context['redshift']
_svc_url += "catalog=%s&" % self.context['catalog']
_svc_url += "cond=&"
_svc_url += "context=%s&" % context
_svc_url += "profile=%s&" % profile
_svc_url += "debug=%s&" % debug
_svc_url += "verbose=%s" % verbose
r = requests.get(_svc_url, headers=headers)
if r.status_code == 200:
_val = spcToString(r.content).split('\n')[1:-1][0]
z = float(_val)
if 'rest_frame' in kw:
rest_frame = (str(kw['rest_frame']).lower() == 'true')
del(kw['rest_frame'])
else:
rest_frame = True
if self.context['rest_frame'] == 'false':
# Data is in the observed rest frame, convert to rest frame if we
# have a redshift.
if rest_frame:
if z is not None:
wavelength /= (1 + z)
else:
warnings.warn('Redshift needed to plot in rest frame.')
rest_frame = False
else:
# Data is already in rest frame, convert to observed frame if we
# have a redshift.
if not rest_frame:
if z is not None:
wavelength *= (1 + z)
else:
warnings.warn('Redshift needed to plot in observed frame.')
rest_frame = True
self._plotSpec(wavelength, flux, model=model, sky=sky, ivar=ivar,
rest_frame=rest_frame, z=z, **kw)
# --------------------------------------------------------------------
# PROSPECT -- Utility wrapper to launch the interactive PROSPECT tool.
#
def prospect(self, spec, context=None, profile=None, **kw):
'''Utility wrapper to launch the interactive PROSPECT tool.
Usage:
stat = prospect(spec, context=None, profile=None, **kw)
Parameters
----------
spec: object ID or data array
Spectrum to be plotted. If 'spec' is a numpy array or a
Spectrum1D object the data are plotted directly, otherwise
the value is assumed to be an object ID that will be retrieved
from the service.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
TBD
Returns
-------
result: str
Status 'OK' string or error message.
Example
-------
1) Plot ....
.. code-block:: python
stat = spec.prospect (specID)
'''
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
pass
# --------------------------------------------------------------------
# PREVIEW -- Get a preview plot of a spectrum
#
def preview(self, spec, context=None, profile=None, **kw):
'''Get a preview plot of a spectrum
Usage:
spec.preview(spec, context=None, profile=None, **kw):
Parameters
----------
spec: objectID
Object identifiers.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
N/A
Returns
-------
image: A PNG image object
Example
-------
1) Display a preview plot a given spectrum:
.. code-block:: python
from IPython.display import display, Image
display(Image(spec.preview(id),
format='png', width=400, height=100, unconfined=True))
'''
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
url = self.svc_url + '/preview?id=%s' % str(spec)
url = url + '&context=%s&profile=%s' % (context, profile)
try:
if USE_CURL:
return Image.open(BytesIO(self.curl_get(url)))
else:
return Image.open(BytesIO(requests.get(url, timeout=2).content))
except Exception as e:
raise Exception("Error getting plot data: " + str(e))
# --------------------------------------------------------------------
# PLOTGRID -- Get a grid of preview plots of a spectrum list.
#
def plotGrid(self, id_list, nx, ny, page=0,
context=None, profile=None, **kw):
'''Get a grid of preview plots of a spectrum list.
Usage:
image = plotGrid(id_list, nx, ny, page=0,
context=None, profile=None, **kw):
Parameters
----------
id_list: list object
List of object identifiers.
nx: int
Number of plots in the X dimension
ny: int
Number of plots in the Y dimension
page: int
Dataset context.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
image: A PNG image object
Example
-------
1) Display a 5x5 grid of preview plots for a list:
.. code-block:: python
npages = int(np.ceil(len(id_list) / 25))
for pg in range(npages):
data = spec.plotGrid(id_list, 5, 5, page=pg)
display(Image(data, format='png',
width=400, height=100, unconfined=True))
'''
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
# Process optional parameters.
token = kw['token'] if 'token' in kw else self.auth_token
verbose = kw['verbose'] if 'verbose' in kw else self.verbose
debug = kw['debug'] if 'debug' in kw else self.debug
fmt = kw['fmt'] if 'fmt' in kw else 'png'
# Set service call headers.
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'X-DL-ClientVersion': __version__,
'X-DL-OriginIP': self.hostip,
'X-DL-OriginHost': self.hostname,
'X-DL-AuthToken': token}
# Build the query URL string.
url = '%s/plotGrid' % self.svc_url
if isinstance(id_list, list) or isinstance(id_list, np.ndarray):
n_ids = len(id_list)
sz_grid = nx * ny
if sz_grid >= n_ids: # Use the whole list.
ids = id_list
p_start = 0
p_end = len(id_list) - 1
else:
p_start = page * sz_grid
p_end = min(n_ids, p_start + sz_grid)
ids = id_list[p_start:p_end]
else:
ids = id_list
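# Worked example (illustrative values, not from the source): with nx=ny=5 the grid
# holds 25 plots, so page=1 selects id_list[25:50], clipped to the length of the list.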
# Initialize the payload.
data = {'id_list': str(list(ids)),
'ncols': ny,
'context': context,
'profile': profile,
'debug': debug,
'verbose': verbose
}
resp = requests.post(url, data=data, headers=headers)
if fmt == 'png':
return Image.open(BytesIO(resp.content))
else:
return resp.content
# --------------------------------------------------------------------
# STACKEDIMAGE -- Get a stacked image of a list of spectra.
#
def stackedImage(self, id_list, align=False, yflip=False,
context=None, profile=None, **kw):
'''Get a stacked image of a list of spectra.
Usage:
image = stackedImage(id_list, align=False, yflip=False,
context=None, profile=None, **kw)
Parameters
----------
id_list: list object
List of spectrum identifiers.
context: str
Dataset context.
profile: str
Data service profile.
**kw: dict
Optional keyword arguments. Supported keywords currently include:
verbose = False
Print verbose messages during retrieval
debug = False
Print debug messages during retrieval
Returns
-------
result: ....
Example
-------
1) Get a stacked image for a list of object IDs:
.. code-block:: python
id_list = spec.query (0.125, 12.123, 0.1)
image = spec.stackedImage (id_list)
'''
if context in [None, '']:
context = self.svc_context
if profile in [None, '']:
profile = self.svc_profile
# Process optional parameters.
scale = kw['scale'] if 'scale' in kw else (1.0, 1.0)
if isinstance(scale, float):
xscale = yscale = scale
else:
xscale = scale[0]
yscale = scale[1]
thickness = kw['thickness'] if 'thickness' in kw else 1
inverse = kw['inverse'] if 'inverse' in kw else False
cmap = kw['cmap'] if 'cmap' in kw else 'gray'
width = kw['width'] if 'width' in kw else 0
height = kw['height'] if 'height' in kw else 0
token = kw['token'] if 'token' in kw else self.auth_token
verbose = kw['verbose'] if 'verbose' in kw else self.verbose
debug = kw['debug'] if 'debug' in kw else self.debug
fmt = kw['fmt'] if 'fmt' in kw else 'png'
# Set service call headers.
headers = {'Content-Type': 'application/x-www-form-urlencoded',
'X-DL-ClientVersion': __version__,
'X-DL-OriginIP': self.hostip,
'X-DL-OriginHost': self.hostname,
'X-DL-AuthToken': token}
# Build the query URL string.
url = '%s/stackedImage' % self.svc_url
# Initialize the payload.
data = {'id_list': str(list(id_list)),
'context': context,
'xscale': xscale,
'yscale': yscale,
'thickness': thickness,
'cmap': cmap,
'inverse': inverse,
'width': width,
'height': height,
'profile': profile,
'debug': debug,
'verbose': verbose
}
resp = requests.post(url, data=data, headers=headers)
if fmt == 'png':
return Image.open(BytesIO(resp.content))
else:
return resp.content
###################################################
# STATIC UTILITY METHODS
###################################################
# --------------------------------------------------------------------
# _PLOTSPEC -- Plot a spectrum.
#
@staticmethod
def _plotSpec(wavelength, flux, model=None, sky=None, ivar=None,
rest_frame=True, z=0.0, xlim=None, ylim=None,
title=None, xlabel=None, ylabel=None, out=None, **kw):
"""Plot a spectrum.
Inputs:
* wavelength - Array of spectrum wavelength values to plot.
* flux - Array of spectrum flux values to plot.
* model - Array of model spectrum values to plot (if not None).
* sky - Array of sky spectrum values to plot (if not None).
* ivar - Array of inverse-variance values to plot (if not None).
* rest_frame - Whether or not to plot the spectra in the
rest-frame. If True, the wavelengths are assumed
to have been already shifted and no 'z' value is
required. If False, wavelengths are assumed to
be in observed frame and a 'z' value is required
to overplot absorption/emission lines. (def: True)
* z - Redshift (def: None)
* xlim - Setting the xrange of the plot (e.g. '[5000.0,6000.0]')
* ylim - Setting the yrange of the plot (e.g. '[0.0,25.0]')
* title - Plot title (def: object ID)
* xlabel - Plot x-axis label (def: wavelength)
* ylabel - Plot y-axis label (def: flux units)
* out - Saved output filename.
Optional kwargs:
* values - A comma-delimited string of which values to plot, a
combination of 'flux,model,sky,ivar'
* mark_lines - Which lines to mark. No lines marked if None or
an empty string, otherwise one of 'em|abs|all|both'
* grid - Plot grid lines (def: True)
* dark - Dark-mode plot colors (def: True)
* em_lines - List of emission lines to plot. If not given, all
the lines in the default list will be plotted.
* abs_lines - List of absorption lines to plot. If not given,
all the lines in the default list will be plotted.
* spec_args - Plotting kwargs for the spectrum.
* model_args - Plotting kwargs for the model.
* ivar_args - Plotting kwargs for the ivar.
* sky_args - Plotting kwargs for the sky.
"""
def labelLines(lines, ax, color, yloc):
'''Select only those lines that are visible in
the x-range of the plot.
'''
if rest_frame is False and z is None:
warnings.warn(
'Redshift required to mark lines in observed frame')
return
for ii in range(len(lines)):
# If rest_frame=False, shift lines to the observed frame.
lam = lines[ii]['lambda']
if rest_frame is False:
lam = lam * (1+z)
# Plot only lines within the x-range of the plot.
if ((lam > xbounds[0]) & (lam < xbounds[1])):
ax.axvline(lam, color=color, lw=1.0, linestyle=':')
ax.annotate(lines[ii]['label'], xy=(lam, yloc),
xycoords=ax.get_xaxis_transform(),
fontsize=12, rotation=90, color=color)
# Process the optional kwargs.
dark = kw['dark'] if 'dark' in kw else True
grid = kw['grid'] if 'grid' in kw else True
mark_lines = kw['mark_lines'] if 'mark_lines' in kw else 'all'
em_lines = kw['em_lines'] if 'em_lines' in kw else None
abs_lines = kw['abs_lines'] if 'abs_lines' in kw else None
values = kw['values'] if 'values' in kw else 'flux,model'
if 'spec_args' in kw:
spec_args = kw['spec_args']
else:
spec_args = {'color': '#ababab', 'linewidth': 1.0, 'alpha': 1.0}
if 'model_args' in kw:
model_args = kw['model_args']
else:
model_args = {'color': 'red', 'linewidth': 1.2}
if 'sky_args' in kw:
sky_args = kw['sky_args']
else:
sky_args = {'color': 'brown', 'linewidth': 1.0}
if 'ivar_args' in kw:
ivar_args = kw['ivar_args']
else:
ivar_args = {'color': 'blue', 'linewidth': 1.0}
# Setting up the plot
if dark:
fig = plt.figure(dpi=100, figsize=(12, 5), facecolor='#2F4F4F')
plt.rcParams['axes.facecolor'] = '#121212'
plt.rcParams['axes.edgecolor'] = '#00FFFF'
else:
fig = plt.figure(dpi=100, figsize=(12, 5))
plt.rcParams['axes.facecolor'] = '#FFFFFF'
ax = fig.add_subplot(111)
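# In the branches below, multiplying by (ivar > 0) masks bad pixels (those with zero
# inverse variance) to zero before plotting whenever an ivar array is supplied.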
if 'flux' in values:
if ivar is None:
ax.plot(wavelength, flux, label='Flux', **spec_args)
else:
ax.plot(wavelength, flux * (ivar > 0), label='Flux',
**spec_args)
if 'model' in values and model is not None:
if ivar is None:
ax.plot(wavelength, model, label='Model', **model_args)
else:
ax.plot(wavelength, model * (ivar > 0), label='Model',
**model_args)
if 'sky' in values and sky is not None:
if ivar is None:
ax.plot(wavelength, sky, label='Sky', **sky_args)
else:
ax.plot(wavelength, sky * (ivar > 0), label='Sky', **sky_args)
if 'ivar' in values and ivar is not None:
ax.plot(wavelength, ivar * (ivar > 0), label='Ivar', **ivar_args)
plt.xlim(xlim)
plt.ylim(ylim)
am_color = ('#00FF00' if dark else 'black')
if xlabel is None:
if rest_frame:
plt.xlabel('Rest Wavelength ($\AA$)', color=am_color)
else:
if z is not None:
plt.xlabel('Observed Wavelength ($\AA$) z=%.3g' % z,
color=am_color)
else:
plt.xlabel('Observed Wavelength ($\AA$) z=(unknown)',
color=am_color)
else:
plt.xlabel(xlabel, color=am_color)
if ylabel is None:
ylab = '$F_{\lambda}$ ($10^{-17}~ergs~s^{-1}~cm^{-2}~\AA^{-1}$)'
plt.ylabel(ylab, color=am_color)
else:
plt.ylabel(ylabel, color=am_color)
if dark:
ax.tick_params(color='cyan', labelcolor='yellow')
if grid:
plt.grid(color='gray', linestyle='dashdot', linewidth=0.5)
if title not in [None, '']:
ax.set_title(title, c=am_color)
# Plotting Absorption/Emission lines - only works if either of the
# lines is set to True
if mark_lines not in [None, '']:
if mark_lines.lower() in ['all', 'both']:
opt = 'ea'
else:
opt = mark_lines.lower()
# Select any lines listed by the user.
e_lines = _em_lines
if em_lines is not None:
e_lines = list(filter(lambda x: x['name'] in em_lines,
_em_lines))
a_lines = _abs_lines
if abs_lines is not None:
a_lines = list(filter(lambda x: x['name'] in abs_lines,
_abs_lines))
xbounds = ax.get_xbound() # Getting the x-range of the plot
lcol = ['#FFFF00', '#00FFFF'] if dark else ['#FF0000', '#0000FF']
if 'e' in opt:
labelLines(e_lines, ax, lcol[0], 0.875)
if 'a' in opt:
labelLines(a_lines, ax, lcol[1], 0.05)
leg = ax.legend()
if dark:
for text in leg.get_texts():
plt.setp(text, color='w')
if out is not None:
plt.savefig(out)
else:
plt.show()
###################################################
# PRIVATE UTILITY METHODS
###################################################
def debug(self, debug_val):
'''Toggle debug flag.
'''
self.debug = debug_val
def retBoolValue(self, url):
'''Utility method to call a boolean service at the given URL.
'''
response = ""
try:
# Add the auth token to the request header.
if self.auth_token != None:
headers = {'X-DL-AuthToken': self.auth_token}
r = requests.get(url, headers=headers)
else:
r = requests.get(url)
response = spcToString(r.content)
if r.status_code != 200:
raise Exception(r.content)
except Exception:
return spcToString(r.content)
else:
return response
def getHeaders(self, token):
'''Get default tracking headers.
'''
tok = def_token(token)
user, uid, gid, hash = tok.strip().split('.', 3)
hdrs = {'Content-Type': 'text/ascii',
'X-DL-ClientVersion': __version__,
'X-DL-OriginIP': self.hostip,
'X-DL-OriginHost': self.hostname,
'X-DL-User': user,
'X-DL-AuthToken': tok}
return hdrs
def getFromURL(self, svc_url, path, token):
'''Get something from a URL. Return a 'response' object.
'''
try:
hdrs = self.getHeaders(token)
resp = requests.get("%s%s" % (svc_url, path), headers=hdrs)
except Exception as e:
raise dlSpecError(str(e))
return resp
def curl_get(self, url):
'''Utility routine to use cURL to return a URL
'''
b_obj = BytesIO()
crl = pycurl.Curl()
crl.setopt(crl.URL, url)
crl.setopt(crl.WRITEDATA, b_obj)
crl.perform()
crl.close()
return b_obj.getvalue()
def extractIDList(self, id_list, id_col=None):
'''Extract a 1-D array or single identifier from an input ID type.
'''
if isinstance(id_list, str):
# Input is a string. This may be a text string of identifiers,
# a filename or a CSV table.
if os.path.exists(id_list):
# Read list from a local file.
with open(id_list, 'r') as fd:
_list = fd.read()
if _list.startswith(self.context['id_main']): # CSV string?
ids = _list.split('\n')[1:-1]
else:
ids = _list.split('\n')[:-1]
elif id_list.startswith('vos://'):
# Read list from virtual storage.
ids = storeClient.get(id_list).split('\n')[:-1]
elif id_list.find(',') > 0 or \
id_list.startswith(self.context['id_main']): # CSV string?
pdata = convert(id_list, outfmt='pandas')
ids = np.array(pdata[self.context['id_main']])
else:
ids = np.array([id_list])
el = ids[0]
if isinstance(el, str):
cnv_list = []
if el[0] == '(': # Assume a tuple
for el in ids:
if el != '':
cnv_list.append(el[1:-1])
else:
for el in ids:
if el != '':
cnv_list.append(int(el))
ids = np.array(cnv_list)
elif isinstance(id_list, int) or \
isinstance(id_list, np.int64) or \
isinstance(id_list, np.uint64) or\
isinstance(id_list, tuple):
# Input is a single integer or tuple type (e.g. a specobjid
# or a (plate,mjd,fiber) tuple), simply convert it to a list.
ids = [id_list]
elif isinstance(id_list, list):
# Input is already a list type, just return it.
ids = id_list
elif isinstance(id_list, pd.core.frame.DataFrame):
try:
ids = id_list[self.context['id_main']].tolist()
except KeyError as e:
ids = None
elif isinstance(id_list, np.ndarray):
# Input is a numpy array. If it's a 1-D array assume it contains
# identifiers and convert to a list, otherwise try to extract the
# id column.
if id_list.ndim == 1 and id_list.dtype.names is None:
ids = id_list.tolist()
else:
try:
if id_list.dtype.names is not None:
# structured array
ids = id_list[self.context['id_main']].tolist()
else:
# ndarray, use first column
ids = id_list[:, 0].tolist()
except ValueError as e:
ids = None
else:
ids = np.array(id_list[self.context['id_main']])
return ids
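# Summary of the accepted inputs handled above: a single int or tuple identifier, a CSV
# string, a local filename, a 'vos://' path, a Python list, a pandas DataFrame, or a
# numpy (structured) array containing the catalog's 'id_main' column.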
# ###################################
# Spectroscopic Data Client Handles
# ###################################
def getClient(context='default', profile='default'):
'''Get a new instance of the specClient client.
Parameters
----------
context: str
Dataset context
profile: str
Service profile
Returns
-------
client: specClient
A specClient object
Example
-------
.. code-block:: python
new_client = specClient.getClient()
'''
return specClient(context=context, profile=profile)
# Get the default client object.
sp_client = client = getClient(context='default', profile='default')
# ##########################################
# Patch the docstrings for module functions
# ##########################################
set_svc_url.__doc__ = sp_client.set_svc_url.__doc__
get_svc_url.__doc__ = sp_client.get_svc_url.__doc__
set_profile.__doc__ = sp_client.set_profile.__doc__
get_profile.__doc__ = sp_client.get_profile.__doc__
set_context.__doc__ = sp_client.set_context.__doc__
get_context.__doc__ = sp_client.get_context.__doc__
# Define a set of spectral lines.
#
# This is the set of emission lines from the SDSS spZline files.
# Wavelengths are in air for lambda > 2000, vacuum for lambda < 2000.
#
# Emission Lines
_em_lines = [
{"name": "Ly-alpha", "lambda": 1215.67, "label": "Ly$\\alpha$"},
{"name": "N V 1240", "lambda": 1240.81, "label": "N V"},
{"name": "C IV 1549", "lambda": 1549.48, "label": "C IV"},
{"name": "He II 1640", "lambda": 1640.42, "label": "He II"},
{"name": "C III] 1908", "lambda": 1908.734, "label": "C III]"},
{"name": "Mg II 2799", "lambda": 2800.315, "label": "Mg II"},
{"name": "[O II] 3725", "lambda": 3727.092, "label": " "},
{"name": "[O II] 3727", "lambda": 3729.875, "label": "[O II]"},
{"name": "[Ne III] 3868", "lambda": 3869.857, "label": "[Ne III]"},
{"name": "H-zeta", "lambda": 3890.151, "label": "H$\\zeta$"},
{"name": "[Ne III] 3970", "lambda": 3971.123, "label": "[Ne III]"},
{"name": "H-epsilon", "lambda": 3971.195, "label": "H$\\epsilon$"},
{"name": "H-delta", "lambda": 4102.892, "label": "H$\\delta$"},
{"name": "H-gamma", "lambda": 4341.684, "label": "H$\\beta$"},
{"name": "[O III] 4363", "lambda": 4364.435, "label": "[O III]"},
{"name": "He II 4685", "lambda": 4686.991, "label": "He II"},
{"name": "H-beta", "lambda": 4862.683, "label": "H$\\beta$"},
{"name": "[O III] 4959", "lambda": 4960.294, "label": "[O III]"},
{"name": "[O III] 5007", "lambda": 5008.239, "label": "[O III]"},
{"name": "He II 5411", "lambda": 5413.025, "label": "He II"},
{"name": "[O I] 5577", "lambda": 5578.888, "label": "[O I]"},
{"name": "[N II] 5755", "lambda": 5756.186, "label": "[Ne II]"},
{"name": "He I 5876", "lambda": 5877.308, "label": "He I"},
{"name": "[O I] 6300", "lambda": 6302.046, "label": "[O I]"},
{"name": "[S III] 6312", "lambda": 6313.806, "label": "[S III]"},
{"name": "[O I] 6363", "lambda": 6365.535, "label": "[O I]"},
{"name": "[N II] 6548", "lambda": 6549.859, "label": "[N II]"},
{"name": "H-alpha", "lambda": 6564.614, "label": "H$\\alpha$"},
{"name": "[N II] 6583", "lambda": 6585.268, "label": "[N II]"},
{"name": "[S II] 6716", "lambda": 6718.294, "label": "[S II]"},
{"name": "[S II] 6730", "lambda": 6732.678, "label": "[S II]"},
{"name": "[Ar III] 7135", "lambda": 7137.758, "label": "[Ar III]"}
]
# Absorption lines
_abs_lines = [
{"name": "H12", "lambda": 3751.22, "label": "H12"},
{"name": "H11", "lambda": 3771.70, "label": "H11"},
{"name": "H10", "lambda": 3798.98, "label": "H10"},
{"name": "H9", "lambda": 3836.48, "label": "H9"},
{"name": "H-zeta", "lambda": 3890.151, "label": "H$\\zeta$"},
{"name": "K (Ca II 3933)", "lambda": 3934.814, "label": "K (Ca II)"},
{"name": "H (Ca II 3968)", "lambda": 3969.623, "label": "H (Ca II)"},
{"name": "H-epsilon", "lambda": 3971.195, "label": "H$\\epsilon$"},
{"name": "H-delta", "lambda": 4102.892, "label": "H$\\delta$"},
{"name": "G (Ca I 4307)", "lambda": 4308.952, "label": "G (Ca I)"},
{"name": "H-gamma", "lambda": 4341.684, "label": "H$\\gamma$"},
{"name": "H-beta", "lambda": 4862.683, "label": "H$\\beta$"},
{"name": "Mg I 5183", "lambda": 5185.048, "label": " "},
{"name": "Mg I 5172", "lambda": 5174.125, "label": " "},
{"name": "Mg I 5167", "lambda": 5168.762, "label": "Mg I"},
{"name": "D2 (Na I 5889)", "lambda": 5891.582, "label": " "},
{"name": "D1 (Na I 5895)", "lambda": 5897.554, "label": "D1,2 (Na I)"},
{"name": "H-alpha", "lambda": 6564.614, "label": "H$\\alpha$"}
]
def airtovac(l):
'''Convert air wavelengths (greater than 2000A) to vacuum wavelengths.
'''
if l < 2000.0:
return l
vac = l
for iter in range(2):
sigma2 = (1.0e4 / vac) * (1.0e4 / vac)
fact = 1.0 + 5.792105e-2 / (238.0185 - sigma2) + \
1.67917e-3 / (57.362 - sigma2)
vac = l * fact
return vac
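# Illustrative expected values (not part of the original module, assuming the standard
# Edlen-style refraction factor used above):
#   airtovac(1500.0)  -> 1500.0   (below 2000 A the input is returned unchanged)
#   airtovac(6562.80) -> ~6564.6  (H-alpha: air wavelength converted to vacuum)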
|
[
"matplotlib.pyplot.savefig",
"socket.socket",
"dl.helpers.utils.convert",
"os.path.isfile",
"matplotlib.pyplot.figure",
"dl.Util.def_token",
"requests.post",
"specutils.SpectrumCollection.from_spectra",
"pandas.DataFrame",
"warnings.simplefilter",
"json.loads",
"os.path.exists",
"matplotlib.pyplot.setp",
"socket.gethostname",
"requests.get",
"specutils.Spectrum1D",
"io.BytesIO",
"dl.Util.multimethod",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylim",
"logging.disable",
"dl.storeClient.saveAs",
"matplotlib.pyplot.ylabel",
"pycurl.Curl",
"matplotlib.pyplot.grid",
"astropy.units.Unit",
"matplotlib.pyplot.xlim",
"astropy.table.Table",
"urllib.parse.quote_plus",
"numpy.array",
"warnings.warn",
"matplotlib.pyplot.xlabel",
"astropy.nddata.InverseVariance",
"dl.storeClient.get",
"logging.getLogger"
] |
[((2320, 2367), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""', 'AstropyWarning'], {}), "('ignore', AstropyWarning)\n", (2341, 2367), False, 'import warnings\n'), ((2384, 2416), 'logging.disable', 'logging.disable', (['logging.WARNING'], {}), '(logging.WARNING)\n', (2399, 2416), False, 'import logging\n'), ((3465, 3485), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (3483, 3485), False, 'import socket\n'), ((3826, 3863), 'socket.socket', 'socket.socket', ([], {'type': 'socket.SOCK_DGRAM'}), '(type=socket.SOCK_DGRAM)\n', (3839, 3863), False, 'import socket\n'), ((4600, 4633), 'os.path.isfile', 'os.path.isfile', (['"""/tmp/SPEC_DEBUG"""'], {}), "('/tmp/SPEC_DEBUG')\n", (4614, 4633), False, 'import os\n'), ((4644, 4679), 'os.path.isfile', 'os.path.isfile', (['"""/tmp/SPEC_VERBOSE"""'], {}), "('/tmp/SPEC_VERBOSE')\n", (4658, 4679), False, 'import os\n'), ((7604, 7632), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(1)', '(False)'], {}), "('spc', 1, False)\n", (7615, 7632), False, 'from dl.Util import multimethod\n'), ((7739, 7767), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(0)', '(False)'], {}), "('spc', 0, False)\n", (7750, 7767), False, 'from dl.Util import multimethod\n'), ((8800, 8828), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(1)', '(False)'], {}), "('spc', 1, False)\n", (8811, 8828), False, 'from dl.Util import multimethod\n'), ((8932, 8960), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(0)', '(False)'], {}), "('spc', 0, False)\n", (8943, 8960), False, 'from dl.Util import multimethod\n'), ((11326, 11354), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(3)', '(False)'], {}), "('spc', 3, False)\n", (11337, 11354), False, 'from dl.Util import multimethod\n'), ((11744, 11772), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(2)', '(False)'], {}), "('spc', 2, False)\n", (11755, 11772), False, 'from dl.Util import multimethod\n'), ((12160, 12188), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(1)', '(False)'], {}), "('spc', 1, False)\n", (12171, 12188), False, 'from dl.Util import multimethod\n'), ((12576, 12604), 'dl.Util.multimethod', 'multimethod', (['"""spc"""', '(0)', '(False)'], {}), "('spc', 0, False)\n", (12587, 12604), False, 'from dl.Util import multimethod\n'), ((30644, 30672), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(1)', '(True)'], {}), "('_spc', 1, True)\n", (30655, 30672), False, 'from dl.Util import multimethod\n'), ((30866, 30894), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(0)', '(True)'], {}), "('_spc', 0, True)\n", (30877, 30894), False, 'from dl.Util import multimethod\n'), ((31583, 31611), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(1)', '(True)'], {}), "('_spc', 1, True)\n", (31594, 31611), False, 'from dl.Util import multimethod\n'), ((31805, 31833), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(0)', '(True)'], {}), "('_spc', 0, True)\n", (31816, 31833), False, 'from dl.Util import multimethod\n'), ((35033, 35061), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(3)', '(True)'], {}), "('_spc', 3, True)\n", (35044, 35061), False, 'from dl.Util import multimethod\n'), ((35461, 35489), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(2)', '(True)'], {}), "('_spc', 2, True)\n", (35472, 35489), False, 'from dl.Util import multimethod\n'), ((35887, 35915), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(1)', '(True)'], {}), "('_spc', 1, True)\n", (35898, 35915), False, 'from dl.Util import multimethod\n'), 
((36313, 36341), 'dl.Util.multimethod', 'multimethod', (['"""_spc"""', '(0)', '(True)'], {}), "('_spc', 0, True)\n", (36324, 36341), False, 'from dl.Util import multimethod\n'), ((2417, 2447), 'logging.getLogger', 'logging.getLogger', (['"""specutils"""'], {}), "('specutils')\n", (2434, 2447), False, 'import logging\n'), ((25470, 25485), 'dl.Util.def_token', 'def_token', (['None'], {}), '(None)\n', (25479, 25485), False, 'from dl.Util import def_token\n'), ((31397, 31435), 'requests.get', 'requests.get', (['svc_url'], {'headers': 'headers'}), '(svc_url, headers=headers)\n', (31409, 31435), False, 'import requests\n'), ((32336, 32374), 'requests.get', 'requests.get', (['svc_url'], {'headers': 'headers'}), '(svc_url, headers=headers)\n', (32348, 32374), False, 'import requests\n'), ((32879, 32917), 'requests.get', 'requests.get', (['svc_url'], {'headers': 'headers'}), '(svc_url, headers=headers)\n', (32891, 32917), False, 'import requests\n'), ((33621, 33648), 'astropy.units.Unit', 'u.Unit', (['"""erg cm-2 s-1 AA-1"""'], {}), "('erg cm-2 s-1 AA-1')\n", (33627, 33648), True, 'from astropy import units as u\n'), ((33671, 33721), 'astropy.nddata.InverseVariance', 'InverseVariance', (["(npy_data['ivar'] / flux_unit ** 2)"], {}), "(npy_data['ivar'] / flux_unit ** 2)\n", (33686, 33721), False, 'from astropy.nddata import InverseVariance\n'), ((33738, 33815), 'specutils.Spectrum1D', 'Spectrum1D', ([], {'spectral_axis': 'lamb', 'flux': 'flux', 'uncertainty': 'uncertainty', 'mask': 'mask'}), '(spectral_axis=lamb, flux=flux, uncertainty=uncertainty, mask=mask)\n', (33748, 33815), False, 'from specutils import Spectrum1D\n'), ((34353, 34410), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'npy_data', 'columns': 'npy_data.dtype.names'}), '(data=npy_data, columns=npy_data.dtype.names)\n', (34365, 34410), True, 'import pandas as pd\n'), ((34713, 34761), 'astropy.table.Table', 'Table', ([], {'data': 'npy_data', 'names': 'npy_data.dtype.names'}), '(data=npy_data, names=npy_data.dtype.names)\n', (34718, 34761), False, 'from astropy.table import Table\n'), ((42285, 42324), 'requests.get', 'requests.get', (['_svc_url'], {'headers': 'headers'}), '(_svc_url, headers=headers)\n', (42297, 42324), False, 'import requests\n'), ((42438, 42467), 'dl.helpers.utils.convert', 'convert', (['_res'], {'outfmt': '"""table"""'}), "(_res, outfmt='table')\n", (42445, 42467), False, 'from dl.helpers.utils import convert\n'), ((46836, 46882), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': 'headers'}), '(url, data=data, headers=headers)\n', (46849, 46882), False, 'import requests\n'), ((46895, 46916), 'json.loads', 'json.loads', (['resp.text'], {}), '(resp.text)\n', (46905, 46916), False, 'import json\n'), ((62595, 62641), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': 'headers'}), '(url, data=data, headers=headers)\n', (62608, 62641), False, 'import requests\n'), ((65593, 65639), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': 'headers'}), '(url, data=data, headers=headers)\n', (65606, 65639), False, 'import requests\n'), ((71784, 71798), 'matplotlib.pyplot.xlim', 'plt.xlim', (['xlim'], {}), '(xlim)\n', (71792, 71798), True, 'from matplotlib import pyplot as plt\n'), ((71807, 71821), 'matplotlib.pyplot.ylim', 'plt.ylim', (['ylim'], {}), '(ylim)\n', (71815, 71821), True, 'from matplotlib import pyplot as plt\n'), ((75098, 75114), 'dl.Util.def_token', 'def_token', (['token'], {}), '(token)\n', (75107, 75114), False, 'from dl.Util import def_token\n'), 
((75916, 75925), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (75923, 75925), False, 'from io import BytesIO\n'), ((75940, 75953), 'pycurl.Curl', 'pycurl.Curl', ([], {}), '()\n', (75951, 75953), False, 'import pycurl\n'), ((30202, 30230), 'requests.get', 'requests.get', (['url'], {'timeout': '(2)'}), '(url, timeout=2)\n', (30214, 30230), False, 'import requests\n'), ((31238, 31253), 'dl.Util.def_token', 'def_token', (['None'], {}), '(None)\n', (31247, 31253), False, 'from dl.Util import def_token\n'), ((31529, 31549), 'json.loads', 'json.loads', (['profiles'], {}), '(profiles)\n', (31539, 31549), False, 'import json\n'), ((32177, 32192), 'dl.Util.def_token', 'def_token', (['None'], {}), '(None)\n', (32186, 32192), False, 'from dl.Util import def_token\n'), ((32468, 32488), 'json.loads', 'json.loads', (['contexts'], {}), '(contexts)\n', (32478, 32488), False, 'import json\n'), ((33008, 33028), 'json.loads', 'json.loads', (['catalogs'], {}), '(catalogs)\n', (33018, 33028), False, 'import json\n'), ((33536, 33563), 'astropy.units.Unit', 'u.Unit', (['"""erg cm-2 s-1 AA-1"""'], {}), "('erg cm-2 s-1 AA-1')\n", (33542, 33563), True, 'from astropy import units as u\n'), ((41785, 41804), 'urllib.parse.quote_plus', 'quote_plus', (['ofields'], {}), '(ofields)\n', (41795, 41804), False, 'from urllib.parse import quote_plus\n'), ((41863, 41882), 'urllib.parse.quote_plus', 'quote_plus', (['catalog'], {}), '(catalog)\n', (41873, 41882), False, 'from urllib.parse import quote_plus\n'), ((41935, 41951), 'urllib.parse.quote_plus', 'quote_plus', (['cond'], {}), '(cond)\n', (41945, 41951), False, 'from urllib.parse import quote_plus\n'), ((45919, 45934), 'dl.Util.def_token', 'def_token', (['None'], {}), '(None)\n', (45928, 45934), False, 'from dl.Util import def_token\n'), ((47165, 47211), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': 'headers'}), '(url, data=data, headers=headers)\n', (47178, 47211), False, 'import requests\n'), ((70526, 70583), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(100)', 'figsize': '(12, 5)', 'facecolor': '"""#2F4F4F"""'}), "(dpi=100, figsize=(12, 5), facecolor='#2F4F4F')\n", (70536, 70583), True, 'from matplotlib import pyplot as plt\n'), ((70726, 70762), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': '(100)', 'figsize': '(12, 5)'}), '(dpi=100, figsize=(12, 5))\n', (70736, 70762), True, 'from matplotlib import pyplot as plt\n'), ((72345, 72379), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'color': 'am_color'}), '(xlabel, color=am_color)\n', (72355, 72379), True, 'from matplotlib import pyplot as plt\n'), ((72496, 72528), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylab'], {'color': 'am_color'}), '(ylab, color=am_color)\n', (72506, 72528), True, 'from matplotlib import pyplot as plt\n'), ((72555, 72589), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'color': 'am_color'}), '(ylabel, color=am_color)\n', (72565, 72589), True, 'from matplotlib import pyplot as plt\n'), ((72699, 72757), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'color': '"""gray"""', 'linestyle': '"""dashdot"""', 'linewidth': '(0.5)'}), "(color='gray', linestyle='dashdot', linewidth=0.5)\n", (72707, 72757), True, 'from matplotlib import pyplot as plt\n'), ((74035, 74051), 'matplotlib.pyplot.savefig', 'plt.savefig', (['out'], {}), '(out)\n', (74046, 74051), True, 'from matplotlib import pyplot as plt\n'), ((74078, 74088), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (74086, 74088), True, 'from matplotlib import pyplot as plt\n'), ((75660, 
75712), 'requests.get', 'requests.get', (["('%s%s' % (svc_url, path))"], {'headers': 'hdrs'}), "('%s%s' % (svc_url, path), headers=hdrs)\n", (75672, 75712), False, 'import requests\n'), ((76410, 76433), 'os.path.exists', 'os.path.exists', (['id_list'], {}), '(id_list)\n', (76424, 76433), False, 'import os\n'), ((42606, 42644), 'numpy.array', 'np.array', (["res[self.context['id_main']]"], {}), "(res[self.context['id_main']])\n", (42614, 42644), True, 'import numpy as np\n'), ((47240, 47261), 'io.BytesIO', 'BytesIO', (['resp.content'], {}), '(resp.content)\n', (47247, 47261), False, 'from io import BytesIO\n'), ((47532, 47578), 'requests.post', 'requests.post', (['url'], {'data': 'data', 'headers': 'headers'}), '(url, data=data, headers=headers)\n', (47545, 47578), False, 'import requests\n'), ((47875, 47890), 'numpy.array', 'np.array', (['_data'], {}), '(_data)\n', (47883, 47890), True, 'import numpy as np\n'), ((55163, 55202), 'requests.get', 'requests.get', (['_svc_url'], {'headers': 'headers'}), '(_svc_url, headers=headers)\n', (55175, 55202), False, 'import requests\n'), ((62697, 62718), 'io.BytesIO', 'BytesIO', (['resp.content'], {}), '(resp.content)\n', (62704, 62718), False, 'from io import BytesIO\n'), ((65695, 65716), 'io.BytesIO', 'BytesIO', (['resp.content'], {}), '(resp.content)\n', (65702, 65716), False, 'from io import BytesIO\n'), ((68696, 68762), 'warnings.warn', 'warnings.warn', (['"""Redshift required to mark lines in observed frame"""'], {}), "('Redshift required to mark lines in observed frame')\n", (68709, 68762), False, 'import warnings\n'), ((71944, 71998), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Rest Wavelength ($\\\\AA$)"""'], {'color': 'am_color'}), "('Rest Wavelength ($\\\\AA$)', color=am_color)\n", (71954, 71998), True, 'from matplotlib import pyplot as plt\n'), ((73968, 73993), 'matplotlib.pyplot.setp', 'plt.setp', (['text'], {'color': '"""w"""'}), "(text, color='w')\n", (73976, 73993), True, 'from matplotlib import pyplot as plt\n'), ((74668, 74702), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (74680, 74702), False, 'import requests\n'), ((74741, 74758), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (74753, 74758), False, 'import requests\n'), ((77681, 77699), 'numpy.array', 'np.array', (['cnv_list'], {}), '(cnv_list)\n', (77689, 77699), True, 'import numpy as np\n'), ((42811, 42844), 'dl.storeClient.saveAs', 'storeClient.saveAs', (['csv_text', 'out'], {}), '(csv_text, out)\n', (42829, 42844), False, 'from dl import storeClient\n'), ((54107, 54141), 'numpy.array', 'np.array', (['spec.spectral_axis.value'], {}), '(spec.spectral_axis.value)\n', (54115, 54141), True, 'import numpy as np\n'), ((55833, 55888), 'warnings.warn', 'warnings.warn', (['"""Redshift needed to plot in rest frame."""'], {}), "('Redshift needed to plot in rest frame.')\n", (55846, 55888), False, 'import warnings\n'), ((72070, 72142), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (["('Observed Wavelength ($\\\\AA$) z=%.3g' % z)"], {'color': 'am_color'}), "('Observed Wavelength ($\\\\AA$) z=%.3g' % z, color=am_color)\n", (72080, 72142), True, 'from matplotlib import pyplot as plt\n'), ((72215, 72288), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Observed Wavelength ($\\\\AA$) z=(unknown)"""'], {'color': 'am_color'}), "('Observed Wavelength ($\\\\AA$) z=(unknown)', color=am_color)\n", (72225, 72288), True, 'from matplotlib import pyplot as plt\n'), ((47728, 47749), 'io.BytesIO', 'BytesIO', (['resp.content'], {}), 
'(resp.content)\n', (47735, 47749), False, 'from io import BytesIO\n'), ((77078, 77111), 'dl.helpers.utils.convert', 'convert', (['id_list'], {'outfmt': '"""pandas"""'}), "(id_list, outfmt='pandas')\n", (77085, 77111), False, 'from dl.helpers.utils import convert\n'), ((77139, 77179), 'numpy.array', 'np.array', (["pdata[self.context['id_main']]"], {}), "(pdata[self.context['id_main']])\n", (77147, 77179), True, 'import numpy as np\n'), ((77220, 77239), 'numpy.array', 'np.array', (['[id_list]'], {}), '([id_list])\n', (77228, 77239), True, 'import numpy as np\n'), ((59231, 59259), 'requests.get', 'requests.get', (['url'], {'timeout': '(2)'}), '(url, timeout=2)\n', (59243, 59259), False, 'import requests\n'), ((79170, 79212), 'numpy.array', 'np.array', (["id_list[self.context['id_main']]"], {}), "(id_list[self.context['id_main']])\n", (79178, 79212), True, 'import numpy as np\n'), ((76885, 76909), 'dl.storeClient.get', 'storeClient.get', (['id_list'], {}), '(id_list)\n', (76900, 76909), False, 'from dl import storeClient\n'), ((49537, 49577), 'specutils.SpectrumCollection.from_spectra', 'SpectrumCollection.from_spectra', (['sp_data'], {}), '(sp_data)\n', (49568, 49577), False, 'from specutils import SpectrumCollection\n')]
|
import os,sys
import pandas as pd
import numpy as np
import json,time
import tensorflow as tf
import filterSlidingWindow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense,Activation, Dropout,BatchNormalization,Conv2D,Conv1D,Flatten,LSTM,MaxPool1D,TimeDistributed
from sklearn.preprocessing import LabelEncoder,LabelBinarizer
from sklearn.model_selection import train_test_split, StratifiedKFold
from tensorflow.keras.callbacks import TensorBoard
from sklearn.metrics import confusion_matrix
class dataModel:
def __init__(self):
"""
Loads, preprocesses and splits the data on startup.
"""
#initialize parameters
self.xTrain = None
self.yTrain = None
self.xValidation = None
self.yValidation = None
self.xTest = None
self.yTest = None
self.nClasses = None
self.models = None
self.model = None
self.loadPreprocessData()
def loadPreprocessData(self):
"""
Loads data from the file data.json.
Each trial is then filtered and windowed according to the parameters below.
Afterwards the data is split, stratified by label, into training, validation and test sets.
"""
fs = 32
windowSize = fs*4
slide = fs*1
cutoff = 5
order = 4
labeledData = filterSlidingWindow.loadFileApplyfilterAndSlidingWindow(windowSize,slide,cutoff,order)
self.stratifyData(labeledData)
def stratifyData(self,data):
"""
The given data is split, stratified by label, into 70% training, 15% validation and 15% test sets.
"""
xTrain,xValidation,yTrain,yValidation = train_test_split(data["trials"],data['labels'],test_size = 0.3,random_state=42,stratify=data['labels'])
xValidation,xTest,yValidation,yTest = train_test_split(xValidation,yValidation,test_size = 0.5,random_state=42,stratify=yValidation)
encoder = LabelBinarizer()
self.xTrain = np.array(xTrain).reshape((len(xTrain),128,3))
self.xValidation = np.array(xValidation).reshape((len(xValidation),128,3))
self.xTest = np.array(xTest).reshape((len(xTest),128,3))
self.yTrainDecoded = yTrain
self.yTrain = encoder.fit_transform(yTrain)
self.yValidation = encoder.fit_transform(yValidation)
self.yTest = encoder.fit_transform(yTest)
#after transforming to one hot label vectors, the length is equal to the amount of classes
self.nClasses = len(self.yTest[0])
def buildNets1DLSTM(self):
"""
According to the lists below, this function builds and compiles several versions
of a 1D convolutional neural network followed by an LSTM.
"""
models = []
denseLayers = [0]
CNNLayers = [3]
filters = [128]
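# With these single-element lists the nested loops below build exactly one model:
# an initial Conv1D plus three Conv1D/pooling blocks (128 filters each), an LSTM(100)
# and a softmax output layer.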
for dense in denseLayers:
for CNNLayer in CNNLayers:
for filt in filters:
nameOfModel = "{}-conv-{}-filter-{}-dense-{}".format(CNNLayer,filt,dense,int(time.time()))
model = Sequential()
model.add(Conv1D(input_shape = (128,3,),kernel_size = (3), padding = "valid", filters = filt))
model.add(Activation("elu"))
model.add(BatchNormalization())
model.add(MaxPool1D(pool_size=2,padding="valid"))
for _ in range(CNNLayer):
model.add(Conv1D(kernel_size = (2), padding = "valid", filters = filt))
model.add(Activation("elu"))
model.add(BatchNormalization())
model.add(MaxPool1D(pool_size=2,padding="valid"))
#model.add(Flatten())
model.add(LSTM(100))
for _ in range(dense):
model.add(Dense(filt))
model.add(Activation("elu"))
model.add(Dense(self.nClasses, activation = "softmax" ))
model.compile(loss='categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
#model.summary()
keyVals = {"model":model,
"name":nameOfModel}
models.append(keyVals)
self.models = models
def crossTrainEval(self,name,epochs,batchSize):
"""
This method runs 10-fold cross-validation on the compiled model stored in
self.model
"""
print("training network {}".format(name))
skf = StratifiedKFold(n_splits=10, shuffle=True)
cross = skf.split(self.xTrain,self.yTrainDecoded)
trainAccuracies = []
evalAccuracies = []
for train,test in cross:
model= keras.models.clone_model(self.model)
model.build((None, 128, 3)) # input shape: (batch, window length, acceleration channels)
model.compile(optimizer='adam', loss='categorical_crossentropy',metrics = ['accuracy'])
model.set_weights(self.model.get_weights())
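# Cloning the template and copying its weights means every fold starts from the same
# (untrained) initial state, so the per-fold accuracies are directly comparable.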
model.fit(self.xTrain[train],self.yTrain[train],epochs = epochs, batch_size = batchSize,
verbose = 0, validation_data = (self.xTrain[test],self.yTrain[test]))
trainAccuracies.append(model.evaluate(self.xTrain[train],self.yTrain[train])[1])
evalAccuracies.append(model.evaluate(self.xTrain[test],self.yTrain[test])[1])
accuracies = (np.mean(trainAccuracies),np.mean(evalAccuracies))
print("crossval accuracies are {}".format(accuracies))
return accuracies
def trainNetwork(self,epochs,batchSize):
"""
This method will train the current model stored in
self.model with validation data
"""
print("training network")
self.model.fit(self.xTrain,self.yTrain,epochs = epochs, batch_size = batchSize,
verbose = 1, validation_data = (self.xValidation,self.yValidation))
def validateNetwork(self):
"""
This method will get the validation accuracy of the trained model stored in
self.model
"""
accuracy = self.model.evaluate(self.xValidation,self.yValidation)
print("The accurcy on the Eval Data was: " + str(accuracy))
return accuracy
def testNetwork(self):
"""
This method will get the unseen test accuracy of the trained model stored in
self.model
"""
accuracy = self.model.evaluate(self.xTest,self.yTest)
print("The accurcy on the test Data was: " + str(accuracy))
dataModel1 = dataModel()
dataModel1.buildNets1DLSTM()
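# For each compiled configuration: run 10-fold cross-validation, train with validation
# data, and stop at the first model whose validation accuracy reaches 0.9; that model
# is then evaluated on the held-out test set.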
for keyVals in dataModel1.models:
dataModel1.model = keyVals["model"]
name = keyVals["name"]
print("10x crossfold validating {}".format(name))
dataModel1.crossTrainEval(name,epochs=200,batchSize=100)
print("training the {}".format(name))
dataModel1.trainNetwork(epochs=200,batchSize =100)
valAcc = dataModel1.validateNetwork()
if valAcc >= 0.9:
break
dataModel1.testNetwork()
|
[
"filterSlidingWindow.loadFileApplyfilterAndSlidingWindow",
"sklearn.preprocessing.LabelBinarizer",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.models.clone_model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.MaxPool1D",
"time.time",
"numpy.mean",
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.LSTM"
] |
[((1417, 1510), 'filterSlidingWindow.loadFileApplyfilterAndSlidingWindow', 'filterSlidingWindow.loadFileApplyfilterAndSlidingWindow', (['windowSize', 'slide', 'cutoff', 'order'], {}), '(windowSize, slide,\n cutoff, order)\n', (1472, 1510), False, 'import filterSlidingWindow\n'), ((1751, 1860), 'sklearn.model_selection.train_test_split', 'train_test_split', (["data['trials']", "data['labels']"], {'test_size': '(0.3)', 'random_state': '(42)', 'stratify': "data['labels']"}), "(data['trials'], data['labels'], test_size=0.3,\n random_state=42, stratify=data['labels'])\n", (1767, 1860), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((1902, 2002), 'sklearn.model_selection.train_test_split', 'train_test_split', (['xValidation', 'yValidation'], {'test_size': '(0.5)', 'random_state': '(42)', 'stratify': 'yValidation'}), '(xValidation, yValidation, test_size=0.5, random_state=42,\n stratify=yValidation)\n', (1918, 2002), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((2016, 2032), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (2030, 2032), False, 'from sklearn.preprocessing import LabelEncoder, LabelBinarizer\n'), ((4713, 4755), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(10)', 'shuffle': '(True)'}), '(n_splits=10, shuffle=True)\n', (4728, 4755), False, 'from sklearn.model_selection import train_test_split, StratifiedKFold\n'), ((4923, 4959), 'tensorflow.keras.models.clone_model', 'keras.models.clone_model', (['self.model'], {}), '(self.model)\n', (4947, 4959), False, 'from tensorflow import keras\n'), ((5594, 5618), 'numpy.mean', 'np.mean', (['trainAccuracies'], {}), '(trainAccuracies)\n', (5601, 5618), True, 'import numpy as np\n'), ((5619, 5642), 'numpy.mean', 'np.mean', (['evalAccuracies'], {}), '(evalAccuracies)\n', (5626, 5642), True, 'import numpy as np\n'), ((2055, 2071), 'numpy.array', 'np.array', (['xTrain'], {}), '(xTrain)\n', (2063, 2071), True, 'import numpy as np\n'), ((2128, 2149), 'numpy.array', 'np.array', (['xValidation'], {}), '(xValidation)\n', (2136, 2149), True, 'import numpy as np\n'), ((2205, 2220), 'numpy.array', 'np.array', (['xTest'], {}), '(xTest)\n', (2213, 2220), True, 'import numpy as np\n'), ((3154, 3166), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3164, 3166), False, 'from tensorflow.keras.models import Sequential\n'), ((3197, 3271), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'input_shape': '(128, 3)', 'kernel_size': '(3)', 'padding': '"""valid"""', 'filters': 'filt'}), "(input_shape=(128, 3), kernel_size=3, padding='valid', filters=filt)\n", (3203, 3271), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3312, 3329), 'tensorflow.keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (3322, 3329), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3361, 3381), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3379, 3381), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3413, 3452), 'tensorflow.keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(2)', 'padding': '"""valid"""'}), "(pool_size=2, padding='valid')\n", 
(3422, 3452), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3871, 3880), 'tensorflow.keras.layers.LSTM', 'LSTM', (['(100)'], {}), '(100)\n', (3875, 3880), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((4077, 4119), 'tensorflow.keras.layers.Dense', 'Dense', (['self.nClasses'], {'activation': '"""softmax"""'}), "(self.nClasses, activation='softmax')\n", (4082, 4119), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3112, 3123), 'time.time', 'time.time', ([], {}), '()\n', (3121, 3123), False, 'import json, time\n'), ((3554, 3606), 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'kernel_size': '(2)', 'padding': '"""valid"""', 'filters': 'filt'}), "(kernel_size=2, padding='valid', filters=filt)\n", (3560, 3606), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3650, 3667), 'tensorflow.keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (3660, 3667), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3703, 3723), 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), '()\n', (3721, 3723), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3759, 3798), 'tensorflow.keras.layers.MaxPool1D', 'MaxPool1D', ([], {'pool_size': '(2)', 'padding': '"""valid"""'}), "(pool_size=2, padding='valid')\n", (3768, 3798), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((3960, 3971), 'tensorflow.keras.layers.Dense', 'Dense', (['filt'], {}), '(filt)\n', (3965, 3971), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n'), ((4007, 4024), 'tensorflow.keras.layers.Activation', 'Activation', (['"""elu"""'], {}), "('elu')\n", (4017, 4024), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization, Conv2D, Conv1D, Flatten, LSTM, MaxPool1D, TimeDistributed\n')]
|
#!/usr/bin/env python3
import numpy as np
from matplotlib import pyplot as plt
positiondata = np.loadtxt("positiondata.txt", delimiter=' ')
measurementdata = np.loadtxt("measurementdata.txt", delimiter=' ')
posteriordata = np.loadtxt("posterior.txt", delimiter=' ')
plt.figure(figsize=(10,4))
plt.plot(np.arange(0, 50, 1), measurementdata, 'bx', label='measurements')
total = 0
for hypothesis in posteriordata:
    plt.plot(np.arange(0, 50, 1), hypothesis, color="orange", linestyle='-', label='_nolegend_', alpha=0.01)
    total += 1
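# Note (editor's comment): each hypothesis is drawn with alpha=0.01 and label='_nolegend_',
# so overlapping orange curves accumulate into a density-like band; the empty
# plt.plot(np.array([]), ...) call below exists only to register a single orange
# "estimate" entry in the legend for that band.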
plt.plot(np.array([]), color="orange", label="estimate")
plt.plot(np.arange(0, 50, 1), positiondata, 'r--', label='ground truth')
plt.legend(loc="upper left")
plt.savefig('kalman_img1.png', dpi=300)
|
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.loadtxt",
"numpy.array",
"matplotlib.pyplot.savefig"
] |
[((98, 143), 'numpy.loadtxt', 'np.loadtxt', (['"""positiondata.txt"""'], {'delimiter': '""" """'}), "('positiondata.txt', delimiter=' ')\n", (108, 143), True, 'import numpy as np\n'), ((162, 210), 'numpy.loadtxt', 'np.loadtxt', (['"""measurementdata.txt"""'], {'delimiter': '""" """'}), "('measurementdata.txt', delimiter=' ')\n", (172, 210), True, 'import numpy as np\n'), ((227, 269), 'numpy.loadtxt', 'np.loadtxt', (['"""posterior.txt"""'], {'delimiter': '""" """'}), "('posterior.txt', delimiter=' ')\n", (237, 269), True, 'import numpy as np\n'), ((271, 298), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (281, 298), True, 'from matplotlib import pyplot as plt\n'), ((670, 698), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (680, 698), True, 'from matplotlib import pyplot as plt\n'), ((699, 738), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""kalman_img1.png"""'], {'dpi': '(300)'}), "('kalman_img1.png', dpi=300)\n", (710, 738), True, 'from matplotlib import pyplot as plt\n'), ((308, 327), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(1)'], {}), '(0, 50, 1)\n', (317, 327), True, 'import numpy as np\n'), ((548, 560), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (556, 560), True, 'import numpy as np\n'), ((606, 625), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(1)'], {}), '(0, 50, 1)\n', (615, 625), True, 'import numpy as np\n'), ((428, 447), 'numpy.arange', 'np.arange', (['(0)', '(50)', '(1)'], {}), '(0, 50, 1)\n', (437, 447), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# graph_tool -- a general graph manipulation python module
#
# Copyright (C) 2006-2018 <NAME> <<EMAIL>>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
``graph_tool.spectral`` - Spectral properties
---------------------------------------------
Summary
+++++++
.. autosummary::
:nosignatures:
adjacency
laplacian
incidence
transition
modularity_matrix
Contents
++++++++
"""
from __future__ import division, absolute_import, print_function
from .. import _degree, _prop, Graph, GraphView, _limit_args
from .. stats import label_self_loops
import numpy
import scipy.sparse
import scipy.sparse.linalg
from .. dl_import import dl_import
dl_import("from . import libgraph_tool_spectral")
__all__ = ["adjacency", "laplacian", "incidence", "transition", "modularity_matrix"]
def adjacency(g, weight=None, index=None):
r"""Return the adjacency matrix of the graph.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
weight : :class:`~graph_tool.PropertyMap` (optional, default: None)
Edge property map with the edge weights.
index : :class:`~graph_tool.PropertyMap` (optional, default: None)
Vertex property map specifying the row/column indexes. If not provided, the
internal vertex index is used.
Returns
-------
a : :class:`~scipy.sparse.csr_matrix`
The (sparse) adjacency matrix.
Notes
-----
The adjacency matrix is defined as
.. math::
a_{i,j} =
\begin{cases}
1 & \text{if } (j, i) \in E, \\
2 & \text{if } i = j \text{ and } (i, i) \in E, \\
0 & \text{otherwise},
\end{cases}
where :math:`E` is the edge set.
In the case of weighted edges, the entry values are multiplied by the weight
of the respective edge.
In the case of networks with parallel edges, the entries in the matrix
become simply the edge multiplicities (or twice them for the diagonal).
.. note::
For directed graphs the definition above means that the entry
:math:`a_{i,j}` corresponds to the directed edge :math:`j\to
i`. Although this is a typical definition in network and graph theory
literature, many also use the transpose of this matrix.
Examples
--------
.. testsetup::
import scipy.linalg
from pylab import *
>>> g = gt.collection.data["polblogs"]
>>> A = gt.adjacency(g)
>>> ew, ev = scipy.linalg.eig(A.todense())
>>> figure(figsize=(8, 2))
<...>
>>> scatter(real(ew), imag(ew), c=sqrt(abs(ew)), linewidths=0, alpha=0.6)
<...>
>>> xlabel(r"$\operatorname{Re}(\lambda)$")
Text(...)
>>> ylabel(r"$\operatorname{Im}(\lambda)$")
Text(...)
>>> tight_layout()
>>> savefig("adjacency-spectrum.pdf")
.. testcode::
:hide:
savefig("adjacency-spectrum.png")
.. figure:: adjacency-spectrum.*
:align: center
Adjacency matrix spectrum for the political blog network.
References
----------
.. [wikipedia-adjacency] http://en.wikipedia.org/wiki/Adjacency_matrix
"""
if index is None:
if g.get_vertex_filter()[0] is not None:
index = g.new_vertex_property("int64_t")
index.fa = numpy.arange(g.num_vertices())
else:
index = g.vertex_index
E = g.num_edges() if g.is_directed() else 2 * g.num_edges()
data = numpy.zeros(E, dtype="double")
i = numpy.zeros(E, dtype="int32")
j = numpy.zeros(E, dtype="int32")
libgraph_tool_spectral.adjacency(g._Graph__graph, _prop("v", g, index),
_prop("e", g, weight), data, i, j)
if E > 0:
V = max(g.num_vertices(), max(i.max() + 1, j.max() + 1))
else:
V = g.num_vertices()
m = scipy.sparse.coo_matrix((data, (i,j)), shape=(V, V))
m = m.tocsr()
return m
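# Editor's sketch (not part of graph-tool): the Notes above state that entry a_{i,j}
# corresponds to the directed edge j -> i.  The helper below builds that kind of matrix
# directly with scipy for a hypothetical 3-vertex graph with edges 0->1 and 1->2, so that
# A[1, 0] == A[2, 1] == 1 and every other entry is zero.
def _adjacency_convention_sketch():
    import numpy as np
    import scipy.sparse
    edges = [(0, 1), (1, 2)]                    # directed edges (source j, target i)
    rows = np.array([t for _, t in edges])      # row index    = target vertex i
    cols = np.array([s for s, _ in edges])      # column index = source vertex j
    data = np.ones(len(edges))
    return scipy.sparse.coo_matrix((data, (rows, cols)), shape=(3, 3)).tocsr()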
@_limit_args({"deg": ["total", "in", "out"]})
def laplacian(g, deg="total", normalized=False, weight=None, index=None):
r"""Return the Laplacian matrix of the graph.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
deg : str (optional, default: "total")
Degree to be used, in case of a directed graph.
normalized : bool (optional, default: False)
Whether to compute the normalized Laplacian.
weight : :class:`~graph_tool.PropertyMap` (optional, default: None)
Edge property map with the edge weights.
index : :class:`~graph_tool.PropertyMap` (optional, default: None)
Vertex property map specifying the row/column indexes. If not provided, the
internal vertex index is used.
Returns
-------
l : :class:`~scipy.sparse.csr_matrix`
The (sparse) Laplacian matrix.
Notes
-----
The weighted Laplacian matrix is defined as
.. math::
\ell_{ij} =
\begin{cases}
\Gamma(v_i) & \text{if } i = j \\
-w_{ij} & \text{if } i \neq j \text{ and } (j, i) \in E \\
0 & \text{otherwise}.
\end{cases}
Where :math:`\Gamma(v_i)=\sum_j A_{ij}w_{ij}` is the sum of the weights of the
edges incident on vertex :math:`v_i`. The normalized version is
.. math::
\ell_{ij} =
\begin{cases}
1 & \text{ if } i = j \text{ and } \Gamma(v_i) \neq 0 \\
-\frac{w_{ij}}{\sqrt{\Gamma(v_i)\Gamma(v_j)}} & \text{ if } i \neq j \text{ and } (j, i) \in E \\
0 & \text{otherwise}.
\end{cases}
In the case of unweighted edges, it is assumed :math:`w_{ij} = 1`.
For directed graphs, it is assumed :math:`\Gamma(v_i)=\sum_j A_{ij}w_{ij} +
\sum_j A_{ji}w_{ji}` if ``deg=="total"``, :math:`\Gamma(v_i)=\sum_j A_{ji}w_{ji}`
if ``deg=="out"``, or :math:`\Gamma(v_i)=\sum_j A_{ij}w_{ij}` if ``deg=="in"``.
.. note::
For directed graphs the definition above means that the entry
:math:`\ell_{i,j}` corresponds to the directed edge :math:`j\to
i`. Although this is a typical definition in network and graph theory
literature, many also use the transpose of this matrix.
Examples
--------
.. testsetup::
import scipy.linalg
from pylab import *
>>> g = gt.collection.data["polblogs"]
>>> L = gt.laplacian(g)
>>> ew, ev = scipy.linalg.eig(L.todense())
>>> figure(figsize=(8, 2))
<...>
>>> scatter(real(ew), imag(ew), c=sqrt(abs(ew)), linewidths=0, alpha=0.6)
<...>
>>> xlabel(r"$\operatorname{Re}(\lambda)$")
Text(...)
>>> ylabel(r"$\operatorname{Im}(\lambda)$")
Text(...)
>>> tight_layout()
>>> savefig("laplacian-spectrum.pdf")
.. testcode::
:hide:
savefig("laplacian-spectrum.png")
.. figure:: laplacian-spectrum.*
:align: center
Laplacian matrix spectrum for the political blog network.
>>> L = gt.laplacian(g, normalized=True)
>>> ew, ev = scipy.linalg.eig(L.todense())
>>> figure(figsize=(8, 2))
<...>
>>> scatter(real(ew), imag(ew), c=sqrt(abs(ew)), linewidths=0, alpha=0.6)
<...>
>>> xlabel(r"$\operatorname{Re}(\lambda)$")
Text(...)
>>> ylabel(r"$\operatorname{Im}(\lambda)$")
Text(...)
>>> tight_layout()
>>> savefig("norm-laplacian-spectrum.pdf")
.. testcode::
:hide:
savefig("norm-laplacian-spectrum.png")
.. figure:: norm-laplacian-spectrum.*
:align: center
Normalized Laplacian matrix spectrum for the political blog network.
References
----------
.. [wikipedia-laplacian] http://en.wikipedia.org/wiki/Laplacian_matrix
"""
if index is None:
if g.get_vertex_filter()[0] is not None:
index = g.new_vertex_property("int64_t")
index.fa = numpy.arange(g.num_vertices())
else:
index = g.vertex_index
V = g.num_vertices()
nself = int(label_self_loops(g, mark_only=True).a.sum())
E = g.num_edges() - nself
if not g.is_directed():
E *= 2
N = E + g.num_vertices()
data = numpy.zeros(N, dtype="double")
i = numpy.zeros(N, dtype="int32")
j = numpy.zeros(N, dtype="int32")
if normalized:
libgraph_tool_spectral.norm_laplacian(g._Graph__graph, _prop("v", g, index),
_prop("e", g, weight), deg, data, i, j)
else:
libgraph_tool_spectral.laplacian(g._Graph__graph, _prop("v", g, index),
_prop("e", g, weight), deg, data, i, j)
if E > 0:
V = max(g.num_vertices(), max(i.max() + 1, j.max() + 1))
else:
V = g.num_vertices()
m = scipy.sparse.coo_matrix((data, (i, j)), shape=(V, V))
m = m.tocsr()
return m
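# Editor's sketch (not part of graph-tool): with the definition above, every row of the
# unnormalized Laplacian of an undirected graph without self-loops sums to zero, since the
# diagonal entry Gamma(v_i) cancels the off-diagonal -w_ij terms.  A cheap sanity check:
def _laplacian_rowsum_sketch(g, weight=None):
    import numpy as np
    L = laplacian(g, weight=weight)
    rowsums = np.asarray(L.sum(axis=1)).ravel()
    return np.allclose(rowsums, 0)   # expected True for undirected graphs without self-loops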
def incidence(g, vindex=None, eindex=None):
r"""Return the incidence matrix of the graph.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
vindex : :class:`~graph_tool.PropertyMap` (optional, default: None)
Vertex property map specifying the row indexes. If not provided, the
internal vertex index is used.
eindex : :class:`~graph_tool.PropertyMap` (optional, default: None)
Edge property map specifying the column indexes. If not provided, the
internal edge index is used.
Returns
-------
a : :class:`~scipy.sparse.csr_matrix`
The (sparse) incidence matrix.
Notes
-----
For undirected graphs, the incidence matrix is defined as
.. math::
b_{i,j} =
\begin{cases}
1 & \text{if vertex } v_i \text{ and edge } e_j \text{ are incident}, \\
0 & \text{otherwise}
\end{cases}
For directed graphs, the definition is
.. math::
b_{i,j} =
\begin{cases}
1 & \text{if edge } e_j \text{ enters vertex } v_i, \\
-1 & \text{if edge } e_j \text{ leaves vertex } v_i, \\
0 & \text{otherwise}
\end{cases}
Examples
--------
.. testsetup::
gt.seed_rng(42)
>>> g = gt.random_graph(100, lambda: (2,2))
>>> m = gt.incidence(g)
>>> print(m.todense())
[[-1. -1. 0. ... 0. 0. 0.]
[ 0. 0. 0. ... 0. 0. 0.]
[ 0. 0. 0. ... 0. 0. 0.]
...
[ 0. 0. -1. ... 0. 0. 0.]
[ 0. 0. 0. ... 0. 0. 0.]
[ 0. 0. 0. ... 0. 0. 0.]]
References
----------
.. [wikipedia-incidence] http://en.wikipedia.org/wiki/Incidence_matrix
"""
if vindex is None:
if g.get_vertex_filter()[0] is not None:  # vindex depends on the vertex filter, not the edge filter
vindex = g.new_vertex_property("int64_t")
vindex.fa = numpy.arange(g.num_vertices())
else:
vindex = g.vertex_index
if eindex is None:
if g.get_edge_filter()[0] is not None:
eindex = g.new_edge_property("int64_t")
eindex.fa = numpy.arange(g.num_edges())
else:
eindex = g.edge_index
E = g.num_edges()
if E == 0:
raise ValueError("Cannot construct incidence matrix for a graph with no edges.")
data = numpy.zeros(2 * E, dtype="double")
i = numpy.zeros(2 * E, dtype="int32")
j = numpy.zeros(2 * E, dtype="int32")
libgraph_tool_spectral.incidence(g._Graph__graph, _prop("v", g, vindex),
_prop("e", g, eindex), data, i, j)
m = scipy.sparse.coo_matrix((data, (i,j)))
m = m.tocsr()
return m
def transition(g, weight=None, index=None):
r"""Return the transition matrix of the graph.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
weight : :class:`~graph_tool.PropertyMap` (optional, default: None)
Edge property map with the edge weights.
index : :class:`~graph_tool.PropertyMap` (optional, default: None)
Vertex property map specifying the row/column indexes. If not provided, the
internal vertex index is used.
Returns
-------
T : :class:`~scipy.sparse.csr_matrix`
The (sparse) transition matrix.
Notes
-----
The transition matrix is defined as
.. math::
T_{ij} = \frac{A_{ij}}{k_j}
where :math:`k_i = \sum_j A_{ji}`, and :math:`A_{ij}` is the adjacency
matrix.
In the case of weighted edges, the values of the adjacency matrix are
multiplied by the edge weights.
.. note::
For directed graphs the definition above means that the entry
:math:`T_{ij}` corresponds to the directed edge :math:`j\to
i`. Although this is a typical definition in network and graph theory
literature, many also use the transpose of this matrix.
Examples
--------
.. testsetup::
import scipy.linalg
from pylab import *
>>> g = gt.collection.data["polblogs"]
>>> T = gt.transition(g)
>>> ew, ev = scipy.linalg.eig(T.todense())
>>> figure(figsize=(8, 2))
<...>
>>> scatter(real(ew), imag(ew), c=sqrt(abs(ew)), linewidths=0, alpha=0.6)
<...>
>>> xlabel(r"$\operatorname{Re}(\lambda)$")
Text(...)
>>> ylabel(r"$\operatorname{Im}(\lambda)$")
Text(...)
>>> tight_layout()
>>> savefig("transition-spectrum.pdf")
.. testcode::
:hide:
savefig("transition-spectrum.png")
.. figure:: transition-spectrum.*
:align: center
Transition matrix spectrum for the political blog network.
References
----------
.. [wikipedia-transition] https://en.wikipedia.org/wiki/Stochastic_matrix
"""
if index is None:
if g.get_vertex_filter()[0] is not None:
index = g.new_vertex_property("int64_t")
index.fa = numpy.arange(g.num_vertices())
else:
index = g.vertex_index
E = g.num_edges() if g.is_directed() else 2 * g.num_edges()
data = numpy.zeros(E, dtype="double")
i = numpy.zeros(E, dtype="int32")
j = numpy.zeros(E, dtype="int32")
libgraph_tool_spectral.transition(g._Graph__graph, _prop("v", g, index),
_prop("e", g, weight), data, i, j)
if E > 0:
V = max(g.num_vertices(), max(i.max() + 1, j.max() + 1))
else:
V = g.num_vertices()
m = scipy.sparse.coo_matrix((data, (i,j)), shape=(V, V))
m = m.tocsr()
return m
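# Editor's sketch (not part of graph-tool): the definition T_{ij} = A_{ij} / k_j is a plain
# column normalization of the adjacency matrix, which can also be written with scipy alone:
def _transition_from_adjacency_sketch(A):
    import numpy as np
    import scipy.sparse
    k = np.asarray(A.sum(axis=0)).ravel()   # k_j = sum_i A_{ij}
    k[k == 0] = 1                         # leave columns of isolated vertices untouched
    return A @ scipy.sparse.diags(1.0 / k)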
def modularity_matrix(g, weight=None, index=None):
r"""Return the modularity matrix of the graph.
Parameters
----------
g : :class:`~graph_tool.Graph`
Graph to be used.
weight : :class:`~graph_tool.PropertyMap` (optional, default: None)
Edge property map with the edge weights.
index : :class:`~graph_tool.PropertyMap` (optional, default: None)
Vertex property map specifying the row/column indexes. If not provided, the
internal vertex index is used.
Returns
-------
B : :class:`~scipy.sparse.linalg.LinearOperator`
The (sparse) modularity matrix, represented as a
:class:`~scipy.sparse.linalg.LinearOperator`.
Notes
-----
The modularity matrix is defined as
.. math::
B_{ij} = A_{ij} - \frac{k^+_i k^-_j}{2E}
where :math:`k^+_i = \sum_j A_{ji}`, :math:`k^-_i = \sum_j A_{ij}`,
:math:`2E=\sum_{ij}A_{ij}` and :math:`A_{ij}` is the adjacency matrix.
In the case of weighted edges, the values of the adjacency matrix are
multiplied by the edge weights.
Examples
--------
.. testsetup::
import scipy.linalg
from pylab import *
>>> g = gt.collection.data["polblogs"]
>>> B = gt.modularity_matrix(g)
>>> B = B * np.identity(B.shape[0]) # transform to a dense matrix
>>> ew, ev = scipy.linalg.eig(B)
>>> figure(figsize=(8, 2))
<...>
>>> scatter(real(ew), imag(ew), c=sqrt(abs(ew)), linewidths=0, alpha=0.6)
<...>
>>> xlabel(r"$\operatorname{Re}(\lambda)$")
Text(...)
>>> ylabel(r"$\operatorname{Im}(\lambda)$")
Text(...)
>>> tight_layout()
>>> savefig("modularity-spectrum.pdf")
.. testcode::
:hide:
savefig("modularity-spectrum.png")
.. figure:: modularity-spectrum.*
:align: center
Modularity matrix spectrum for the political blog network.
References
----------
.. [newman-modularity] <NAME>, <NAME>, "Finding and evaluating
community structure in networks", Phys. Rev. E 69, 026113 (2004).
:doi:`10.1103/PhysRevE.69.026113`
"""
A = adjacency(g, weight=weight, index=index)
if g.is_directed():
k_in = g.degree_property_map("in", weight=weight).fa
else:
k_in = g.degree_property_map("out", weight=weight).fa
k_out = g.degree_property_map("out", weight=weight).fa
N = A.shape[0]
E2 = float(k_out.sum())
def matvec(x):
M = x.shape[0]
if len(x.shape) > 1:
x = x.reshape(M)
nx = A * x - k_out * numpy.dot(k_in, x) / E2
return nx
def rmatvec(x):
M = x.shape[0]
if len(x.shape) > 1:
x = x.reshape(M)
nx = A.T * x - k_in * numpy.dot(k_out, x) / E2
return nx
B = scipy.sparse.linalg.LinearOperator((g.num_vertices(), g.num_vertices()),
matvec=matvec, rmatvec=rmatvec,
dtype="float")
return B
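# Editor's usage sketch (not part of graph-tool): because B is returned as a
# LinearOperator it is never materialised as a dense N x N array; iterative eigensolvers
# only need matvec/rmatvec, so the leading eigenpair used for spectral bisection can be
# obtained directly:
def _leading_modularity_eigenpair_sketch(g, k=1):
    import scipy.sparse.linalg
    B = modularity_matrix(g)
    return scipy.sparse.linalg.eigs(B, k=k, which="LR")   # eigenvalues with largest real part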
|
[
"numpy.dot",
"numpy.zeros"
] |
[((4078, 4108), 'numpy.zeros', 'numpy.zeros', (['E'], {'dtype': '"""double"""'}), "(E, dtype='double')\n", (4089, 4108), False, 'import numpy\n'), ((4117, 4146), 'numpy.zeros', 'numpy.zeros', (['E'], {'dtype': '"""int32"""'}), "(E, dtype='int32')\n", (4128, 4146), False, 'import numpy\n'), ((4155, 4184), 'numpy.zeros', 'numpy.zeros', (['E'], {'dtype': '"""int32"""'}), "(E, dtype='int32')\n", (4166, 4184), False, 'import numpy\n'), ((8721, 8751), 'numpy.zeros', 'numpy.zeros', (['N'], {'dtype': '"""double"""'}), "(N, dtype='double')\n", (8732, 8751), False, 'import numpy\n'), ((8760, 8789), 'numpy.zeros', 'numpy.zeros', (['N'], {'dtype': '"""int32"""'}), "(N, dtype='int32')\n", (8771, 8789), False, 'import numpy\n'), ((8798, 8827), 'numpy.zeros', 'numpy.zeros', (['N'], {'dtype': '"""int32"""'}), "(N, dtype='int32')\n", (8809, 8827), False, 'import numpy\n'), ((11735, 11769), 'numpy.zeros', 'numpy.zeros', (['(2 * E)'], {'dtype': '"""double"""'}), "(2 * E, dtype='double')\n", (11746, 11769), False, 'import numpy\n'), ((11778, 11811), 'numpy.zeros', 'numpy.zeros', (['(2 * E)'], {'dtype': '"""int32"""'}), "(2 * E, dtype='int32')\n", (11789, 11811), False, 'import numpy\n'), ((11820, 11853), 'numpy.zeros', 'numpy.zeros', (['(2 * E)'], {'dtype': '"""int32"""'}), "(2 * E, dtype='int32')\n", (11831, 11853), False, 'import numpy\n'), ((14469, 14499), 'numpy.zeros', 'numpy.zeros', (['E'], {'dtype': '"""double"""'}), "(E, dtype='double')\n", (14480, 14499), False, 'import numpy\n'), ((14508, 14537), 'numpy.zeros', 'numpy.zeros', (['E'], {'dtype': '"""int32"""'}), "(E, dtype='int32')\n", (14519, 14537), False, 'import numpy\n'), ((14546, 14575), 'numpy.zeros', 'numpy.zeros', (['E'], {'dtype': '"""int32"""'}), "(E, dtype='int32')\n", (14557, 14575), False, 'import numpy\n'), ((17506, 17524), 'numpy.dot', 'numpy.dot', (['k_in', 'x'], {}), '(k_in, x)\n', (17515, 17524), False, 'import numpy\n'), ((17680, 17699), 'numpy.dot', 'numpy.dot', (['k_out', 'x'], {}), '(k_out, x)\n', (17689, 17699), False, 'import numpy\n')]
|
# ---------------------------------------------------------------
# het_util.py
# Set-up time: 2021/4/1 11:40
# Copyright (c) 2020 ICT
# Licensed under The MIT License [see LICENSE for details]
# Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT
# Contact: <EMAIL> [OR] <EMAIL>
# ---------------------------------------------------------------
import torch
import numpy as np
from .vctree_util import ArbitraryTree
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
def generate_forest(det_result, pick_parent, isc_thresh, child_order='leftright', num_embed_depth=None, need_depth=False):
"""
generate a list of trees that covers all the objects in a batch
im_inds: [obj_num]
box_priors: [obj_num, (x1, y1, x2, y2)]
pair_scores: [obj_num, obj_num]
    output: list of trees, each representing a chunk of overlapping objects
"""
output_forest = [] # the list of trees, each one is a chunk of overlapping objects
output_depths = [] # the list of tree depths,
all_bboxes, all_dists, all_labels = det_result.bboxes, det_result.dists, det_result.labels
num_objs = [len(b) for b in all_bboxes]
if all_dists is not None:
node_scores = [dist.max(1)[0] for dist in all_dists]
else:
node_scores = [torch.ones(len(b)).to(all_bboxes[0]) for b in all_bboxes]
areas = torch.cat(all_bboxes, 0)
areas = (areas[:, 3] - areas[:, 1] + 1) * (areas[:, 2] - areas[:, 0] + 1)
split_areas = areas.split(num_objs)
split_areas = [a.cpu().numpy() for a in split_areas]
all_sorted_idxes = [np.argsort(a)[::-1] for a in split_areas]
bbox_intersections = [bbox_overlaps(boxes.cpu().numpy()[:, :4], boxes.cpu().numpy()[:, :4], mode='iof')
for boxes in all_bboxes]
offset = 0
for img_id, (scores, bboxes, labels, areas, sorted_idxes, intersection, num_obj) in enumerate(zip(node_scores,
all_bboxes,
all_labels,
split_areas,
all_sorted_idxes,
bbox_intersections,
num_objs)):
# select the nodes from the same image
node_container = []
depth_labels = np.zeros(num_objs[img_id], dtype=np.int32)
# note: the index of root is the N+tree_id
root = ArbitraryTree(sum(num_objs)+img_id, -1, -1, is_root=True)
bboxes = bboxes[:, :4]
# put all nodes into node container
for idx in range(num_obj):
new_node = ArbitraryTree(offset + idx, scores[idx], labels[idx], bboxes[idx])
node_container.append(new_node)
# iteratively generate tree
gen_het(node_container, root, areas, sorted_idxes, intersection,
pick_parent=pick_parent, isc_thresh=isc_thresh, child_order=child_order)
if need_depth:
get_tree_depth(root, depth_labels, offset, num_embed_depth)
output_depths.append(torch.from_numpy(depth_labels).long().to(bboxes.device))
output_forest.append(root)
offset += num_obj
if need_depth:
output_depths = torch.cat(output_depths, 0)
return output_forest, output_depths
def gen_het(node_container, root, areas, sorted_idxes, intersection, pick_parent='area', isc_thresh=0.9,
child_order='leftright'):
num_nodes = len(node_container)
if num_nodes == 0:
return
# first step: sort the rois according to areas
sorted_node_container = [node_container[i] for i in sorted_idxes]
if pick_parent == 'isc':
sort_key = 1
elif pick_parent == 'area':
sort_key = 2
else:
raise NotImplementedError
# i, j for sorted_node_container, origin_i, origin_j for node_container
for i in range(num_nodes):
current_node = sorted_node_container[i]
possible_parent = []
origin_i = sorted_idxes[i]
for j in range(0, i): # all nodes that are larger than current_node
origin_j = sorted_idxes[j]
M = intersection[origin_i, origin_j]
N = intersection[origin_j, origin_i]
if M > isc_thresh:
possible_parent.append((j, N, areas[origin_j]))
if len(possible_parent) == 0:
            # assign the parent of i as root
root.add_child(current_node)
else:
if pick_parent != 'area' and pick_parent != 'isc':
raise NotImplementedError('%s for pick_parent not implemented' % pick_parent)
parent_id = sorted(possible_parent, key=lambda d: d[sort_key], reverse=True)[0][0]
sorted_node_container[parent_id].add_child(current_node)
# sort the children
sort_childs(root, child_order)
def sort_childs(root, order='leftright'):
if len(root.children) == 0:
return
children = root.children
boxes = np.vstack([n.box.cpu().numpy() for n in children])
node_scores = np.array([n.score for n in children])
if order == 'leftright':
scores = (boxes[:, 0] + boxes[:, 2]) / 2
scores = scores / (np.max(scores) + 1)
elif order == 'size':
scores = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
scores = scores / (np.max(scores) + 1)
elif order == 'confidence':
scores = node_scores
elif order == 'random':
scores = np.random.rand(len(children))
else:
raise NotImplementedError('Unknown sorting method: %s' % order)
sorted_id = np.argsort(-scores)
root.children = [children[i] for i in sorted_id]
for i in range(len(root.children)):
sort_childs(root.children[i], order)
def get_tree_depth(root, tree_depths, offset, num_embed_depth):
if root.parent is not None:
depth = root.depth()
if num_embed_depth is not None and depth >= num_embed_depth:
depth = num_embed_depth - 1
tree_depths[root.index - offset] = depth
for c in root.children:
get_tree_depth(c, tree_depths, offset, num_embed_depth)
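# Editor's sketch (not part of the original module): for order='leftright', sort_childs()
# scores each child by the x-centre of its box, (x1 + x2) / 2, and np.argsort(-scores)
# then puts the child with the largest x-centre first.  A toy check with three
# hypothetical boxes:
def _leftright_order_sketch():
    import numpy as np
    boxes = np.array([[10., 0., 20., 5.],
                      [0., 0., 4., 5.],
                      [30., 0., 34., 5.]])   # (x1, y1, x2, y2) per child
    scores = (boxes[:, 0] + boxes[:, 2]) / 2
    scores = scores / (np.max(scores) + 1)
    return np.argsort(-scores)             # -> array([2, 0, 1])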
|
[
"numpy.zeros",
"torch.cat",
"numpy.argsort",
"numpy.max",
"numpy.array",
"torch.from_numpy"
] |
[((1370, 1394), 'torch.cat', 'torch.cat', (['all_bboxes', '(0)'], {}), '(all_bboxes, 0)\n', (1379, 1394), False, 'import torch\n'), ((5522, 5559), 'numpy.array', 'np.array', (['[n.score for n in children]'], {}), '([n.score for n in children])\n', (5530, 5559), True, 'import numpy as np\n'), ((6088, 6107), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (6098, 6107), True, 'import numpy as np\n'), ((2743, 2785), 'numpy.zeros', 'np.zeros', (['num_objs[img_id]'], {'dtype': 'np.int32'}), '(num_objs[img_id], dtype=np.int32)\n', (2751, 2785), True, 'import numpy as np\n'), ((3668, 3695), 'torch.cat', 'torch.cat', (['output_depths', '(0)'], {}), '(output_depths, 0)\n', (3677, 3695), False, 'import torch\n'), ((1598, 1611), 'numpy.argsort', 'np.argsort', (['a'], {}), '(a)\n', (1608, 1611), True, 'import numpy as np\n'), ((5668, 5682), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (5674, 5682), True, 'import numpy as np\n'), ((5827, 5841), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (5833, 5841), True, 'import numpy as np\n'), ((3501, 3531), 'torch.from_numpy', 'torch.from_numpy', (['depth_labels'], {}), '(depth_labels)\n', (3517, 3531), False, 'import torch\n')]
|
import matplotlib.pyplot as plt
import numpy as np
import math
from collections import defaultdict
class Graph:
def __init__(self):
self.graph = defaultdict(list)
def addEdge(self, u, v):
self.graph[u].append(v)
def delEdge(self, u):
self.graph[u].clear()
def DFSUtil(self, v, visited):
visited.add(v)
for neighbour in self.graph[v]:
if neighbour not in visited:
self.DFSUtil(neighbour, visited)
def DFS(self, v):
visited = set()
self.DFSUtil(v, visited)
return visited
color=['g', 'r', 'b', 'g']
file = open("CSCI3230_ClusteringData.csv")
title = np.loadtxt(file, delimiter=",", dtype = str, max_rows=1)
pointData = np.loadtxt(file, delimiter=",")
file.close()
file = open("CSCI3230_initialCluster.csv")
initCluster = np.loadtxt(file, delimiter=",", skiprows=1)
file.close()
file = open("DBSCAN_Clustering Parameters.csv")
e, minPts = np.loadtxt(file, delimiter=",", skiprows=1)
dist = np.zeros((pointData.shape[0], pointData.shape[0]))
file.close()
core = np.zeros((pointData.shape[0]))
g = Graph()
for i, record1 in enumerate(pointData):
dist[i][i] = -1
for j, record2 in enumerate(pointData[:i]):
if (((record1[0] - record2[0])**2 + (record1[1] - record2[1])**2)**0.5 <= e):
dist[i][j] = -1
dist[j][i] = -1
g.addEdge(i, j)
g.addEdge(j, i)
for i, record in enumerate(dist):
print("%c's neighbour: " % chr(i+ord('a')), end = " ")
for j, element in enumerate(record):
if (element == -1):
print("%c" % chr(j+ord('a')), end = " ")
print()
print("Core points:", end = " ")
for i, record in enumerate(dist):
if np.sum(record) <= -minPts:
print("%c" % chr(i+ord('a')), end = " ")
core[i] = -1
else:
g.delEdge(i)
print()
clusternum = -1
cluster = []
for i, isCore in enumerate(core):
if (isCore == -1):
cluster.append([])
clusternum += 1
cluster[clusternum] = g.DFS(i)
for j, removecore in enumerate(cluster[clusternum]):
core[removecore] = clusternum + 1
pointData[removecore][2] = clusternum + 1
for i, corenum in enumerate(cluster):
print(cluster[i])
for record in pointData:
plt.scatter(record[0], record[1], c=color[int(record[2])])
plt.title("Q3d DBSCAN model")
plt.show()
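# Editor's note (sketch, not part of the assignment solution): the clustering above can be
# cross-checked against scikit-learn's reference DBSCAN, assuming the first two columns of
# pointData hold the coordinates:
#
#   from sklearn.cluster import DBSCAN
#   ref_labels = DBSCAN(eps=e, min_samples=int(minPts)).fit_predict(pointData[:, :2])
#
# DBSCAN labels noise as -1, and its cluster ids may be permuted relative to the ids
# assigned by the DFS above.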
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.zeros",
"collections.defaultdict",
"numpy.loadtxt"
] |
[((686, 740), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'delimiter': '""","""', 'dtype': 'str', 'max_rows': '(1)'}), "(file, delimiter=',', dtype=str, max_rows=1)\n", (696, 740), True, 'import numpy as np\n'), ((756, 787), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (766, 787), True, 'import numpy as np\n'), ((863, 906), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(file, delimiter=',', skiprows=1)\n", (873, 906), True, 'import numpy as np\n'), ((985, 1028), 'numpy.loadtxt', 'np.loadtxt', (['file'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(file, delimiter=',', skiprows=1)\n", (995, 1028), True, 'import numpy as np\n'), ((1037, 1087), 'numpy.zeros', 'np.zeros', (['(pointData.shape[0], pointData.shape[0])'], {}), '((pointData.shape[0], pointData.shape[0]))\n', (1045, 1087), True, 'import numpy as np\n'), ((1112, 1140), 'numpy.zeros', 'np.zeros', (['pointData.shape[0]'], {}), '(pointData.shape[0])\n', (1120, 1140), True, 'import numpy as np\n'), ((2425, 2454), 'matplotlib.pyplot.title', 'plt.title', (['"""Q3d DBSCAN model"""'], {}), "('Q3d DBSCAN model')\n", (2434, 2454), True, 'import matplotlib.pyplot as plt\n'), ((2456, 2466), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2464, 2466), True, 'import matplotlib.pyplot as plt\n'), ((165, 182), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (176, 182), False, 'from collections import defaultdict\n'), ((1782, 1796), 'numpy.sum', 'np.sum', (['record'], {}), '(record)\n', (1788, 1796), True, 'import numpy as np\n')]
|
import copy
import numpy as np
class BaseElement(object):
def __init__(self, object_index, medium_index, fl_brightness,
points):
"""Initialize a basic element
Parameters
----------
object_index: float
Refractive index of the element
medium_index: float
Refractive index of surrounding medium
fl_brightness: float
Fluorescence brightness
points: 2d ndarray
Coordinates of the element [m]
Notes
-----
When subclassing this class, override this method with
additional parameters (e.g. position, size) and call it
with super(ClassName, self).__init__(...).
"""
#: refractive index of the object
self.object_index = object_index
#: refractive index of the medium
self.medium_index = medium_index
#: brightness of the fluorescence signal
self.fl_brightness = fl_brightness
#: 2D array of points describing the geometrical object. This
#: variable is used for affine transforms (e.g. when rotating
#: the object).
self.points = np.array(points)
def draw(self, grid_size, pixel_size):
ri = np.ones(grid_size, dtype=float) * self.medium_index
fl = np.zeros(grid_size, dtype=float)
for pp in self.points:
# ODTbrain convention
cy, cz, cx = np.array(pp/pixel_size, dtype=int)
ri[cx, cy, cz] = self.object_index
fl[cx, cy, cz] = self.fl_brightness
return ri, fl
def transform(self, x=0, y=0, z=0, rot_main=0, rot_in_plane=0,
rot_perp_plane=0):
"""Rotate and translate self.points
Notes
-----
- By convention, sinogram generation in cellsino is performed by
modifying the pitch (rotation about y-axis).
- Rotation is performed prior to translation. First, the points
are rotated about the y-axis (``rot_main``, the main sinogram
acquisition angle). Second, the points are rotated about the
x-axis (``rot_perp_plane``, perpendicular to the imaging plane).
Third, the points are rotated about the z-axis (``rot_in_plane``,
within the imaging plane).
"""
# The definition of the angles are such that the sinogram
# can be plugged right into ODTbrain/radontea without
# transposing it (when only `rot_main` is set).
alpha = -rot_main
beta = rot_perp_plane
gamma = rot_in_plane
Rx = np.array([
[1, 0, 0],
[0, np.cos(alpha), -np.sin(alpha)],
[0, np.sin(alpha), np.cos(alpha)],
])
Ry = np.array([
[np.cos(beta), 0, np.sin(beta)],
[0, 1, 0],
[-np.sin(beta), 0, np.cos(beta)],
])
Rz = np.array([
[np.cos(gamma), -np.sin(gamma), 0],
[np.sin(gamma), np.cos(gamma), 0],
[0, 0, 1],
])
R = np.dot(np.dot(Ry, Rz), Rx)
rotated = np.dot(R, self.points.T).T
rotated_pad = np.pad(rotated, ((0, 0), (0, 1)), mode="constant",
constant_values=1)
T = np.array([[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1],
])
translated = np.dot(T, rotated_pad.T)[:-1].T
# return a copy of the current instance with points transformed
telement = copy.copy(self)
telement.points = translated
return telement
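# Editor's sketch (not part of the class): transform() is a rotation followed by a
# translation, so pairwise distances between an element's points must be preserved.
# A cheap invariant check for any element with at least two points:
def _transform_preserves_distances(element, **kwargs):
    import numpy as np
    telement = element.transform(**kwargs)
    d_before = np.linalg.norm(element.points[0] - element.points[-1])
    d_after = np.linalg.norm(telement.points[0] - telement.points[-1])
    return np.allclose(d_before, d_after)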
|
[
"numpy.pad",
"numpy.zeros",
"numpy.ones",
"copy.copy",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.dot"
] |
[((1179, 1195), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (1187, 1195), True, 'import numpy as np\n'), ((1318, 1350), 'numpy.zeros', 'np.zeros', (['grid_size'], {'dtype': 'float'}), '(grid_size, dtype=float)\n', (1326, 1350), True, 'import numpy as np\n'), ((3355, 3424), 'numpy.pad', 'np.pad', (['rotated', '((0, 0), (0, 1))'], {'mode': '"""constant"""', 'constant_values': '(1)'}), "(rotated, ((0, 0), (0, 1)), mode='constant', constant_values=1)\n", (3361, 3424), True, 'import numpy as np\n'), ((3467, 3533), 'numpy.array', 'np.array', (['[[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]]'], {}), '([[1, 0, 0, x], [0, 1, 0, y], [0, 0, 1, z], [0, 0, 0, 1]])\n', (3475, 3533), True, 'import numpy as np\n'), ((3769, 3784), 'copy.copy', 'copy.copy', (['self'], {}), '(self)\n', (3778, 3784), False, 'import copy\n'), ((1253, 1284), 'numpy.ones', 'np.ones', (['grid_size'], {'dtype': 'float'}), '(grid_size, dtype=float)\n', (1260, 1284), True, 'import numpy as np\n'), ((1441, 1477), 'numpy.array', 'np.array', (['(pp / pixel_size)'], {'dtype': 'int'}), '(pp / pixel_size, dtype=int)\n', (1449, 1477), True, 'import numpy as np\n'), ((3268, 3282), 'numpy.dot', 'np.dot', (['Ry', 'Rz'], {}), '(Ry, Rz)\n', (3274, 3282), True, 'import numpy as np\n'), ((3306, 3330), 'numpy.dot', 'np.dot', (['R', 'self.points.T'], {}), '(R, self.points.T)\n', (3312, 3330), True, 'import numpy as np\n'), ((3645, 3669), 'numpy.dot', 'np.dot', (['T', 'rotated_pad.T'], {}), '(T, rotated_pad.T)\n', (3651, 3669), True, 'import numpy as np\n'), ((2691, 2704), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (2697, 2704), True, 'import numpy as np\n'), ((2749, 2762), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (2755, 2762), True, 'import numpy as np\n'), ((2765, 2778), 'numpy.cos', 'np.cos', (['alpha'], {}), '(alpha)\n', (2771, 2778), True, 'import numpy as np\n'), ((2854, 2866), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (2860, 2866), True, 'import numpy as np\n'), ((2872, 2884), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (2878, 2884), True, 'import numpy as np\n'), ((2984, 2996), 'numpy.cos', 'np.cos', (['beta'], {}), '(beta)\n', (2990, 2996), True, 'import numpy as np\n'), ((3072, 3085), 'numpy.cos', 'np.cos', (['gamma'], {}), '(gamma)\n', (3078, 3085), True, 'import numpy as np\n'), ((3130, 3143), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (3136, 3143), True, 'import numpy as np\n'), ((3146, 3159), 'numpy.cos', 'np.cos', (['gamma'], {}), '(gamma)\n', (3152, 3159), True, 'import numpy as np\n'), ((2707, 2720), 'numpy.sin', 'np.sin', (['alpha'], {}), '(alpha)\n', (2713, 2720), True, 'import numpy as np\n'), ((2967, 2979), 'numpy.sin', 'np.sin', (['beta'], {}), '(beta)\n', (2973, 2979), True, 'import numpy as np\n'), ((3088, 3101), 'numpy.sin', 'np.sin', (['gamma'], {}), '(gamma)\n', (3094, 3101), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import time
import numpy as np
from litex import RemoteClient
wb = RemoteClient()
wb.open()
# # #
x = np.linspace(0,2 * np.pi, 1000)
sine = (2**15 * np.sin(x)) + 2**15
sine = sine.astype('int').tolist()
print("artistic sine output...")
i = 0
while True:
i = (i + 1) % 1000
wb.regs.dac_dacval.write(sine[i])
#time.sleep(0.001)
# # #
wb.close()
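# Editor's note (sketch): the lookup table maps sin(x) from [-1, 1] onto unsigned codes
# centred at mid-scale 2**15, so every entry should fit a hypothetical 16-bit DAC register:
#
#   assert 0 <= min(sine) <= max(sine) <= 2**16 - 1
#
# (the maximum stays just below 2**16 because pi/2 is not hit exactly by the 1000-point
# linspace, and astype('int') truncates toward zero).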
|
[
"litex.RemoteClient",
"numpy.sin",
"numpy.linspace"
] |
[((93, 107), 'litex.RemoteClient', 'RemoteClient', ([], {}), '()\n', (105, 107), False, 'from litex import RemoteClient\n'), ((130, 161), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (141, 161), True, 'import numpy as np\n'), ((177, 186), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (183, 186), True, 'import numpy as np\n')]
|
## License: Apache 2.0. See LICENSE file in root directory.
## Copyright(c) 2017 Intel Corporation. All Rights Reserved.
#####################################################
## Align Depth to Color ##
#####################################################
# First import the library
import pyrealsense2 as rs
# Import Numpy for easy array manipulation
import numpy as np
import sys,os
# Import OpenCV for easy image rendering
import cv2
import json
print(os.path.expanduser("~"))
jsonObj = json.load(open("custom.json"))
json_string= str(jsonObj).replace("'", '\"')
#print(json_string)
path = os.path.expanduser("~")
# Create a pipeline
pipeline = rs.pipeline()
i = 765
#Create a config and configure the pipeline to stream
# different resolutions of color and depth streams
config = rs.config()
freq=int(jsonObj['stream-fps'])
print("W: ", int(jsonObj['stream-width']))
print("H: ", int(jsonObj['stream-height']))
print("FPS: ", int(jsonObj['stream-fps']))
config.enable_stream(rs.stream.depth, int(jsonObj['stream-width']), int(jsonObj['stream-height']), rs.format.z16, int(jsonObj['stream-fps']))
config.enable_stream(rs.stream.color, int(jsonObj['stream-width']), int(jsonObj['stream-height']), rs.format.bgr8, int(jsonObj['stream-fps']))
profile = pipeline.start(config)
dev = profile.get_device()
advnc_mode = rs.rs400_advanced_mode(dev)
advnc_mode.load_json(json_string)
# config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
# config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
#profile = pipeline.start(config)
# Getting the depth sensor's depth scale (see rs-align example for explanation)
depth_sensor = profile.get_device().first_depth_sensor()
depth_scale = depth_sensor.get_depth_scale()
print("Depth Scale is: " , depth_scale)
# We will be removing the background of objects more than
# clipping_distance_in_meters meters away
clipping_distance_in_meters = 0.375  # clip anything farther than 0.375 m
clipping_distance = clipping_distance_in_meters / depth_scale
bg_depth_image_fill = np.load('/home/kittipong/dataset_assemble/bg_image_fillter1.npy')
bg_depth_image_raw = np.load('/home/kittipong/dataset_assemble/bg_image1.npy')
# Create an align object
# rs.align allows us to perform alignment of depth frames to others frames
# The "align_to" is the stream type to which we plan to align depth frames.
align_to = rs.stream.color
align = rs.align(align_to)
# Streaming loop
try:
while True:
# Get frameset of color and depth
frames = pipeline.wait_for_frames()
# frames.get_depth_frame() is a 640x360 depth image
# Align the depth frame to color frame
aligned_frames = align.process(frames)
# Get aligned frames
aligned_depth_frame = aligned_frames.get_depth_frame() # aligned_depth_frame is a 640x480 depth image
color_frame = aligned_frames.get_color_frame()
# Validate that both frames are valid
if not aligned_depth_frame or not color_frame:
continue
#colorizer = rs.colorizer(0)
depth_image_raw = np.asanyarray(aligned_depth_frame.get_data())
depth_to_disparity = rs.disparity_transform(True)
disparity_to_depth = rs.disparity_transform(False)
spatial = rs.spatial_filter()
spatial.set_option(rs.option.filter_magnitude, 2)
spatial.set_option(rs.option.filter_smooth_alpha, 0.5)
spatial.set_option(rs.option.filter_smooth_delta, 20)
spatial.set_option(rs.option.holes_fill, 3)
hole_filling = rs.hole_filling_filter()
temporal = rs.temporal_filter()
depth_filter = depth_to_disparity.process(aligned_depth_frame)
depth_filter = spatial.process(depth_filter)
depth_filter = temporal.process(depth_filter)
depth_filter = disparity_to_depth.process(depth_filter)
depth_filter = hole_filling.process(depth_filter)
colorizer = rs.colorizer(2)
depth_image_fill = np.asanyarray(depth_filter.get_data())
depth_image_pre = np.asanyarray(aligned_depth_frame.get_data())
depth_image = np.asanyarray(colorizer.colorize(aligned_depth_frame).get_data())
color_image = np.asanyarray(color_frame.get_data())
depth_image_fillter = np.asanyarray(colorizer.colorize(depth_filter).get_data())
# Remove background - Set pixels further than clipping_distance to grey
grey_color = 153
#depth_image_3d = np.dstack((depth_image,depth_image,depth_image)) #depth image is 1 channel, color is 3 channels
depth_image_fill_8bit = depth_image_fill.astype("uint8")
depth_image_3d = np.dstack((depth_image_pre,depth_image_pre,depth_image_pre))
bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, color_image)
depth_bg_removed = np.where((depth_image_3d > clipping_distance) | (depth_image_3d <= 0), grey_color, depth_image)
raw_depth_bg_remove = np.where((depth_image_raw > clipping_distance) | (depth_image_raw <= 0), 0, depth_image_raw)
fillter_depth_bg_remove = np.where((depth_image_fill > clipping_distance) | (depth_image_fill <= 0), 0, depth_image_fill)
#depth_image_raw_8 = cv2.convertScaleAbs(depth_image_pre, alpha=0.03)
depth_image_raw_8 = depth_image_raw.astype('uint8')
depth_image_diff = np.asanyarray(bg_depth_image_raw-depth_image_raw)
depth_image_diff_8bit = depth_image_diff.astype('uint8')
depth_image_diff_8bit = (np.where((depth_image_raw > bg_depth_image_raw),0,depth_image_diff_8bit)).astype("uint8")
depth_image_diff_fillter = np.asanyarray(bg_depth_image_fill-depth_image_fill)
depth_image_diff_fillter_8bit = depth_image_diff_fillter.astype('uint8')
depth_image_diff_fillter_8bit = (np.where((depth_image_fill > bg_depth_image_fill),0,depth_image_diff_fillter_8bit)).astype("uint8")
# Render images
#depth_colormap = cv2.applyColorMap(cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)
#depth_colormap = np.asanyarray(colorizer.colorize(depth_image).get_data())
images = np.hstack((color_image,depth_image,bg_removed))
images2 = np.hstack((np.dstack((raw_depth_bg_remove.astype('uint8'),raw_depth_bg_remove.astype('uint8'),raw_depth_bg_remove.astype('uint8'))),np.dstack((depth_image_diff_fillter_8bit,depth_image_diff_fillter_8bit,depth_image_diff_fillter_8bit)),depth_bg_removed))
images3 = np.vstack((images,images2))
test = np.dstack((color_image,depth_image_raw_8))
testfilter = np.dstack((color_image,depth_image_fill_8bit))
test1 = np.dstack((color_image,depth_image_diff_8bit))
test1filter = np.dstack((color_image,depth_image_diff_fillter_8bit))
test2 = np.dstack((color_image,raw_depth_bg_remove.astype('uint8')))
test2fillter = np.dstack((color_image,fillter_depth_bg_remove.astype('uint8')))
#print(np.min(depth_image_8bit))
cv2.namedWindow('Align Example', cv2.WINDOW_AUTOSIZE)
cv2.imshow('Align Example',cv2.resize(images3,(960,480)))
key = cv2.waitKey(1)
if key == ord("r"):
bg_depth_image_raw = depth_image_raw
bg_depth_image_fill = depth_image_fill
np.save(path+'/dataset_assemble/bg_image1',bg_depth_image_raw)
np.save(path+'/dataset_assemble/bg_image_fillter1',bg_depth_image_fill)
if key == ord("s"):
cv2.imwrite(path+'/dataset_assemble/color_image'+str(i)+'.png', color_image)
cv2.imwrite(path+'/dataset_assemble/Depth/nonfilter/bg_remove'+str(i)+'.png',bg_removed)
cv2.imwrite(path+'/dataset_assemble/Depth/nonfilter/depth_image'+str(i)+'.png', depth_image)
cv2.imwrite(path+'/dataset_assemble/Depth/nonfilter/fuse_image_8bit'+str(i)+'.png', test)
cv2.imwrite(path+'/dataset_assemble/Depth/nonfilter/fuse_image_bgremove'+str(i)+'.png', test1)
cv2.imwrite(path+'/dataset_assemble/Depth/nonfilter/fuse_image_bgremove_v2_'+str(i)+'.png', test2)
np.save(path+'/dataset_assemble/Depth/nonfilter/depth_image_raw'+str(i), depth_image_raw)
cv2.imwrite(path+'/dataset_assemble/Depth/filter/depth_image'+str(i)+'.png',depth_image_fillter)
cv2.imwrite(path+'/dataset_assemble/Depth/filter/fuse_image_8bit'+str(i)+'.png',testfilter)
cv2.imwrite(path+'/dataset_assemble/Depth/filter/fuse_image_bgremove'+str(i)+'.png',test1filter )
cv2.imwrite(path+'/dataset_assemble/Depth/filter/fuse_image_bgremove_v2_'+str(i)+'.png',test2fillter)
np.save(path+'/dataset_assemble/Depth/filter/depth_image_raw'+str(i), depth_image_fill)
print(i)
i=i+1
# Press esc or 'q' to close the image window
if key & 0xFF == ord('q') or key == 27:
cv2.destroyAllWindows()
break
finally:
pipeline.stop()
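# Editor's note (sketch): depth frames are in device units, so the metric threshold has to
# be converted via depth_scale before it can be compared with raw depth values, which is
# what clipping_distance = clipping_distance_in_meters / depth_scale does above.  The
# pixels that keep their colour in bg_removed are exactly
#
#   keep = (depth_image_3d > 0) & (depth_image_3d <= clipping_distance)
#
# i.e. the boolean complement of the np.where(...) condition used in the loop.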
|
[
"numpy.load",
"pyrealsense2.disparity_transform",
"pyrealsense2.pipeline",
"pyrealsense2.temporal_filter",
"pyrealsense2.config",
"pyrealsense2.hole_filling_filter",
"cv2.destroyAllWindows",
"cv2.resize",
"numpy.dstack",
"pyrealsense2.rs400_advanced_mode",
"numpy.save",
"cv2.waitKey",
"pyrealsense2.align",
"pyrealsense2.colorizer",
"numpy.hstack",
"numpy.vstack",
"pyrealsense2.spatial_filter",
"numpy.asanyarray",
"numpy.where",
"os.path.expanduser",
"cv2.namedWindow"
] |
[((622, 645), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (640, 645), False, 'import sys, os\n'), ((677, 690), 'pyrealsense2.pipeline', 'rs.pipeline', ([], {}), '()\n', (688, 690), True, 'import pyrealsense2 as rs\n'), ((814, 825), 'pyrealsense2.config', 'rs.config', ([], {}), '()\n', (823, 825), True, 'import pyrealsense2 as rs\n'), ((1346, 1373), 'pyrealsense2.rs400_advanced_mode', 'rs.rs400_advanced_mode', (['dev'], {}), '(dev)\n', (1368, 1373), True, 'import pyrealsense2 as rs\n'), ((2054, 2119), 'numpy.load', 'np.load', (['"""/home/kittipong/dataset_assemble/bg_image_fillter1.npy"""'], {}), "('/home/kittipong/dataset_assemble/bg_image_fillter1.npy')\n", (2061, 2119), True, 'import numpy as np\n'), ((2141, 2198), 'numpy.load', 'np.load', (['"""/home/kittipong/dataset_assemble/bg_image1.npy"""'], {}), "('/home/kittipong/dataset_assemble/bg_image1.npy')\n", (2148, 2198), True, 'import numpy as np\n'), ((2410, 2428), 'pyrealsense2.align', 'rs.align', (['align_to'], {}), '(align_to)\n', (2418, 2428), True, 'import pyrealsense2 as rs\n'), ((484, 507), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (502, 507), False, 'import sys, os\n'), ((3183, 3211), 'pyrealsense2.disparity_transform', 'rs.disparity_transform', (['(True)'], {}), '(True)\n', (3205, 3211), True, 'import pyrealsense2 as rs\n'), ((3241, 3270), 'pyrealsense2.disparity_transform', 'rs.disparity_transform', (['(False)'], {}), '(False)\n', (3263, 3270), True, 'import pyrealsense2 as rs\n'), ((3289, 3308), 'pyrealsense2.spatial_filter', 'rs.spatial_filter', ([], {}), '()\n', (3306, 3308), True, 'import pyrealsense2 as rs\n'), ((3567, 3591), 'pyrealsense2.hole_filling_filter', 'rs.hole_filling_filter', ([], {}), '()\n', (3589, 3591), True, 'import pyrealsense2 as rs\n'), ((3611, 3631), 'pyrealsense2.temporal_filter', 'rs.temporal_filter', ([], {}), '()\n', (3629, 3631), True, 'import pyrealsense2 as rs\n'), ((3970, 3985), 'pyrealsense2.colorizer', 'rs.colorizer', (['(2)'], {}), '(2)\n', (3982, 3985), True, 'import pyrealsense2 as rs\n'), ((4698, 4760), 'numpy.dstack', 'np.dstack', (['(depth_image_pre, depth_image_pre, depth_image_pre)'], {}), '((depth_image_pre, depth_image_pre, depth_image_pre))\n', (4707, 4760), True, 'import numpy as np\n'), ((4780, 4879), 'numpy.where', 'np.where', (['((depth_image_3d > clipping_distance) | (depth_image_3d <= 0))', 'grey_color', 'color_image'], {}), '((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),\n grey_color, color_image)\n', (4788, 4879), True, 'import numpy as np\n'), ((4903, 5002), 'numpy.where', 'np.where', (['((depth_image_3d > clipping_distance) | (depth_image_3d <= 0))', 'grey_color', 'depth_image'], {}), '((depth_image_3d > clipping_distance) | (depth_image_3d <= 0),\n grey_color, depth_image)\n', (4911, 5002), True, 'import numpy as np\n'), ((5029, 5125), 'numpy.where', 'np.where', (['((depth_image_raw > clipping_distance) | (depth_image_raw <= 0))', '(0)', 'depth_image_raw'], {}), '((depth_image_raw > clipping_distance) | (depth_image_raw <= 0), 0,\n depth_image_raw)\n', (5037, 5125), True, 'import numpy as np\n'), ((5156, 5256), 'numpy.where', 'np.where', (['((depth_image_fill > clipping_distance) | (depth_image_fill <= 0))', '(0)', 'depth_image_fill'], {}), '((depth_image_fill > clipping_distance) | (depth_image_fill <= 0), \n 0, depth_image_fill)\n', (5164, 5256), True, 'import numpy as np\n'), ((5418, 5469), 'numpy.asanyarray', 'np.asanyarray', (['(bg_depth_image_raw - depth_image_raw)'], {}), 
'(bg_depth_image_raw - depth_image_raw)\n', (5431, 5469), True, 'import numpy as np\n'), ((5692, 5745), 'numpy.asanyarray', 'np.asanyarray', (['(bg_depth_image_fill - depth_image_fill)'], {}), '(bg_depth_image_fill - depth_image_fill)\n', (5705, 5745), True, 'import numpy as np\n'), ((6202, 6251), 'numpy.hstack', 'np.hstack', (['(color_image, depth_image, bg_removed)'], {}), '((color_image, depth_image, bg_removed))\n', (6211, 6251), True, 'import numpy as np\n'), ((6540, 6568), 'numpy.vstack', 'np.vstack', (['(images, images2)'], {}), '((images, images2))\n', (6549, 6568), True, 'import numpy as np\n'), ((6583, 6626), 'numpy.dstack', 'np.dstack', (['(color_image, depth_image_raw_8)'], {}), '((color_image, depth_image_raw_8))\n', (6592, 6626), True, 'import numpy as np\n'), ((6647, 6694), 'numpy.dstack', 'np.dstack', (['(color_image, depth_image_fill_8bit)'], {}), '((color_image, depth_image_fill_8bit))\n', (6656, 6694), True, 'import numpy as np\n'), ((6710, 6757), 'numpy.dstack', 'np.dstack', (['(color_image, depth_image_diff_8bit)'], {}), '((color_image, depth_image_diff_8bit))\n', (6719, 6757), True, 'import numpy as np\n'), ((6779, 6834), 'numpy.dstack', 'np.dstack', (['(color_image, depth_image_diff_fillter_8bit)'], {}), '((color_image, depth_image_diff_fillter_8bit))\n', (6788, 6834), True, 'import numpy as np\n'), ((7052, 7105), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Align Example"""', 'cv2.WINDOW_AUTOSIZE'], {}), "('Align Example', cv2.WINDOW_AUTOSIZE)\n", (7067, 7105), False, 'import cv2\n'), ((7189, 7203), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7200, 7203), False, 'import cv2\n'), ((7141, 7172), 'cv2.resize', 'cv2.resize', (['images3', '(960, 480)'], {}), '(images3, (960, 480))\n', (7151, 7172), False, 'import cv2\n'), ((7344, 7409), 'numpy.save', 'np.save', (["(path + '/dataset_assemble/bg_image1')", 'bg_depth_image_raw'], {}), "(path + '/dataset_assemble/bg_image1', bg_depth_image_raw)\n", (7351, 7409), True, 'import numpy as np\n'), ((7419, 7493), 'numpy.save', 'np.save', (["(path + '/dataset_assemble/bg_image_fillter1')", 'bg_depth_image_fill'], {}), "(path + '/dataset_assemble/bg_image_fillter1', bg_depth_image_fill)\n", (7426, 7493), True, 'import numpy as np\n'), ((8928, 8951), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (8949, 8951), False, 'import cv2\n'), ((5566, 5638), 'numpy.where', 'np.where', (['(depth_image_raw > bg_depth_image_raw)', '(0)', 'depth_image_diff_8bit'], {}), '(depth_image_raw > bg_depth_image_raw, 0, depth_image_diff_8bit)\n', (5574, 5638), True, 'import numpy as np\n'), ((5866, 5952), 'numpy.where', 'np.where', (['(depth_image_fill > bg_depth_image_fill)', '(0)', 'depth_image_diff_fillter_8bit'], {}), '(depth_image_fill > bg_depth_image_fill, 0,\n depth_image_diff_fillter_8bit)\n', (5874, 5952), True, 'import numpy as np\n'), ((6400, 6508), 'numpy.dstack', 'np.dstack', (['(depth_image_diff_fillter_8bit, depth_image_diff_fillter_8bit,\n depth_image_diff_fillter_8bit)'], {}), '((depth_image_diff_fillter_8bit, depth_image_diff_fillter_8bit,\n depth_image_diff_fillter_8bit))\n', (6409, 6508), True, 'import numpy as np\n')]
|
import numpy as np
penalty_12 = [ 'l2']#'l1',
penalty_12none = ['l1', 'l2', None]
penalty_all = ['l1', 'l2', None, 'elasticnet']
penalty_12e = ['l1', 'l2', 'elasticnet']
max_iter = [100 , 300, 1000]
max_iter_inf = [100 , 300, 500, 1000, np.inf]
max_iter_inf2 = [100 , 300, 500, 1000, -1]
n_iter = [5 , 10, 20]
tol = [1e-4, 1e-3, 1e-2]
alpha = [1e-5, 1e-4, 1e-3, 1e-2, 0.1, 1, 3, 10]
alpha_small = [1e-5, 1e-3, 0.1 , 1]
C = [1e-2, 0.1 , 1, 5, 10]
C_small = [ 0.1, 1 , 5]
degree = [1, 2, 3, 4, 5]
eta0 = [1e-4, 1e-3, 1e-2, 0.1]
epsilon = [1e-3, 1e-2, 0.1, 0]
warm_start = [True, False]
normalize = [True, False]
shrinking = [True, False]
kernel = ['linear']#, 'rbf', 'sigmoid', 'poly']#, they are very slow
gamma = list(np.logspace(-9, 3, 6)) + ['auto']
gamma_small = list(np.logspace(-6, 3, 3)) + ['auto']
coef0 = [0, 0.1, 0.3, 0.5, 0.7, 1]
coef0_small = [0, 0.4, 0.7, 1]
nu = [1e-4, 1e-2, 0.1, 0.3, 0.5, 0.75, 0.9]
nu_small = [1e-2, 0.1, 0.5, 0.9]
n_estimators = [2, 3, 5, 10, 25, 50, 100]
n_estimators_small = [2, 10, 25, 100]
n_neighbors = [1, 3, 10, 15, 30]
neighbor_leaf_size = [1, 2, 5, 10, 20, 30, 50, 100]
neighbor_radius = [1e-2, 0.1, 1, 5, 10]
neighbor_algo = ['ball_tree' , 'kd_tree', 'brute']
neighbor_metric = ['cityblock' , 'euclidean', 'l1', 'l2', 'manhattan']
learning_rate = ['invscaling', 'adaptive', 'constant' ]
learning_rate_small = ['invscaling', 'adaptive']
max_features = [None, 'auto', 'log2', 3, 5, 10, 25, 50]
max_features_small = [None, 'auto', 'log2', 3, 5, 10]
max_depth = [None, 3, 8, 20, 50]
max_depth_small = [None, 5, 10]
min_samples_split = [2, 5, 10, 0.1]
min_impurity_split = [1e-7, 1e-6, 1e-5, 1e-4, 1e-3]
min_samples_leaf = [2]
#tree_learning_rate = [0.8, 1]
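# Editor's sketch (not part of the original file): these lists are meant as building blocks
# for scikit-learn hyper-parameter grids; a hypothetical search assembled from them could
# look like
#
#   from sklearn.linear_model import LogisticRegression
#   from sklearn.model_selection import GridSearchCV
#   search = GridSearchCV(LogisticRegression(),
#                         param_grid={'penalty': penalty_12, 'C': C, 'max_iter': max_iter},
#                         cv=5)
#
# with the *_small variants intended as cheaper grids for slower estimators.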
|
[
"numpy.logspace"
] |
[((962, 983), 'numpy.logspace', 'np.logspace', (['(-9)', '(3)', '(6)'], {}), '(-9, 3, 6)\n', (973, 983), True, 'import numpy as np\n'), ((1023, 1044), 'numpy.logspace', 'np.logspace', (['(-6)', '(3)', '(3)'], {}), '(-6, 3, 3)\n', (1034, 1044), True, 'import numpy as np\n')]
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plot
import math, numpy, scipy, cvxpy, pprint, itertools  # math.sqrt is used in plot_xy_stability_region
from scipy.spatial import ConvexHull
from patch import *
def random_2d_convex_hull():
ps = numpy.random.rand(30, 2)
hull = ConvexHull(ps)
print("ps= {}".format(ps) )
plot.plot(ps[:,0], ps[:,1], 'o')
for simplex in hull.simplices:
plot.plot(ps[simplex, 0], ps[simplex, 1], 'k-')
plot.plot(ps[hull.vertices,0], ps[hull.vertices,1], 'r--', lw=2)
plot.plot(ps[hull.vertices[0],0], ps[hull.vertices[0],1], 'ro')
# plot.show()
plot.savefig("random_2d_convex_hull.png")
plot.gcf().clear()
def stability_region_mds_4_2():
ps = numpy.array([ [0, 0],
[2.5, 0], [0, 2.5], [2, 0], [0, 2], [2, 1], [1, 2] ] )
hull = ConvexHull(ps)
for simplex in hull.simplices:
plot.plot(ps[simplex, 0], ps[simplex, 1], 'k-')
plot.plot(ps[hull.vertices,0], ps[hull.vertices,1], 'r--', lw=2)
plot.plot(ps[hull.vertices[0],0], ps[hull.vertices[0],1], 'ro')
# plot.show()
plot.savefig("stability_region_mds_4_2.png")
plot.gcf().clear()
def opt():
# Problem data.
m = 30
n = 20
numpy.random.seed(1)
A = numpy.random.randn(m, n)
b = numpy.random.randn(m)
# Construct the problem.
x = Variable(n)
obj = Minimize(sum_squares(A*x - b))
constraints = [0 <= x, x <= 1]
prob = Problem(obj, constraints)
# The optimal obj is returned by prob.solve().
result = prob.solve()
# The optimal value for x is stored in x.value.
print("x= {}".format(x.value) )
# The optimal Lagrange multiplier for a constraint is stored in constraint.dual_value.
print("constraints[0].dual_value= {}".format(constraints[0].dual_value) )
def plot_hull_of_ps(p_l, fname, title):
ps = numpy.empty((len(p_l), 2))
for i,p in enumerate(p_l):
# ps[i,:] = [[p[0], p[1]]]
ps[i,0] = p[0]
ps[i,1] = p[1]
print("ps= {}".format(ps) )
plot.plot(ps[:,0], ps[:,1], 'o')
"""
hull = ConvexHull(ps)
for simplex in hull.simplices:
plot.plot(ps[simplex, 0], ps[simplex, 1], 'k-')
plot.plot(ps[hull.vertices,0], ps[hull.vertices,1], 'r--', lw=2)
plot.plot(ps[hull.vertices[0],0], ps[hull.vertices[0],1], 'ro')
"""
# plot.show()
# axes = plot.gca()
# axes.set_xlim([0, 2] )
plot.title(title)
plot.savefig(fname)
plot.gcf().clear()
"""
# Matrix factorization
alpha = 0.002 # 0.0002
D = numpy.zeros((n,2))
step = 0
while step < 100000:
step += 1
for i in range(n):
for j in range(2):
if M[i,j] == 1:
e_ij = M[i,j] - numpy.dot(D[i,:], C[:,j])
for k in range(2):
D[i][k] = D[i][k] + alpha*(2*e_ij * C[k][j] )
# print("step= {}, D=\n{}".format(step, D) )
e = 0
for i in range(n):
for j in range(2):
e = e + pow(M[i][j] - numpy.dot(D[i,:], C[:,j]), 2)
if e < 0.1:
break
print("step= {}, D=\n{}".format(step, D) )
M_ = numpy.dot(D, C)
print("M=\n{}".format(M_) )
"""
def generator_matrix_to_M_C(G):
n = G.shape[1]
s_rg_l = [[], []]
for s in range(0, 2):
for c in range(n):
m = numpy.column_stack((G[:,s], G[:,c]))
if numpy.linalg.det(m) == 0: # c is a systematic node for s
s_rg_l[s].append((c,))
for subset in itertools.combinations(range(n), 2):
if s in subset:
continue
m = numpy.column_stack((G[:,subset[0]], G[:,subset[1]]))
# print("m= {}".format(m) )
if numpy.linalg.det(m):
# print("columns {}, {} are LI".format(os, c) )
s_rg_l[s].append((subset[0], subset[1]))
print("s_rg_l= {}".format(pprint.pformat(s_rg_l) ) )
r_0 = len(s_rg_l[0] )
r_1 = len(s_rg_l[1] )
r = r_0 + r_1
# if r != len(s_rg_l[1] ):
# log(ERROR, "Code was supposed to be symmetric, but it is not.")
# return 1
x = s_rg_l[0] + s_rg_l[1]
M = numpy.zeros((n,r))
for i in range(n):
for j in range(r):
if i in x[j]:
M[i,j] = 1
print("M= {}".format(M) )
C = numpy.zeros((2 ,r))
C[0,0:r_0] = 1
C[1,r_0:r] = 1
print("C= {}".format(C) )
return r, M, C
def generator_matrix(code, n, k=2):
if k != 2:
log(ERROR, "Only for k=2")
return 1
if code == 'MDS':
G = numpy.zeros((2, n))
for j in range(n):
if j == 0:
G[0,j] = 1
G[1,j] = 0
elif j == 1:
G[0,j] = 0
G[1,j] = 1
else:
G[0,j] = j-1
G[1,j] = 1
return G
else:
log(ERROR, "unexpected code= {}".format(code) )
return 1
def plot_xy_stability_region(n):
# Rep(2)
# G = numpy.matrix([[1,0], [0,1]])
# G = numpy.matrix([[1,0], [0,1], [1,0], [0,1] ])
# MDS(3,2)
# G = numpy.matrix([[1,0], [0,1], [1,1]])
# MDS(4,2)
# G = numpy.matrix([[1,0], [0,1], [1,1], [2,1]])
# MDS(5,2)
# G = numpy.matrix([[1,0], [0,1], [1,1], [2,1], [3,1]])
# MDS(6,2)
# G = numpy.matrix([[1,0], [0,1], [1,1], [2,1], [3,1], [4,1]])
# MDS(7,2)
# G = numpy.matrix([[1,0], [0,1], [1,1], [2,1], [3,1], [4,1], [5,1]])
# Mixed
# G = numpy.matrix([[1,0], [0,1], [1,1], [2,1], [3,1], [1,0] ])
# G = numpy.matrix([[1,0], [0,1], [1,1], [2,1], [3,1], [1,1] ])
# G = numpy.matrix([[1,0], [0,1], [1,1], [1,0], [0,1], [1,1] ])
# G = G.transpose()
code = 'MDS' # 'Rep' # 'MDS' # 'Mixed'
G = generator_matrix(code, n)
print("G= {}".format(G) )
n = G.shape[1]
r, M, C = generator_matrix_to_M_C(G)
p_l = []
#
x = cvxpy.Variable(r, 1, name='x')
for b in numpy.linspace(0, 1, 20):
# print("b= {}".format(b) )
length = math.sqrt((1-b)**2 + b**2)
w = numpy.matrix([[(1-b)/length, b/length]] )
# print("w.shape= {}, w= {}".format(w.shape, w) )
w_ = w*C
# print("w_= {}".format(w_) )
# obj = cvxpy.Maximize(w*(C*x) )
obj = cvxpy.Maximize(w_*x)
# print("obj= {}".format(obj) )
constraints = [M*x == 1, x >= 0] # [M*x <= 1, x >= 0]
prob = cvxpy.Problem(obj, constraints)
# print("prob= {}".format(prob) )
prob.solve()
print("status= {}".format(prob.status) )
# print("optimal value= {}".format(prob.value) )
y = C*(x.value)
# print("optimal y= {}".format(y) )
p_l.append((y[0], y[1]) )
plot_hull_of_ps(p_l, "plot_xy_stability_region_{}_n_{}.png".format(code, n),
title='{}, n= {}, k= 2'.format(code, n) )
log(WARNING, "done, code= {}, n= {}".format(code, n) )
if __name__ == "__main__":
# random_2d_convex_hull()
# stability_region_mds_4_2()
# opt()
plot_xy_stability_region(n=4)
# for n in range(3, 10):
# plot_xy_stability_region(n)
# plot_xy_stability_region(n=100)
|
[
"matplotlib.pyplot.title",
"pprint.pformat",
"numpy.random.seed",
"cvxpy.Maximize",
"numpy.random.randn",
"cvxpy.Problem",
"numpy.linspace",
"numpy.linalg.det",
"matplotlib.use",
"cvxpy.Variable",
"matplotlib.pyplot.gcf",
"scipy.spatial.ConvexHull",
"numpy.matrix",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.array",
"numpy.column_stack",
"numpy.random.rand",
"matplotlib.pyplot.savefig"
] |
[((18, 39), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (32, 39), False, 'import matplotlib\n'), ((213, 237), 'numpy.random.rand', 'numpy.random.rand', (['(30)', '(2)'], {}), '(30, 2)\n', (230, 237), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((247, 261), 'scipy.spatial.ConvexHull', 'ConvexHull', (['ps'], {}), '(ps)\n', (257, 261), False, 'from scipy.spatial import ConvexHull\n'), ((300, 334), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[:, 0]', 'ps[:, 1]', '"""o"""'], {}), "(ps[:, 0], ps[:, 1], 'o')\n", (309, 334), True, 'import matplotlib.pyplot as plot\n'), ((420, 486), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[hull.vertices, 0]', 'ps[hull.vertices, 1]', '"""r--"""'], {'lw': '(2)'}), "(ps[hull.vertices, 0], ps[hull.vertices, 1], 'r--', lw=2)\n", (429, 486), True, 'import matplotlib.pyplot as plot\n'), ((487, 552), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[hull.vertices[0], 0]', 'ps[hull.vertices[0], 1]', '"""ro"""'], {}), "(ps[hull.vertices[0], 0], ps[hull.vertices[0], 1], 'ro')\n", (496, 552), True, 'import matplotlib.pyplot as plot\n'), ((569, 610), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""random_2d_convex_hull.png"""'], {}), "('random_2d_convex_hull.png')\n", (581, 610), True, 'import matplotlib.pyplot as plot\n'), ((672, 745), 'numpy.array', 'numpy.array', (['[[0, 0], [2.5, 0], [0, 2.5], [2, 0], [0, 2], [2, 1], [1, 2]]'], {}), '([[0, 0], [2.5, 0], [0, 2.5], [2, 0], [0, 2], [2, 1], [1, 2]])\n', (683, 745), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((762, 776), 'scipy.spatial.ConvexHull', 'ConvexHull', (['ps'], {}), '(ps)\n', (772, 776), False, 'from scipy.spatial import ConvexHull\n'), ((867, 933), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[hull.vertices, 0]', 'ps[hull.vertices, 1]', '"""r--"""'], {'lw': '(2)'}), "(ps[hull.vertices, 0], ps[hull.vertices, 1], 'r--', lw=2)\n", (876, 933), True, 'import matplotlib.pyplot as plot\n'), ((934, 999), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[hull.vertices[0], 0]', 'ps[hull.vertices[0], 1]', '"""ro"""'], {}), "(ps[hull.vertices[0], 0], ps[hull.vertices[0], 1], 'ro')\n", (943, 999), True, 'import matplotlib.pyplot as plot\n'), ((1016, 1060), 'matplotlib.pyplot.savefig', 'plot.savefig', (['"""stability_region_mds_4_2.png"""'], {}), "('stability_region_mds_4_2.png')\n", (1028, 1060), True, 'import matplotlib.pyplot as plot\n'), ((1132, 1152), 'numpy.random.seed', 'numpy.random.seed', (['(1)'], {}), '(1)\n', (1149, 1152), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((1159, 1183), 'numpy.random.randn', 'numpy.random.randn', (['m', 'n'], {}), '(m, n)\n', (1177, 1183), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((1190, 1211), 'numpy.random.randn', 'numpy.random.randn', (['m'], {}), '(m)\n', (1208, 1211), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((1897, 1931), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[:, 0]', 'ps[:, 1]', '"""o"""'], {}), "(ps[:, 0], ps[:, 1], 'o')\n", (1906, 1931), True, 'import matplotlib.pyplot as plot\n'), ((2254, 2271), 'matplotlib.pyplot.title', 'plot.title', (['title'], {}), '(title)\n', (2264, 2271), True, 'import matplotlib.pyplot as plot\n'), ((2274, 2293), 'matplotlib.pyplot.savefig', 'plot.savefig', (['fname'], {}), '(fname)\n', (2286, 2293), True, 'import matplotlib.pyplot as plot\n'), ((3773, 3792), 'numpy.zeros', 'numpy.zeros', (['(n, r)'], {}), '((n, r))\n', (3784, 3792), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((3912, 3931), 'numpy.zeros', 'numpy.zeros', (['(2, r)'], 
{}), '((2, r))\n', (3923, 3931), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((5333, 5363), 'cvxpy.Variable', 'cvxpy.Variable', (['r', '(1)'], {'name': '"""x"""'}), "(r, 1, name='x')\n", (5347, 5363), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((5375, 5399), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(20)'], {}), '(0, 1, 20)\n', (5389, 5399), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((370, 417), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[simplex, 0]', 'ps[simplex, 1]', '"""k-"""'], {}), "(ps[simplex, 0], ps[simplex, 1], 'k-')\n", (379, 417), True, 'import matplotlib.pyplot as plot\n'), ((817, 864), 'matplotlib.pyplot.plot', 'plot.plot', (['ps[simplex, 0]', 'ps[simplex, 1]', '"""k-"""'], {}), "(ps[simplex, 0], ps[simplex, 1], 'k-')\n", (826, 864), True, 'import matplotlib.pyplot as plot\n'), ((4136, 4155), 'numpy.zeros', 'numpy.zeros', (['(2, n)'], {}), '((2, n))\n', (4147, 4155), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((5486, 5532), 'numpy.matrix', 'numpy.matrix', (['[[(1 - b) / length, b / length]]'], {}), '([[(1 - b) / length, b / length]])\n', (5498, 5532), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((5676, 5698), 'cvxpy.Maximize', 'cvxpy.Maximize', (['(w_ * x)'], {}), '(w_ * x)\n', (5690, 5698), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((5802, 5833), 'cvxpy.Problem', 'cvxpy.Problem', (['obj', 'constraints'], {}), '(obj, constraints)\n', (5815, 5833), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((613, 623), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (621, 623), True, 'import matplotlib.pyplot as plot\n'), ((1063, 1073), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (1071, 1073), True, 'import matplotlib.pyplot as plot\n'), ((2296, 2306), 'matplotlib.pyplot.gcf', 'plot.gcf', ([], {}), '()\n', (2304, 2306), True, 'import matplotlib.pyplot as plot\n'), ((3038, 3076), 'numpy.column_stack', 'numpy.column_stack', (['(G[:, s], G[:, c])'], {}), '((G[:, s], G[:, c]))\n', (3056, 3076), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((3281, 3335), 'numpy.column_stack', 'numpy.column_stack', (['(G[:, subset[0]], G[:, subset[1]])'], {}), '((G[:, subset[0]], G[:, subset[1]]))\n', (3299, 3335), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((3377, 3396), 'numpy.linalg.det', 'numpy.linalg.det', (['m'], {}), '(m)\n', (3393, 3396), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((3531, 3553), 'pprint.pformat', 'pprint.pformat', (['s_rg_l'], {}), '(s_rg_l)\n', (3545, 3553), False, 'import numpy, scipy, cvxpy, pprint, itertools\n'), ((3084, 3103), 'numpy.linalg.det', 'numpy.linalg.det', (['m'], {}), '(m)\n', (3100, 3103), False, 'import numpy, scipy, cvxpy, pprint, itertools\n')]
|
from unittest import TestCase
import numpy as np
from uniqed.models.tof import TOF
import matplotlib.pyplot as plt
class TestTOF(TestCase):
def _gen_data(self, n=100, d=5):
return np.random.random(n*d).reshape([n, d])
def test_fit(self):
X = self._gen_data()
TOF().fit(X)
def test_predict(self):
X = self._gen_data()
TOF().fit(X).predict(X)
TOF(cutoff_n=70).fit(X).predict(X)
def test__get_outliers_inds(self):
X = self._gen_data()
TOF()._get_outliers_inds(TOF().fit(X).predict(X))
def test__find_nearest_neighbors(self):
X = self._gen_data()
tof = TOF().fit(X)
tof._find_nearest_neighbors(X)
tof = TOF().fit(X)
tof._find_nearest_neighbors(X, k=7)
def test__compute_cutoff(self):
X = self._gen_data()
tof = TOF().fit(X)
tof._compute_cutoff(cutoff_n=100)
with self.assertRaises(ValueError):
tof._compute_cutoff(cutoff_n='goosebump')
def test__compute_cutoff2(self):
X = self._gen_data()
tof = TOF(k=21).fit(X)
tof._compute_cutoff(cutoff_n=100)
def test__compute_p_value(self):
x = np.arange(100)
p = np.arange(0.01, 1.01, 0.01)
p_calculated = TOF()._compute_p_value(x)
is_equal = np.round(p, 2) == np.round(p_calculated, 2)
self.assertTrue(np.all(is_equal))
def test__compute_outlier_score(self):
X = self._gen_data()
TOF().fit(X)._compute_outlier_score(X)
def test__compute_tof(self):
X = self._gen_data()
nn_ids = np.array([[1, 3], [0, 2], [3,2], [0,1]])
ids = np.arange(4).reshape([4, 1])
score = np.mean((nn_ids-ids)**2, axis=1)**(1/2)
calcscore = TOF().fit(X)._compute_tof(nn_ids, ids)
is_eq = np.all(score==calcscore)
self.assertTrue(is_eq)
def test__compute_perc_cutoff(self):
X = self._gen_data()
z = np.arange(1, 101).astype(int)
cutoff = 10
perc = 100-cutoff+1
tof = TOF().fit(X)
tof.outlier_score_ = z
perc_cutoff = tof._compute_perc_cutoff(cutoff).astype(int)
self.assertTrue(perc_cutoff==perc)
|
[
"numpy.mean",
"numpy.array",
"numpy.arange",
"numpy.random.random",
"uniqed.models.tof.TOF",
"numpy.round",
"numpy.all"
] |
[((1204, 1218), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (1213, 1218), True, 'import numpy as np\n'), ((1231, 1258), 'numpy.arange', 'np.arange', (['(0.01)', '(1.01)', '(0.01)'], {}), '(0.01, 1.01, 0.01)\n', (1240, 1258), True, 'import numpy as np\n'), ((1613, 1655), 'numpy.array', 'np.array', (['[[1, 3], [0, 2], [3, 2], [0, 1]]'], {}), '([[1, 3], [0, 2], [3, 2], [0, 1]])\n', (1621, 1655), True, 'import numpy as np\n'), ((1828, 1854), 'numpy.all', 'np.all', (['(score == calcscore)'], {}), '(score == calcscore)\n', (1834, 1854), True, 'import numpy as np\n'), ((1327, 1341), 'numpy.round', 'np.round', (['p', '(2)'], {}), '(p, 2)\n', (1335, 1341), True, 'import numpy as np\n'), ((1345, 1370), 'numpy.round', 'np.round', (['p_calculated', '(2)'], {}), '(p_calculated, 2)\n', (1353, 1370), True, 'import numpy as np\n'), ((1395, 1411), 'numpy.all', 'np.all', (['is_equal'], {}), '(is_equal)\n', (1401, 1411), True, 'import numpy as np\n'), ((1713, 1749), 'numpy.mean', 'np.mean', (['((nn_ids - ids) ** 2)'], {'axis': '(1)'}), '((nn_ids - ids) ** 2, axis=1)\n', (1720, 1749), True, 'import numpy as np\n'), ((194, 217), 'numpy.random.random', 'np.random.random', (['(n * d)'], {}), '(n * d)\n', (210, 217), True, 'import numpy as np\n'), ((294, 299), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (297, 299), False, 'from uniqed.models.tof import TOF\n'), ((518, 523), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (521, 523), False, 'from uniqed.models.tof import TOF\n'), ((656, 661), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (659, 661), False, 'from uniqed.models.tof import TOF\n'), ((723, 728), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (726, 728), False, 'from uniqed.models.tof import TOF\n'), ((860, 865), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (863, 865), False, 'from uniqed.models.tof import TOF\n'), ((1094, 1103), 'uniqed.models.tof.TOF', 'TOF', ([], {'k': '(21)'}), '(k=21)\n', (1097, 1103), False, 'from uniqed.models.tof import TOF\n'), ((1282, 1287), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (1285, 1287), False, 'from uniqed.models.tof import TOF\n'), ((1668, 1680), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (1677, 1680), True, 'import numpy as np\n'), ((1968, 1985), 'numpy.arange', 'np.arange', (['(1)', '(101)'], {}), '(1, 101)\n', (1977, 1985), True, 'import numpy as np\n'), ((2060, 2065), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (2063, 2065), False, 'from uniqed.models.tof import TOF\n'), ((373, 378), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (376, 378), False, 'from uniqed.models.tof import TOF\n'), ((405, 421), 'uniqed.models.tof.TOF', 'TOF', ([], {'cutoff_n': '(70)'}), '(cutoff_n=70)\n', (408, 421), False, 'from uniqed.models.tof import TOF\n'), ((1494, 1499), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (1497, 1499), False, 'from uniqed.models.tof import TOF\n'), ((1773, 1778), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (1776, 1778), False, 'from uniqed.models.tof import TOF\n'), ((543, 548), 'uniqed.models.tof.TOF', 'TOF', ([], {}), '()\n', (546, 548), False, 'from uniqed.models.tof import TOF\n')]
|
"""
Created on Tue Mar 12 01:27:39 2019
@author: soumi
"""
from ast import literal_eval
import numpy as np
from skimage.draw import line_aa
from skimage.transform import resize
import imageio
#get bound of the image
def get_bounds(strokes):
min_x, max_x, min_y, max_y = (1000, 0, 1000, 0)
for stroke in strokes:
for x in stroke[0]:
min_x = min(min_x, x)
max_x = max(max_x, x)
for y in stroke[1]:
min_y = min(min_y, y)
max_y = max(max_y, y)
return (min_x, max_x, min_y, max_y)
# convert strokes to bitmap
def strokes_to_npy(strokes):
# if there are no strokes, return a black image of dimension 150 by 150
if len(strokes)==0:
dims=(150,150)
img = np.zeros(dims, dtype=np.uint8)
else:
min_x, max_x, min_y, max_y = get_bounds(strokes)
# Add boundary of 20 pixels
dims = (20 + max_x - min_x, 20 + max_y - min_y)
img = np.zeros(dims, dtype=np.uint8)
#fix according to binary
abs_x = min_x - 10
abs_y = min_y - 10
for stroke in strokes:
if len(stroke[0]) >1:
prev_x = stroke[0][0]-abs_x
prev_y = stroke[1][0]-abs_y
for i in range(len(stroke[0])-1):
dx = stroke[0][i+1]-abs_x
dy = stroke[1][i+1]-abs_y
rr, cc, val = line_aa(prev_x, prev_y, dx, dy)
img[rr, cc] = (val * 255).astype(np.uint8)
prev_x = dx
prev_y = dy
return img.T
# fit in square box
def reshape_to_square(img, size=512):
img_resize = resize(img, (size, size))
return img_resize
# create a square image of dimension 100 by 100
def strokeToSquareImage(strokes, size=100):
strokes = np.asarray(strokes)
img = strokes_to_npy(strokes)
img_resize = resize(img, (size, size))
return img_resize
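# A minimal usage sketch (illustrative coordinates; the stroke format is the one
# consumed by get_bounds/strokes_to_npy -- a list of strokes, each a pair of
# equal-length coordinate lists [xs, ys]):
#   strokes = [[[10, 40, 80], [20, 20, 60]],   # stroke 1: x-list, y-list
#              [[15, 15], [5, 70]]]            # stroke 2
#   img = strokeToSquareImage(strokes, size=100)   # -> 100x100 float image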
# convert the strokes to an image and save it as outfile.jpg in the local directory
def getImage(content):
img = strokeToSquareImage(content)
imageio.imwrite('outfile.jpg', 255-img)
# scipy.misc.imsave('outfile.jpg', 255-img)
return img
# provide full path of the file or save it locally
def readStrokes(fileName):
f = open(fileName, 'r')
x = f.read()
f.close()
strokes = literal_eval(str(x))
return strokes
if __name__ == '__main__':
strokes = readStrokes("temp")
getImage(strokes)
|
[
"skimage.draw.line_aa",
"numpy.asarray",
"numpy.zeros",
"skimage.transform.resize",
"imageio.imwrite"
] |
[((1669, 1694), 'skimage.transform.resize', 'resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (1675, 1694), False, 'from skimage.transform import resize\n'), ((1823, 1842), 'numpy.asarray', 'np.asarray', (['strokes'], {}), '(strokes)\n', (1833, 1842), True, 'import numpy as np\n'), ((1894, 1919), 'skimage.transform.resize', 'resize', (['img', '(size, size)'], {}), '(img, (size, size))\n', (1900, 1919), False, 'from skimage.transform import resize\n'), ((2078, 2119), 'imageio.imwrite', 'imageio.imwrite', (['"""outfile.jpg"""', '(255 - img)'], {}), "('outfile.jpg', 255 - img)\n", (2093, 2119), False, 'import imageio\n'), ((753, 783), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': 'np.uint8'}), '(dims, dtype=np.uint8)\n', (761, 783), True, 'import numpy as np\n'), ((958, 988), 'numpy.zeros', 'np.zeros', (['dims'], {'dtype': 'np.uint8'}), '(dims, dtype=np.uint8)\n', (966, 988), True, 'import numpy as np\n'), ((1415, 1446), 'skimage.draw.line_aa', 'line_aa', (['prev_x', 'prev_y', 'dx', 'dy'], {}), '(prev_x, prev_y, dx, dy)\n', (1422, 1446), False, 'from skimage.draw import line_aa\n')]
|
# Author: wangxy
# Email: <EMAIL>
import copy, math
import numpy as np
from numba import jit
from scipy.spatial import ConvexHull
def iou_batch(boxA, boxB):
boxA = [int(x) for x in boxA]
boxB = [int(x) for x in boxB]
xA = max(boxA[0], boxB[0])
yA = max(boxA[1], boxB[1])
xB = min(boxA[2], boxB[2])
yB = min(boxA[3], boxB[3])
interArea = max(0, xB - xA + 1) * max(0, yB - yA + 1)
boxAArea = (boxA[2] - boxA[0] + 1) * (boxA[3] - boxA[1] + 1)
boxBArea = (boxB[2] - boxB[0] + 1) * (boxB[3] - boxB[1] + 1)
iou = interArea / float(boxAArea + boxBArea - interArea)
return iou
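# A quick illustrative check (box format assumed to be [x1, y1, x2, y2], pixel-inclusive
# as implied by the "+ 1" terms above): two 10x10 boxes overlapping in a 5x10 strip give
#   iou_batch([0, 0, 9, 9], [5, 0, 14, 9])   # -> 50 / (100 + 100 - 50) ~= 0.33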
@jit
def poly_area(x, y):
""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
@jit
def box3d_vol(corners):
''' corners: (8,3) no assumption on axis direction '''
a = np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2))
b = np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2))
c = np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
return a * b * c
def convex_hull_intersection(p1, p2):
""" Compute area of two convex hull's intersection area.
p1,p2 are a list of (x,y) tuples of hull vertices.
return a list of (x,y) for the intersection and its volume
"""
inter_p = polygon_clip(p1, p2)
if inter_p is not None:
hull_inter = ConvexHull(inter_p)
return inter_p, hull_inter.volume
else:
return None, 0.0
def polygon_clip(subjectPolygon, clipPolygon):
""" Clip a polygon with another polygon.
Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
Args:
subjectPolygon: a list of (x,y) 2d points, any polygon.
clipPolygon: a list of (x,y) 2d points, has to be *convex*
Note:
**points have to be counter-clockwise ordered**
Return:
a list of (x,y) vertex point for the intersection polygon.
"""
def inside(p):
return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - cp1[0])
def computeIntersection():
dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
dp = [s[0] - e[0], s[1] - e[1]]
n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
n2 = s[0] * e[1] - s[1] * e[0]
n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
outputList = subjectPolygon
cp1 = clipPolygon[-1]
for clipVertex in clipPolygon:
cp2 = clipVertex
inputList = outputList
outputList = []
s = inputList[-1]
for subjectVertex in inputList:
e = subjectVertex
if inside(e):
if not inside(s): outputList.append(computeIntersection())
outputList.append(e)
elif inside(s):
outputList.append(computeIntersection())
s = e
cp1 = cp2
if len(outputList) == 0: return None
return (outputList)
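# A small illustrative call (made-up inputs, both listed counter-clockwise as the
# docstring requires): clipping the unit square against a shifted unit square
# leaves the 0.5 x 0.5 overlap.
#   subject = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
#   clip = [(0.5, 0.5), (1.5, 0.5), (1.5, 1.5), (0.5, 1.5)]
#   polygon_clip(subject, clip)   # -> the four corners of [0.5, 1.0] x [0.5, 1.0]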
def iou3d(corners1, corners2):
''' Compute 3D bounding box IoU, only working for object parallel to ground
Input:
corners1: numpy array (8,3), assume up direction is negative Y
corners2: numpy array (8,3), assume up direction is negative Y
Output:
iou: 3D bounding box IoU
iou_2d: bird's eye view 2D bounding box IoU
todo (rqi): add more description on corner points' orders.
'''
# corner points are in counter clockwise order
rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)]
rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)]
area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])
# inter_area = shapely_polygon_intersection(rect1, rect2)
_, inter_area = convex_hull_intersection(rect1, rect2)
# try:
# _, inter_area = convex_hull_intersection(rect1, rect2)
# except ValueError:
# inter_area = 0
# except scipy.spatial.qhull.QhullError:
# inter_area = 0
iou_2d = inter_area / (area1 + area2 - inter_area)
ymax = min(corners1[0, 1], corners2[0, 1])
ymin = max(corners1[4, 1], corners2[4, 1])
inter_vol = inter_area * max(0.0, ymax - ymin)
vol1 = box3d_vol(corners1)
vol2 = box3d_vol(corners2)
iou = inter_vol / (vol1 + vol2 - inter_vol)
return iou, iou_2d
def eucliDistance(detection, track):
# coefficient_det = math.sqrt(detection[0] ** 2 + detection[1] ** 2 + detection[2] ** 2)
# coefficient_trk = math.sqrt(track[0] ** 2 + track[1] ** 2 + track[2] ** 2)
# x = [i / coefficient_det for i in detection]
# y = [k / coefficient_trk for k in track]
# dist = math.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 + (x[2] - y[2]) ** 2)
dist = math.sqrt((detection[0] - track[0]) ** 2 + (detection[1] - track[1]) ** 2 + (detection[2] - track[2]) ** 2)
# dist = 1 /(1+dist) # Normalization
return dist
def roty(t):
''' Rotation about the y-axis. '''
c = np.cos(t)
s = np.sin(t)
return np.array([[c, 0, s],
[0, 1, 0],
[-s, 0, c]])
def convert_3dbox_to_8corner(bbox3d_input):
''' Takes an object's 3D box with the representation of [x,y,z,theta,l,w,h] and
convert it to the 8 corners of the 3D box
Returns:
corners_3d: (8,3) array in in rect camera coord
'''
# compute rotational matrix around yaw axis
bbox3d = copy.copy(bbox3d_input)
R = roty(bbox3d[3])
# 3d bounding box dimensions
l = bbox3d[4]
w = bbox3d[5]
h = bbox3d[6]
# 3d bounding box corners in the object's local frame (up is negative y, matching iou3d)
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2];
y_corners = [0, 0, 0, 0, -h, -h, -h, -h];
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2];
# rotate and translate 3d bounding box
corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))  # stack the three coordinate lists into a 3x8 array, then rotate
# print corners_3d.shape
corners_3d[0, :] = corners_3d[0, :] + bbox3d[0] # x
corners_3d[1, :] = corners_3d[1, :] + bbox3d[1] # y
corners_3d[2, :] = corners_3d[2, :] + bbox3d[2] # z
return np.transpose(corners_3d)
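# A minimal end-to-end sketch (illustrative boxes in the [x, y, z, theta, l, w, h]
# representation that convert_3dbox_to_8corner expects):
#   box_a = np.array([0.0, 1.5, 10.0, 0.0, 4.0, 1.8, 1.6])
#   box_b = np.array([0.5, 1.5, 10.0, 0.0, 4.0, 1.8, 1.6])
#   iou_3d, iou_bev = iou3d(convert_3dbox_to_8corner(box_a), convert_3dbox_to_8corner(box_b))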
|
[
"numpy.sum",
"math.sqrt",
"numpy.roll",
"copy.copy",
"numpy.transpose",
"numpy.sin",
"numpy.array",
"numpy.cos",
"scipy.spatial.ConvexHull",
"numpy.vstack"
] |
[((4886, 4997), 'math.sqrt', 'math.sqrt', (['((detection[0] - track[0]) ** 2 + (detection[1] - track[1]) ** 2 + (\n detection[2] - track[2]) ** 2)'], {}), '((detection[0] - track[0]) ** 2 + (detection[1] - track[1]) ** 2 +\n (detection[2] - track[2]) ** 2)\n', (4895, 4997), False, 'import copy, math\n'), ((5115, 5124), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (5121, 5124), True, 'import numpy as np\n'), ((5133, 5142), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (5139, 5142), True, 'import numpy as np\n'), ((5154, 5198), 'numpy.array', 'np.array', (['[[c, 0, s], [0, 1, 0], [-s, 0, c]]'], {}), '([[c, 0, s], [0, 1, 0], [-s, 0, c]])\n', (5162, 5198), True, 'import numpy as np\n'), ((5568, 5591), 'copy.copy', 'copy.copy', (['bbox3d_input'], {}), '(bbox3d_input)\n', (5577, 5591), False, 'import copy, math\n'), ((6344, 6368), 'numpy.transpose', 'np.transpose', (['corners_3d'], {}), '(corners_3d)\n', (6356, 6368), True, 'import numpy as np\n'), ((940, 984), 'numpy.sum', 'np.sum', (['((corners[0, :] - corners[1, :]) ** 2)'], {}), '((corners[0, :] - corners[1, :]) ** 2)\n', (946, 984), True, 'import numpy as np\n'), ((1002, 1046), 'numpy.sum', 'np.sum', (['((corners[1, :] - corners[2, :]) ** 2)'], {}), '((corners[1, :] - corners[2, :]) ** 2)\n', (1008, 1046), True, 'import numpy as np\n'), ((1064, 1108), 'numpy.sum', 'np.sum', (['((corners[0, :] - corners[4, :]) ** 2)'], {}), '((corners[0, :] - corners[4, :]) ** 2)\n', (1070, 1108), True, 'import numpy as np\n'), ((1449, 1468), 'scipy.spatial.ConvexHull', 'ConvexHull', (['inter_p'], {}), '(inter_p)\n', (1459, 1468), False, 'from scipy.spatial import ConvexHull\n'), ((6017, 6061), 'numpy.vstack', 'np.vstack', (['[x_corners, y_corners, z_corners]'], {}), '([x_corners, y_corners, z_corners])\n', (6026, 6061), True, 'import numpy as np\n'), ((3718, 3733), 'numpy.array', 'np.array', (['rect1'], {}), '(rect1)\n', (3726, 3733), True, 'import numpy as np\n'), ((3741, 3756), 'numpy.array', 'np.array', (['rect1'], {}), '(rect1)\n', (3749, 3756), True, 'import numpy as np\n'), ((3786, 3801), 'numpy.array', 'np.array', (['rect2'], {}), '(rect2)\n', (3794, 3801), True, 'import numpy as np\n'), ((3809, 3824), 'numpy.array', 'np.array', (['rect2'], {}), '(rect2)\n', (3817, 3824), True, 'import numpy as np\n'), ((792, 805), 'numpy.roll', 'np.roll', (['y', '(1)'], {}), '(y, 1)\n', (799, 805), True, 'import numpy as np\n'), ((819, 832), 'numpy.roll', 'np.roll', (['x', '(1)'], {}), '(x, 1)\n', (826, 832), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
-------------------------------
Time : 2018-12-02 12:29
Author : diw
Email : <EMAIL>
File : predict.py
Desc: Load model, predict audio's class.
-------------------------------
"""
"""
audio_class = ['angry','fear','happy','neutral','sad','surprise']
Input audio's format:
BIT DEPTH = 16(paInt16)
Sample Rate = 16000
CHANNELS = 1
Usage:
Load model:
model = load_model('model/best_model.h5')
get_result:
predict_class,predict_prob = get_audioclass(model,wav_file_path)
get_allaudio:
predict_class,predict_prob,result_dic = get_audioclass(model,wav_file_path,all = True)
result_dic: {class:prob}
"""
from keras.models import load_model
from pyAudioAnalysis import audioFeatureExtraction
from keras.preprocessing import sequence
import numpy as np
from scipy import stats
import pickle
import librosa
import os
from keras import backend as K
# microphone audio capture helper
from get_audio import microphone_audio
# classes = {0: 'angry', 1: 'fear', 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
classes_e_n = {0: 'emotional', 1: 'neutral'}
classes = {0: 'angry', 1: 'fear', 2: 'happy', 3: 'sad', 4: 'surprise'}
gender_classes = {0:'male',1:'female'}
max_len = 1024
nb_features = 36
nb_attention_param = 256
attention_init_value = 1.0 / 256
dropout_rate = 0.5
nb_lstm_cells = 128
nb_classes = 6
masking_value = -100.0
frame_size = 0.025 # 25 msec segments
step = 0.01 # 10 msec time step
if('tensorflow' == K.backend()):
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
set_session(sess)
def get_data(audio_path):
# sample rate is 16000 Hz
# the first second can be treated as noise (see the commented-out load call below)
# data, sr = librosa.load(audio_path, sr=16000, offset=1.0, duration=3.0)
data, sr = librosa.load(audio_path, sr=16000)
return data, sr
def extract_dataset_tosequence(data, Fs=16000, save=False):
# data: audio signal loaded by librosa, Fs: sample rate
f_global = []
# 34D short-term feature
f = audioFeatureExtraction.stFeatureExtraction(
data, Fs, frame_size * Fs, step * Fs)
# for pyAudioAnalysis which support python3
if type(f) is tuple:
f = f[0]
# Harmonic ratio and pitch, 2D
hr_pitch = audioFeatureExtraction.stFeatureSpeed(
data, Fs, frame_size * Fs, step * Fs)
f = np.append(f, hr_pitch.transpose(), axis=0)
# Z-normalized
f = stats.zscore(f, axis=0)
f = f.transpose()
f_global.append(f)
# print("Extracting features from data")
f_global = sequence.pad_sequences(f_global,
maxlen=max_len,
dtype='float32',
padding='post',
value=masking_value)
if save:
print("Saving features to file...")
pickle.dump(f, open('features.p', 'wb'))
return f_global
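# Shape sketch (assuming 16 kHz mono input as above): each call returns a padded array
# of shape (1, max_len, 36) -- 34 pyAudioAnalysis short-term features plus harmonic
# ratio and pitch, z-score normalised and padded/truncated to max_len = 1024 frames.
#   f = extract_dataset_tosequence(data, Fs=16000)   # f.shape == (1, 1024, 36)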
def find_max(list_iter):
# returns: (probability, class id)
prob_list = list_iter[0]
max = prob_list[0]
index = 0
for i in range(len(prob_list)):
current_prob = prob_list[i]
if(current_prob >= max):
max = current_prob
index = i
return max, index
# test_folder = '/Users/diweng/github_project/keras_audio_classifier/data/test'
def test_model(model_path, test_folder, model_type = 'emotion'):
model = load_model(model_path)
emotion_list = os.listdir(test_folder)
total = 0
count = 0
if (model_type == 'emotion'):
for current_emotion in emotion_list:
if (current_emotion == '.DS_Store' or current_emotion == '_desktop.ini'):
continue
emotion_total = 0
emotion_count = 0
current_emotion_path = test_folder + '/' + current_emotion
test_file_list = os.listdir(current_emotion_path)
for current_test_file in test_file_list:
if (not current_test_file.endswith('.wav')):
continue
test_file_path = current_emotion_path + '/' + current_test_file
data, sr = get_data(test_file_path)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
predict_class = classes[predict_label]
total += 1
emotion_total += 1
if(predict_class == current_emotion):
emotion_count += 1
count += 1
current_accuracy = float(emotion_count) / emotion_total
print('%s accuracy: %.2f%%' % (str(current_emotion), current_accuracy * 100))
elif(model_type == 'gender'):
speaker_class = {'ZhaoZuoxiang':0, 'wangzhe':0, 'zhaoquanyin':1, 'liuchanhg':1}
for current_emotion in emotion_list:
if (current_emotion == '.DS_Store' or current_emotion == '_desktop.ini'):
continue
gender_total = 0
gender_count = 0
current_emotion_path = test_folder + '/' + current_emotion
test_file_list = os.listdir(current_emotion_path)
for current_test_file in test_file_list:
if (current_test_file == '.DS_Store' or current_test_file == '_desktop.ini'):
continue
current_gender = speaker_class[current_test_file.split('_')[1]]
test_file_path = current_emotion_path + '/' + current_test_file
data, sr = get_data(test_file_path)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
total += 1
gender_total += 1
if (predict_label == current_gender):
gender_count += 1
count += 1
current_accuracy = float(gender_count) / gender_total
print('%s accuracy: %.2f%%' % (str(current_emotion), current_accuracy * 100))
total_accuracy = float(count) / total
print('Total accuracy: %.2f%%' % (total_accuracy * 100))
def analyse_emotionn(model,test_file):
dic = {}
data, sr = get_data(test_file)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
predict_class = classes[predict_label]
for i in range(len(predict_output[0])):
current_prob = predict_output[0][i]
current_class = classes[i]
# print('probability that the current audio emotion is %-8s: %.2f%%' %
# (str(current_class), current_prob * 100))
dic[current_class] = current_prob * 100
# print('so the predicted emotion of the current audio is %s, with probability %.2f%%' %
# (str(predict_class), predict_prob * 100))
return dic
def get_audioclass(model,test_file,model_type = 'emotion',all = False):
if(model_type == 'emotion'):
data, sr = get_data(test_file)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
predict_class = classes[predict_label]
class_dic = {}
for i in range(len(predict_output[0])):
current_prob = predict_output[0][i]
current_class = classes[i]
class_dic[current_class] = current_prob
# print('probability that the current audio emotion is %-8s: %.2f%%' %
# (str(current_class), current_prob * 100))
if(all):
return predict_class,predict_prob,class_dic
return predict_class,predict_prob
elif(model_type == 'gender'):
data, sr = get_data(test_file)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
predict_class = gender_classes[predict_label]
class_dic = {}
for i in range(len(predict_output[0])):
current_prob = predict_output[0][i]
current_class = gender_classes[i]
class_dic[current_class] = current_prob
# print('probability that the current audio emotion is %-8s: %.2f%%' %
# (str(current_class), current_prob * 100))
if (all):
return predict_class, predict_prob, class_dic
return predict_class, predict_prob
elif(model_type == 'emotion_neutral'):
data, sr = get_data(test_file)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
predict_class = classes_e_n[predict_label]
return predict_prob,predict_class
def model_confusion_matrix(model, test_folder,model_type):
if(model_type == 'emotion'):
# confusion-matrix rows: y (row) is the ground-truth class, x (column) is the predicted class
result_list = []
# emotion_list = ['angry','fear','happy','neutral','sad','surprise']
# emotion_list = ['angry','fear','happy','sad','surprise']
emotion_list = ['angry','happy','sad']
for current_emotion in emotion_list:
result_list.append([0 for i in range(len(emotion_list))])
for current_emotion in emotion_list:
current_emotion_path = test_folder + '/' + current_emotion
test_file_list = os.listdir(current_emotion_path)
for current_test_file in test_file_list:
if (not current_test_file.endswith('.wav')):
continue
test_file_path = current_emotion_path + '/' + current_test_file
data, sr = get_data(test_file_path)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
result_list[emotion_list.index(current_emotion)][int(predict_label)] += 1
print(result_list)
elif (model_type == 'gender'):
# confusion-matrix rows: y (row) is the ground-truth class, x (column) is the predicted class
result_list = []
gender_list = ['male', 'female']
male_list = ['Zhe.Wang','Zuoxiang.Zhao']
female_list=['Chang.Liu','Quanyin.Zhao',]
emotion_list = ['angry', 'fear', 'happy', 'neutral', 'sad', 'surprise']
for current_gender in gender_list:
result_list.append([0 for i in range(len(gender_list))])
speak_list = os.listdir(test_folder)
for current_speak in speak_list:
if(current_speak in male_list):
gender = 0
elif(current_speak in female_list):
gender = 1
else:
continue
speak_path = test_folder + '/' + current_speak
for current_emotion in emotion_list:
current_emotion_path = speak_path + '/' + current_emotion
test_file_list = os.listdir(current_emotion_path)
for current_test_file in test_file_list:
if (not current_test_file.endswith('.wav')):
continue
test_file_path = current_emotion_path + '/' + current_test_file
data, sr = get_data(test_file_path)
f = extract_dataset_tosequence(data, sr)
f_ex = np.full((f.shape[0], nb_attention_param),
attention_init_value, dtype=np.float32)
predict_output = model.predict([f_ex, f])
predict_prob, predict_label = find_max(predict_output)
result_list[gender][int(predict_label)] += 1  # gender is already the row index (0 = male, 1 = female)
print(result_list)
print(result_list)
if __name__ == '__main__':
#input wav format
# FORMAT = pyaudio.paInt16
# CHANNELS = 1
# RATE = 16000
test_file = 'input.wav'
test_folder = '/Volumes/data/CAS/不同文本100/ChangLiu'
model_path = 'model/test_12.h5'
model = load_model(model_path)
# test_model(model_path,test_folder,model_type='emotion')
# # capture audio from the microphone
# microphone_audio(test_file)
#
# evaluate model accuracy
# print(analyse_emotionn(model,test_file))
# emotion_predict_class, emotion_predict_prob, emotion_class_dic = get_audioclass(model, test_file, 'emotion',
# all=True)
from keras.utils import plot_model
plot_model(model, to_file='model.png', show_shapes=True)
model_confusion_matrix(model,test_folder,model_type='emotion')
|
[
"pyAudioAnalysis.audioFeatureExtraction.stFeatureSpeed",
"keras.models.load_model",
"numpy.full",
"scipy.stats.zscore",
"keras.preprocessing.sequence.pad_sequences",
"keras.backend.backend",
"tensorflow.Session",
"pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction",
"tensorflow.ConfigProto",
"keras.utils.plot_model",
"librosa.load",
"os.listdir"
] |
[((1461, 1472), 'keras.backend.backend', 'K.backend', ([], {}), '()\n', (1470, 1472), True, 'from keras import backend as K\n'), ((1578, 1594), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (1592, 1594), True, 'import tensorflow as tf\n'), ((1649, 1674), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (1659, 1674), True, 'import tensorflow as tf\n'), ((1825, 1859), 'librosa.load', 'librosa.load', (['audio_path'], {'sr': '(16000)'}), '(audio_path, sr=16000)\n', (1837, 1859), False, 'import librosa\n'), ((2025, 2110), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureExtraction', 'audioFeatureExtraction.stFeatureExtraction', (['data', 'Fs', '(frame_size * Fs)', '(step * Fs)'], {}), '(data, Fs, frame_size * Fs, step * Fs\n )\n', (2067, 2110), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((2257, 2332), 'pyAudioAnalysis.audioFeatureExtraction.stFeatureSpeed', 'audioFeatureExtraction.stFeatureSpeed', (['data', 'Fs', '(frame_size * Fs)', '(step * Fs)'], {}), '(data, Fs, frame_size * Fs, step * Fs)\n', (2294, 2332), False, 'from pyAudioAnalysis import audioFeatureExtraction\n'), ((2421, 2444), 'scipy.stats.zscore', 'stats.zscore', (['f'], {'axis': '(0)'}), '(f, axis=0)\n', (2433, 2444), False, 'from scipy import stats\n'), ((2552, 2659), 'keras.preprocessing.sequence.pad_sequences', 'sequence.pad_sequences', (['f_global'], {'maxlen': 'max_len', 'dtype': '"""float32"""', 'padding': '"""post"""', 'value': 'masking_value'}), "(f_global, maxlen=max_len, dtype='float32', padding=\n 'post', value=masking_value)\n", (2574, 2659), False, 'from keras.preprocessing import sequence\n'), ((3384, 3406), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (3394, 3406), False, 'from keras.models import load_model\n'), ((3426, 3449), 'os.listdir', 'os.listdir', (['test_folder'], {}), '(test_folder)\n', (3436, 3449), False, 'import os\n'), ((6626, 6712), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (6633, 6712), True, 'import numpy as np\n'), ((12852, 12874), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (12862, 12874), False, 'from keras.models import load_model\n'), ((13306, 13362), 'keras.utils.plot_model', 'plot_model', (['model'], {'to_file': '"""model.png"""', 'show_shapes': '(True)'}), "(model, to_file='model.png', show_shapes=True)\n", (13316, 13362), False, 'from keras.utils import plot_model\n'), ((7471, 7557), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (7478, 7557), True, 'import numpy as np\n'), ((3829, 3861), 'os.listdir', 'os.listdir', (['current_emotion_path'], {}), '(current_emotion_path)\n', (3839, 3861), False, 'import os\n'), ((8310, 8396), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (8317, 8396), True, 'import numpy as np\n'), ((10098, 10130), 'os.listdir', 'os.listdir', (['current_emotion_path'], {}), '(current_emotion_path)\n', (10108, 10130), False, 'import os\n'), ((11307, 11330), 'os.listdir', 'os.listdir', (['test_folder'], {}), '(test_folder)\n', (11317, 11330), False, 'import os\n'), ((4218, 4304), 
'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (4225, 4304), True, 'import numpy as np\n'), ((5296, 5328), 'os.listdir', 'os.listdir', (['current_emotion_path'], {}), '(current_emotion_path)\n', (5306, 5328), False, 'import os\n'), ((9176, 9262), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (9183, 9262), True, 'import numpy as np\n'), ((10487, 10573), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (10494, 10573), True, 'import numpy as np\n'), ((5797, 5883), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (5804, 5883), True, 'import numpy as np\n'), ((11778, 11810), 'os.listdir', 'os.listdir', (['current_emotion_path'], {}), '(current_emotion_path)\n', (11788, 11810), False, 'import os\n'), ((12195, 12281), 'numpy.full', 'np.full', (['(f.shape[0], nb_attention_param)', 'attention_init_value'], {'dtype': 'np.float32'}), '((f.shape[0], nb_attention_param), attention_init_value, dtype=np.\n float32)\n', (12202, 12281), True, 'import numpy as np\n')]
|
import os
import pytest
import sys
import numpy as np
try:
import pymake
except:
msg = "Error. Pymake package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install https://github.com/modflowpy/pymake/zipball/master"
raise Exception(msg)
try:
import flopy
except:
msg = "Error. FloPy package is not available.\n"
msg += "Try installing using the following command:\n"
msg += " pip install flopy"
raise Exception(msg)
from framework import testing_framework
from simulation import Simulation
ex = ["aux02"]
exdirs = []
for s in ex:
exdirs.append(os.path.join("temp", s))
def build_model(idx, dir):
nlay, nrow, ncol = 1, 10, 10
nper = 3
perlen = [1.0, 1.0, 1.0]
nstp = [10, 10, 10]
tsmult = [1.0, 1.0, 1.0]
lenx = 300.0
delr = delc = lenx / float(nrow)
strt = 100.0
nouter, ninner = 100, 300
hclose, rclose, relax = 1e-9, 1e-3, 0.97
tdis_rc = []
for i in range(nper):
tdis_rc.append((perlen[i], nstp[i], tsmult[i]))
name = ex[idx]
# build MODFLOW 6 files
ws = dir
sim = flopy.mf6.MFSimulation(
sim_name=name, version="mf6", exe_name="mf6", sim_ws=ws
)
# create tdis package
tdis = flopy.mf6.ModflowTdis(sim, time_units="DAYS", nper=nper, perioddata=tdis_rc)
# create gwf model
gwf = flopy.mf6.ModflowGwf(sim, modelname=name)
# create iterative model solution and register the gwf model with it
ims = flopy.mf6.ModflowIms(
sim,
print_option="SUMMARY",
outer_dvclose=hclose,
outer_maximum=nouter,
under_relaxation="DBD",
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
linear_acceleration="BICGSTAB",
scaling_method="NONE",
reordering_method="NONE",
relaxation_factor=relax,
)
sim.register_ims_package(ims, [gwf.name])
dis = flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=90.0,
botm=0.0,
)
# initial conditions
ic = flopy.mf6.ModflowGwfic(gwf, strt=strt)
# node property flow
npf = flopy.mf6.ModflowGwfnpf(gwf, save_flows=True, icelltype=1, k=1.0, k33=0.01)
# chd files
chdlist0 = []
chdlist0.append([(0, 0, 0), 100.0] + [a for a in range(100)])
chdlist0.append([(0, nrow - 1, ncol - 1), 95.0] + [a for a in range(100)])
chdspdict = {0: chdlist0}
chd = flopy.mf6.ModflowGwfchd(
gwf,
stress_period_data=chdspdict,
save_flows=True,
auxiliary=[f"aux{i}" for i in range(100)],
pname="CHD-1",
)
# output control
oc = flopy.mf6.ModflowGwfoc(
gwf,
budget_filerecord="{}.bud".format(name),
head_filerecord="{}.hds".format(name),
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")],
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")],
filename="{}.oc".format(name),
)
return sim, None
def eval_model(sim):
print("evaluating model...")
# chd budget aux variables
fpth = os.path.join(sim.simpath, "aux02.bud")
bobj = flopy.utils.CellBudgetFile(fpth, precision="double")
records = bobj.get_data(text="CHD")
for r in records:
for a in range(100):
aname = f"AUX{a}"
assert np.allclose(r[aname], a)
return
# - No need to change any code below
@pytest.mark.parametrize(
"idx, dir",
list(enumerate(exdirs)),
)
def test_mf6model(idx, dir):
# initialize testing framework
test = testing_framework()
# build the model
test.build_mf6_models(build_model, idx, dir)
# run the test model
test.run_mf6(Simulation(dir, exfunc=eval_model, idxsim=idx))
def main():
# initialize testing framework
test = testing_framework()
# run the test model
for idx, dir in enumerate(exdirs):
test.build_mf6_models(build_model, idx, dir)
sim = Simulation(dir, exfunc=eval_model, idxsim=idx)
test.run_mf6(sim)
if __name__ == "__main__":
# print message
print("standalone run of {}".format(os.path.basename(__file__)))
# run main routine
main()
|
[
"flopy.mf6.ModflowIms",
"flopy.utils.CellBudgetFile",
"os.path.join",
"flopy.mf6.ModflowTdis",
"os.path.basename",
"numpy.allclose",
"framework.testing_framework",
"flopy.mf6.ModflowGwfnpf",
"flopy.mf6.ModflowGwf",
"simulation.Simulation",
"flopy.mf6.ModflowGwfic",
"flopy.mf6.MFSimulation",
"flopy.mf6.ModflowGwfdis"
] |
[((1137, 1216), 'flopy.mf6.MFSimulation', 'flopy.mf6.MFSimulation', ([], {'sim_name': 'name', 'version': '"""mf6"""', 'exe_name': '"""mf6"""', 'sim_ws': 'ws'}), "(sim_name=name, version='mf6', exe_name='mf6', sim_ws=ws)\n", (1159, 1216), False, 'import flopy\n'), ((1268, 1344), 'flopy.mf6.ModflowTdis', 'flopy.mf6.ModflowTdis', (['sim'], {'time_units': '"""DAYS"""', 'nper': 'nper', 'perioddata': 'tdis_rc'}), "(sim, time_units='DAYS', nper=nper, perioddata=tdis_rc)\n", (1289, 1344), False, 'import flopy\n'), ((1379, 1420), 'flopy.mf6.ModflowGwf', 'flopy.mf6.ModflowGwf', (['sim'], {'modelname': 'name'}), '(sim, modelname=name)\n', (1399, 1420), False, 'import flopy\n'), ((1505, 1810), 'flopy.mf6.ModflowIms', 'flopy.mf6.ModflowIms', (['sim'], {'print_option': '"""SUMMARY"""', 'outer_dvclose': 'hclose', 'outer_maximum': 'nouter', 'under_relaxation': '"""DBD"""', 'inner_maximum': 'ninner', 'inner_dvclose': 'hclose', 'rcloserecord': 'rclose', 'linear_acceleration': '"""BICGSTAB"""', 'scaling_method': '"""NONE"""', 'reordering_method': '"""NONE"""', 'relaxation_factor': 'relax'}), "(sim, print_option='SUMMARY', outer_dvclose=hclose,\n outer_maximum=nouter, under_relaxation='DBD', inner_maximum=ninner,\n inner_dvclose=hclose, rcloserecord=rclose, linear_acceleration=\n 'BICGSTAB', scaling_method='NONE', reordering_method='NONE',\n relaxation_factor=relax)\n", (1525, 1810), False, 'import flopy\n'), ((1954, 2061), 'flopy.mf6.ModflowGwfdis', 'flopy.mf6.ModflowGwfdis', (['gwf'], {'nlay': 'nlay', 'nrow': 'nrow', 'ncol': 'ncol', 'delr': 'delr', 'delc': 'delc', 'top': '(90.0)', 'botm': '(0.0)'}), '(gwf, nlay=nlay, nrow=nrow, ncol=ncol, delr=delr,\n delc=delc, top=90.0, botm=0.0)\n', (1977, 2061), False, 'import flopy\n'), ((2164, 2202), 'flopy.mf6.ModflowGwfic', 'flopy.mf6.ModflowGwfic', (['gwf'], {'strt': 'strt'}), '(gwf, strt=strt)\n', (2186, 2202), False, 'import flopy\n'), ((2239, 2314), 'flopy.mf6.ModflowGwfnpf', 'flopy.mf6.ModflowGwfnpf', (['gwf'], {'save_flows': '(True)', 'icelltype': '(1)', 'k': '(1.0)', 'k33': '(0.01)'}), '(gwf, save_flows=True, icelltype=1, k=1.0, k33=0.01)\n', (2262, 2314), False, 'import flopy\n'), ((3242, 3280), 'os.path.join', 'os.path.join', (['sim.simpath', '"""aux02.bud"""'], {}), "(sim.simpath, 'aux02.bud')\n", (3254, 3280), False, 'import os\n'), ((3292, 3344), 'flopy.utils.CellBudgetFile', 'flopy.utils.CellBudgetFile', (['fpth'], {'precision': '"""double"""'}), "(fpth, precision='double')\n", (3318, 3344), False, 'import flopy\n'), ((3709, 3728), 'framework.testing_framework', 'testing_framework', ([], {}), '()\n', (3726, 3728), False, 'from framework import testing_framework\n'), ((3952, 3971), 'framework.testing_framework', 'testing_framework', ([], {}), '()\n', (3969, 3971), False, 'from framework import testing_framework\n'), ((635, 658), 'os.path.join', 'os.path.join', (['"""temp"""', 's'], {}), "('temp', s)\n", (647, 658), False, 'import os\n'), ((3844, 3890), 'simulation.Simulation', 'Simulation', (['dir'], {'exfunc': 'eval_model', 'idxsim': 'idx'}), '(dir, exfunc=eval_model, idxsim=idx)\n', (3854, 3890), False, 'from simulation import Simulation\n'), ((4104, 4150), 'simulation.Simulation', 'Simulation', (['dir'], {'exfunc': 'eval_model', 'idxsim': 'idx'}), '(dir, exfunc=eval_model, idxsim=idx)\n', (4114, 4150), False, 'from simulation import Simulation\n'), ((3485, 3509), 'numpy.allclose', 'np.allclose', (['r[aname]', 'a'], {}), '(r[aname], a)\n', (3496, 3509), True, 'import numpy as np\n'), ((4266, 4292), 'os.path.basename', 'os.path.basename', (['__file__'], 
{}), '(__file__)\n', (4282, 4292), False, 'import os\n')]
|
import numpy as np
from kernel_tuner import core
from kernel_tuner.interface import Options, _kernel_options
from kernel_tuner.integration import TuneResults
class PythonKernel(object):
def __init__(self, kernel_name, kernel_string, problem_size, arguments, params=None, inputs=None, outputs=None, device=0, platform=0,
block_size_names=None, grid_div_x=None, grid_div_y=None, grid_div_z=None, verbose=True, lang=None,
results_file=None):
""" Construct Python helper object to compile and call the kernel from Python
This object compiles a GPU kernel parameterized using the parameters in params.
GPU memory is allocated for each argument using its size and type as listed in arguments.
The object can be called directly as a function with the kernel arguments as function arguments.
Kernel arguments marked as inputs will be copied to the GPU on every kernel launch.
Only the kernel arguments marked as outputs will be returned; note that the result is always
returned in a list, even when there is only one output.
Most of the arguments to this function are the same as with tune_kernel or run_kernel in Kernel Tuner,
and are therefore not duplicated here. The two new arguments are:
:param inputs: a boolean list, one entry per kernel argument, signalling whether that argument is an input to the kernel
:type inputs: list(bool)
:param outputs: a boolean list, one entry per kernel argument, signalling whether that argument is an output of the kernel
:type outputs: list(bool)
"""
#construct device interface
kernel_source = core.KernelSource(kernel_name, kernel_string, lang)
self.dev = core.DeviceInterface(kernel_source, device=device, quiet=True)
if not params:
params = {}
#if results_file is passed use the results file to lookup tunable parameters
if results_file:
results = TuneResults(results_file)
params.update(results.get_best_config(self.dev.name, problem_size))
self.params = params
#construct kernel_options to hold information about the kernel
opts = locals()
kernel_options = Options([(k, opts[k]) for k in _kernel_options.keys() if k in opts.keys()])
#instantiate the kernel given the parameters in params
self.kernel_instance = self.dev.create_kernel_instance(kernel_source, kernel_options, params, verbose)
#compile the kernel
self.func = self.dev.compile_kernel(self.kernel_instance, verbose)
#setup GPU memory
self.gpu_args = self.dev.ready_argument_list(arguments)
if inputs:
self.inputs = inputs
else:
self.inputs = [True for _ in arguments]
if outputs:
self.outputs = outputs
else:
self.outputs = [True for _ in arguments]
def update_gpu_args(self, args):
for i, arg in enumerate(args):
if self.inputs[i]:
if isinstance(args[i], np.ndarray):
self.dev.dev.memcpy_htod(self.gpu_args[i], arg)
else:
self.gpu_args[i] = arg
return self.gpu_args
def get_gpu_result(self, args):
results = []
for i, _ in enumerate(self.gpu_args):
if self.outputs[i] and isinstance(args[i], np.ndarray):
res = np.zeros_like(args[i])
self.dev.memcpy_dtoh(res, self.gpu_args[i])
results.append(res)
return results
def run_kernel(self, args):
"""Run the GPU kernel
Copy the arguments marked as inputs to the GPU
Call the GPU kernel
Copy the arguments marked as outputs from the GPU
Return the outputs in a list of numpy arrays
:param args: A list with the kernel arguments as numpy arrays or numpy scalars
:type args: list(np.ndarray or np.generic)
"""
self.update_gpu_args(args)
self.dev.run_kernel(self.func, self.gpu_args, self.kernel_instance)
return self.get_gpu_result(args)
def __call__(self, *args):
"""Run the GPU kernel
Copy the arguments marked as inputs to the GPU
Call the GPU kernel
Copy the arguments marked as outputs from the GPU
Return the outputs in a list of numpy arrays
:param *args: A variable number of kernel arguments as numpy arrays or numpy scalars
:type *args: np.ndarray or np.generic
"""
return self.run_kernel(args)
def __del__(self):
if hasattr(self, 'dev'):
self.dev.__exit__([None, None, None])
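# A hedged usage sketch (kernel source, sizes, and the tuning parameter below are
# illustrative, not taken from this module):
#   import numpy as np
#   kernel_string = "__global__ void vector_add(float *c, float *a, float *b, int n) { ... }"
#   n = np.int32(1024)
#   a = np.random.randn(n).astype(np.float32)
#   b = np.random.randn(n).astype(np.float32)
#   c = np.zeros_like(a)
#   kernel = PythonKernel("vector_add", kernel_string, n, [c, a, b, n],
#                         params={"block_size_x": 128},
#                         inputs=[False, True, True, False],
#                         outputs=[True, False, False, False])
#   c, = kernel(c, a, b, n)   # only arguments marked as outputs are returned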
|
[
"numpy.zeros_like",
"kernel_tuner.integration.TuneResults",
"kernel_tuner.core.DeviceInterface",
"kernel_tuner.interface._kernel_options.keys",
"kernel_tuner.core.KernelSource"
] |
[((1716, 1767), 'kernel_tuner.core.KernelSource', 'core.KernelSource', (['kernel_name', 'kernel_string', 'lang'], {}), '(kernel_name, kernel_string, lang)\n', (1733, 1767), False, 'from kernel_tuner import core\n'), ((1787, 1849), 'kernel_tuner.core.DeviceInterface', 'core.DeviceInterface', (['kernel_source'], {'device': 'device', 'quiet': '(True)'}), '(kernel_source, device=device, quiet=True)\n', (1807, 1849), False, 'from kernel_tuner import core\n'), ((2030, 2055), 'kernel_tuner.integration.TuneResults', 'TuneResults', (['results_file'], {}), '(results_file)\n', (2041, 2055), False, 'from kernel_tuner.integration import TuneResults\n'), ((3488, 3510), 'numpy.zeros_like', 'np.zeros_like', (['args[i]'], {}), '(args[i])\n', (3501, 3510), True, 'import numpy as np\n'), ((2317, 2339), 'kernel_tuner.interface._kernel_options.keys', '_kernel_options.keys', ([], {}), '()\n', (2337, 2339), False, 'from kernel_tuner.interface import Options, _kernel_options\n')]
|
import pytest
from .fixtures import *
import pandas as pd
import numpy as np
DROPPED_ROWS_INDICES = [2, 5, 7, 10]
@pytest.mark.parametrize("original_df", [
make_table(unsorted_int_index, rows=30, astype="pandas"),
make_table(unsorted_datetime_index, rows=37, astype="pandas"),
make_table(unsorted_string_index, rows=125, astype="pandas")
])
def test_insert_table(original_df, store):
# Arrange
row_indices = np.random.choice(original_df.index, size=5, replace=False)
insert_df = original_df.loc[row_indices, :]
expected = original_df.copy().sort_index()
original_df = original_df.drop(index=row_indices)
partition_size = get_partition_size(
original_df, num_partitions=NUMBER_OF_PARTITIONS)
store.write_table(TABLE_NAME,
original_df,
partition_size=partition_size,
warnings='ignore')
table = store.select_table(TABLE_NAME)
# Act
table.insert(insert_df)
# Assert
df = store.read_pandas(TABLE_NAME)
assert df.equals(expected)
def test_insert_table_with_pandas_series(store):
# Arrange
original_df = make_table(cols=1, astype='pandas').squeeze()
row_indices = np.random.choice(original_df.index, size=5, replace=False)
insert_df = original_df.loc[row_indices]
expected = original_df.copy().sort_index()
original_df = original_df.drop(index=row_indices)
partition_size = get_partition_size(
original_df, num_partitions=NUMBER_OF_PARTITIONS)
store.write_table(TABLE_NAME,
original_df,
partition_size=partition_size,
warnings='ignore')
table = store.select_table(TABLE_NAME)
# Act
table.insert(insert_df)
# Assert
df = store.read_pandas(TABLE_NAME)
assert df.equals(expected)
def _wrong_index_dtype():
df = make_table(sorted_datetime_index, astype="pandas")
return df
def _existing_index_values():
df = make_table(astype="pandas")
return df
def _duplicate_index_values():
df = make_table(astype="pandas")
df = df.iloc[DROPPED_ROWS_INDICES, :]
df = pd.concat([df, df]) # Duplicate df
return df
def _wrong_column_dtype():
df = make_table(sorted_string_index, cols=4, astype="pandas")
df = df.reset_index()
df.columns = ['c0', 'c1', 'c2', 'c3', 'c4']
df = df.iloc[DROPPED_ROWS_INDICES, :]
return df
def _wrong_column_names():
df = make_table(cols=2, astype="pandas")
df = df.iloc[DROPPED_ROWS_INDICES, :]
df.columns = ['c1', 'non-existant_column']
return df
def _duplicate_column_names():
df = make_table(cols=6, astype="pandas")
df = df.iloc[DROPPED_ROWS_INDICES, :]
df.columns = ['c0', 'c0', 'c1', 'c2', 'c3', 'c4']
return df
@pytest.mark.parametrize(
("insert_df", "exception"),
[
(_wrong_index_dtype(), TypeError),
(_existing_index_values(), ValueError),
(_duplicate_index_values(), IndexError),
(_wrong_column_dtype(), TypeError),
(_wrong_column_names(), ValueError),
(_duplicate_column_names(), IndexError),
],
ids=[
"_wrong_index_dtype",
"_existing_index_values",
"_duplicate_index_values",
"_wrong_column_dtype",
"_wrong_column_names",
"_duplicate_column_names",
],
)
def test_can_insert_table(insert_df, exception, store):
# Arrange
original_df = make_table(cols=5, astype='pandas')
original_df = original_df.drop(index=DROPPED_ROWS_INDICES)
store.write_table(TABLE_NAME, original_df)
table = store.select_table(TABLE_NAME)
# Act
with pytest.raises(exception) as e:
table.insert(insert_df)
# Assert
assert isinstance(e.type(), exception)
|
[
"pytest.raises",
"pandas.concat",
"numpy.random.choice"
] |
[((432, 490), 'numpy.random.choice', 'np.random.choice', (['original_df.index'], {'size': '(5)', 'replace': '(False)'}), '(original_df.index, size=5, replace=False)\n', (448, 490), True, 'import numpy as np\n'), ((1214, 1272), 'numpy.random.choice', 'np.random.choice', (['original_df.index'], {'size': '(5)', 'replace': '(False)'}), '(original_df.index, size=5, replace=False)\n', (1230, 1272), True, 'import numpy as np\n'), ((2152, 2171), 'pandas.concat', 'pd.concat', (['[df, df]'], {}), '([df, df])\n', (2161, 2171), True, 'import pandas as pd\n'), ((3654, 3678), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (3667, 3678), False, 'import pytest\n')]
|
from django.shortcuts import render,redirect
from .models import given_image,predicted_label,image_name
from .forms import given_image_form
import numpy as np
import pandas as pd
from PIL import Image
import cv2
import os.path
import pickle
from sklearn.tree import DecisionTreeClassifier
# Create your views here.
def home(request):
form=given_image_form(request.POST or None,request.FILES or None)
if form.is_valid():
form.save()
a=form.cleaned_data['image_given']
name=image_name(name_of_image=a)
name.save()
b=image_name.objects.all().last()
print(b)
c=str(b)
new_path=os.path.join('/home/vipul/media/',c)
print(new_path)
img=cv2.imread(new_path)
print(img)
arr=np.array(img)
print(arr.shape)
new_img=np.reshape(arr,(784,3), order='C')
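        # Editorial note: the reshape assumes the uploaded image is 28 x 28 (784 pixels);
        # it is flattened to one row per pixel, and the next line keeps a single colour
        # channel as the 784-feature vector passed to the pickled MNIST decision tree.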
final_img=new_img[0:,1]
print(new_img.shape)
print(final_img.shape)
#classifier=decision_tree_model()
#pred_label=classifier.predict([final_img])
#label_pred=predicted_label(label=np.asscalar(pred_label))
#label_pred.save()
loaded_classifier=pickle.load(open('decision_tree_model.sav','rb'))
pred_label=loaded_classifier.predict([final_img])
label_pred=predicted_label(label=np.asscalar(pred_label))
label_pred.save()
number=predicted_label.objects.all().last()
context={"number":number}
return render(request,'mnistwebsite/successPage.html',context)
context={'form':form}
return render(request,'mnistwebsite/home.html',context)
def successPage(request):
return render(request,'successPage.html')
|
[
"cv2.imread",
"numpy.array",
"numpy.reshape",
"django.shortcuts.render",
"numpy.asscalar"
] |
[((1654, 1704), 'django.shortcuts.render', 'render', (['request', '"""mnistwebsite/home.html"""', 'context'], {}), "(request, 'mnistwebsite/home.html', context)\n", (1660, 1704), False, 'from django.shortcuts import render, redirect\n'), ((1750, 1785), 'django.shortcuts.render', 'render', (['request', '"""successPage.html"""'], {}), "(request, 'successPage.html')\n", (1756, 1785), False, 'from django.shortcuts import render, redirect\n'), ((748, 768), 'cv2.imread', 'cv2.imread', (['new_path'], {}), '(new_path)\n', (758, 768), False, 'import cv2\n'), ((805, 818), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (813, 818), True, 'import numpy as np\n'), ((869, 905), 'numpy.reshape', 'np.reshape', (['arr', '(784, 3)'], {'order': '"""C"""'}), "(arr, (784, 3), order='C')\n", (879, 905), True, 'import numpy as np\n'), ((1556, 1613), 'django.shortcuts.render', 'render', (['request', '"""mnistwebsite/successPage.html"""', 'context'], {}), "(request, 'mnistwebsite/successPage.html', context)\n", (1562, 1613), False, 'from django.shortcuts import render, redirect\n'), ((1377, 1400), 'numpy.asscalar', 'np.asscalar', (['pred_label'], {}), '(pred_label)\n', (1388, 1400), True, 'import numpy as np\n')]
|
"""
Unit tests for optimizers.
"""
import numpy as np
import pytest
from numpy.linalg import norm
from scipy.integrate import odeint
from sklearn.base import BaseEstimator
from sklearn.exceptions import ConvergenceWarning
from sklearn.exceptions import NotFittedError
from sklearn.linear_model import ElasticNet
from sklearn.linear_model import Lasso
from sklearn.utils.validation import check_is_fitted
from pysindy import FiniteDifference
from pysindy import PolynomialLibrary
from pysindy import SINDy
from pysindy.feature_library import CustomLibrary
from pysindy.optimizers import ConstrainedSR3
from pysindy.optimizers import SINDyOptimizer
from pysindy.optimizers import SR3
from pysindy.optimizers import STLSQ
from pysindy.optimizers import TrappingSR3
from pysindy.utils import supports_multiple_targets
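# Classic Lorenz system (sigma=10, rho=28, beta=8/3), used as a test ODE further down.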
def lorenz(z, t):
return 10 * (z[1] - z[0]), z[0] * (28 - z[2]) - z[1], z[0] * z[1] - 8 / 3 * z[2]
class DummyLinearModel(BaseEstimator):
# Does not natively support multiple targets
def fit(self, x, y):
self.coef_ = np.ones(x.shape[1])
self.intercept_ = 0
return self
def predict(self, x):
return x
class DummyEmptyModel(BaseEstimator):
# Does not have fit or predict methods
def __init__(self):
self.fit_intercept = False
self.normalize = False
class DummyModelNoCoef(BaseEstimator):
# Does not set the coef_ attribute
def fit(self, x, y):
self.intercept_ = 0
return self
def predict(self, x):
return x
@pytest.mark.parametrize(
"cls, support",
[
(Lasso, True),
(STLSQ, True),
(SR3, True),
(ConstrainedSR3, True),
(TrappingSR3, True),
(DummyLinearModel, False),
],
)
def test_supports_multiple_targets(cls, support):
assert supports_multiple_targets(cls()) == support
@pytest.fixture(params=["data_derivative_1d", "data_derivative_2d"])
def data(request):
return request.getfixturevalue(request.param)
@pytest.mark.parametrize(
"optimizer",
[
STLSQ(),
SR3(),
ConstrainedSR3(),
TrappingSR3(),
Lasso(fit_intercept=False),
ElasticNet(fit_intercept=False),
DummyLinearModel(),
],
)
def test_fit(data, optimizer):
x, x_dot = data
if len(x.shape) == 1:
x = x.reshape(-1, 1)
opt = SINDyOptimizer(optimizer, unbias=False)
opt.fit(x, x_dot)
check_is_fitted(opt)
assert opt.complexity >= 0
if len(x_dot.shape) > 1:
assert opt.coef_.shape == (x.shape[1], x_dot.shape[1])
else:
assert opt.coef_.shape == (1, x.shape[1])
@pytest.mark.parametrize(
"optimizer",
[STLSQ(), SR3()],
)
def test_not_fitted(optimizer):
with pytest.raises(NotFittedError):
optimizer.predict(np.ones((1, 3)))
@pytest.mark.parametrize("optimizer", [STLSQ(), SR3()])
def test_complexity_not_fitted(optimizer, data_derivative_2d):
with pytest.raises(NotFittedError):
optimizer.complexity
x, _ = data_derivative_2d
optimizer.fit(x, x)
assert optimizer.complexity > 0
@pytest.mark.parametrize(
"kwargs", [{"normalize": True}, {"fit_intercept": True}, {"copy_X": False}]
)
def test_alternate_parameters(data_derivative_1d, kwargs):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = STLSQ(**kwargs)
model.fit(x, x_dot)
model.fit(x, x_dot, sample_weight=x[:, 0])
check_is_fitted(model)
@pytest.mark.parametrize("optimizer", [STLSQ, SR3, ConstrainedSR3])
@pytest.mark.parametrize("params", [dict(threshold=-1), dict(max_iter=0)])
def test_general_bad_parameters(optimizer, params):
with pytest.raises(ValueError):
optimizer(**params)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
@pytest.mark.parametrize(
"params",
[dict(nu=0), dict(tol=0), dict(trimming_fraction=-1), dict(trimming_fraction=2)],
)
def test_sr3_bad_parameters(optimizer, params):
with pytest.raises(ValueError):
optimizer(**params)
@pytest.mark.parametrize(
"params",
[
dict(eta=-1),
dict(tol=0),
dict(tol_m=0),
dict(eps_solver=0),
dict(alpha_m=-1),
dict(alpha_A=-1),
dict(gamma=1),
dict(evolve_w=False, relax_optim=False),
dict(thresholder="l0"),
dict(threshold=-1),
dict(max_iter=0),
dict(eta=10, alpha_m=20),
dict(eta=10, alpha_A=20),
],
)
def test_trapping_bad_parameters(params):
with pytest.raises(ValueError):
TrappingSR3(**params)
@pytest.mark.parametrize(
"params",
[dict(PL=np.random.rand(3, 3, 3, 9)), dict(PQ=np.random.rand(3, 3, 3, 3, 9))],
)
def test_trapping_bad_tensors(params):
x = np.random.standard_normal((10, 9))
x_dot = np.random.standard_normal((10, 3))
with pytest.raises(ValueError):
model = TrappingSR3(**params)
model.fit(x, x_dot)
@pytest.mark.parametrize(
"params",
[dict(PL=np.ones((3, 3, 3, 9)), PQ=np.ones((3, 3, 3, 3, 9)))],
)
def test_trapping_quadratic_library(params):
x = np.random.standard_normal((10, 3))
library_functions = [
lambda x: x,
lambda x, y: x * y,
lambda x: x ** 2,
]
library_function_names = [
lambda x: str(x),
lambda x, y: "{} * {}".format(x, y),
lambda x: "{}^2".format(x),
]
sindy_library = CustomLibrary(
library_functions=library_functions, function_names=library_function_names
)
opt = TrappingSR3(**params)
model = SINDy(optimizer=opt, feature_library=sindy_library)
model.fit(x)
assert opt.PL.shape == (3, 3, 3, 9)
assert opt.PQ.shape == (3, 3, 3, 3, 9)
check_is_fitted(model)
@pytest.mark.parametrize(
"params",
[dict(PL=np.ones((3, 3, 3, 9)), PQ=np.ones((3, 3, 3, 3, 9)))],
)
def test_trapping_cubic_library(params):
x = np.random.standard_normal((10, 3))
library_functions = [
lambda x: x,
lambda x, y: x * y,
lambda x: x ** 2,
lambda x, y, z: x * y * z,
lambda x, y: x ** 2 * y,
lambda x: x ** 3,
]
library_function_names = [
lambda x: str(x),
lambda x, y: "{} * {}".format(x, y),
lambda x: "{}^2".format(x),
lambda x, y, z: "{} * {} * {}".format(x, y, z),
lambda x, y: "{}^2 * {}".format(x, y),
lambda x: "{}^3".format(x),
]
sindy_library = CustomLibrary(
library_functions=library_functions, function_names=library_function_names
)
with pytest.raises(ValueError):
opt = TrappingSR3(**params)
model = SINDy(optimizer=opt, feature_library=sindy_library)
model.fit(x)
@pytest.mark.parametrize(
"error, optimizer, params",
[
(ValueError, STLSQ, dict(alpha=-1)),
(NotImplementedError, SR3, dict(thresholder="l2")),
(NotImplementedError, ConstrainedSR3, dict(thresholder="l2")),
(ValueError, ConstrainedSR3, dict(thresholder="weighted_l0", thresholds=None)),
(ValueError, ConstrainedSR3, dict(thresholder="weighted_l0", thresholds=None)),
(ValueError, ConstrainedSR3, dict(thresholds=-np.ones((5, 5)))),
],
)
def test_specific_bad_parameters(error, optimizer, params):
with pytest.raises(error):
optimizer(**params)
def test_bad_optimizers(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
with pytest.raises(AttributeError):
opt = SINDyOptimizer(DummyEmptyModel())
with pytest.raises(AttributeError):
opt = SINDyOptimizer(DummyModelNoCoef())
opt.fit(x, x_dot)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_initial_guess_sr3(optimizer):
x = np.random.standard_normal((10, 3))
x_dot = np.random.standard_normal((10, 2))
control_model = optimizer(max_iter=1).fit(x, x_dot)
initial_guess = np.random.standard_normal((x_dot.shape[1], x.shape[1]))
guess_model = optimizer(max_iter=1, initial_guess=initial_guess).fit(x, x_dot)
assert np.any(np.not_equal(control_model.coef_, guess_model.coef_))
# The different capitalizations are intentional;
# I want to make sure different versions are recognized
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
@pytest.mark.parametrize("thresholder", ["L0", "l1"])
def test_prox_functions(data_derivative_1d, optimizer, thresholder):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = optimizer(thresholder=thresholder)
model.fit(x, x_dot)
check_is_fitted(model)
def test_cad_prox_function(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = SR3(thresholder="cAd")
model.fit(x, x_dot)
check_is_fitted(model)
@pytest.mark.parametrize("thresholder", ["weighted_l0", "weighted_l1"])
def test_weighted_prox_functions(data, thresholder):
x, x_dot = data
if x.ndim == 1:
x = x.reshape(-1, 1)
thresholds = np.ones((1, 1))
else:
thresholds = np.ones((x_dot.shape[1], x.shape[1]))
model = ConstrainedSR3(thresholder=thresholder, thresholds=thresholds)
model.fit(x, x_dot)
check_is_fitted(model)
@pytest.mark.parametrize("thresholder", ["L0", "l1"])
def test_constrained_sr3_prox_functions(data_derivative_1d, thresholder):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
model = ConstrainedSR3(thresholder=thresholder)
model.fit(x, x_dot)
check_is_fitted(model)
def test_unbias(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
optimizer_biased = SINDyOptimizer(
STLSQ(threshold=0.01, alpha=0.1, max_iter=1), unbias=False
)
optimizer_biased.fit(x, x_dot)
optimizer_unbiased = SINDyOptimizer(
STLSQ(threshold=0.01, alpha=0.1, max_iter=1), unbias=True
)
optimizer_unbiased.fit(x, x_dot)
assert (
norm(optimizer_biased.coef_ - optimizer_unbiased.coef_)
/ norm(optimizer_unbiased.coef_)
> 1e-9
)
def test_unbias_external(data_derivative_1d):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
optimizer_biased = SINDyOptimizer(
Lasso(alpha=0.1, fit_intercept=False, max_iter=1), unbias=False
)
optimizer_biased.fit(x, x_dot)
optimizer_unbiased = SINDyOptimizer(
Lasso(alpha=0.1, fit_intercept=False, max_iter=1), unbias=True
)
optimizer_unbiased.fit(x, x_dot)
assert (
norm(optimizer_biased.coef_ - optimizer_unbiased.coef_)
/ (norm(optimizer_unbiased.coef_) + 1e-5)
> 1e-9
)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_sr3_trimming(optimizer, data_linear_oscillator_corrupted):
X, X_dot, trimming_array = data_linear_oscillator_corrupted
optimizer_without_trimming = SINDyOptimizer(optimizer(), unbias=False)
optimizer_without_trimming.fit(X, X_dot)
optimizer_trimming = SINDyOptimizer(optimizer(trimming_fraction=0.15), unbias=False)
optimizer_trimming.fit(X, X_dot)
# Check that trimming found the right samples to remove
np.testing.assert_array_equal(
optimizer_trimming.optimizer.trimming_array, trimming_array
)
# Check that the coefficients found by the optimizer with trimming
# are closer to the true coefficients than the coefficients found by the
# optimizer without trimming
true_coef = np.array([[-2.0, 0.0], [0.0, 1.0]])
assert norm(true_coef - optimizer_trimming.coef_) < norm(
true_coef - optimizer_without_trimming.coef_
)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_sr3_disable_trimming(optimizer, data_linear_oscillator_corrupted):
x, x_dot, _ = data_linear_oscillator_corrupted
model_plain = optimizer()
model_plain.fit(x, x_dot)
model_trimming = optimizer(trimming_fraction=0.5)
model_trimming.disable_trimming()
model_trimming.fit(x, x_dot)
np.testing.assert_allclose(model_plain.coef_, model_trimming.coef_)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3])
def test_sr3_enable_trimming(optimizer, data_linear_oscillator_corrupted):
x, x_dot, _ = data_linear_oscillator_corrupted
model_plain = optimizer()
model_plain.enable_trimming(trimming_fraction=0.5)
model_plain.fit(x, x_dot)
model_trimming = optimizer(trimming_fraction=0.5)
model_trimming.fit(x, x_dot)
np.testing.assert_allclose(model_plain.coef_, model_trimming.coef_)
@pytest.mark.parametrize("optimizer", [SR3, ConstrainedSR3, TrappingSR3])
def test_sr3_warn(optimizer, data_linear_oscillator_corrupted):
x, x_dot, _ = data_linear_oscillator_corrupted
model = optimizer(max_iter=1, tol=1e-10)
with pytest.warns(ConvergenceWarning):
model.fit(x, x_dot)
@pytest.mark.parametrize(
"optimizer",
[
STLSQ(max_iter=1),
SR3(max_iter=1),
ConstrainedSR3(max_iter=1),
TrappingSR3(max_iter=1),
],
)
def test_fit_warn(data_derivative_1d, optimizer):
x, x_dot = data_derivative_1d
x = x.reshape(-1, 1)
with pytest.warns(ConvergenceWarning):
optimizer.fit(x, x_dot)
@pytest.mark.parametrize("optimizer", [ConstrainedSR3, TrappingSR3])
@pytest.mark.parametrize("target_value", [0, -1, 3])
def test_row_format_constraints(data_linear_combination, optimizer, target_value):
# Solution is x_dot = x.dot(np.array([[1, 1, 0], [0, 1, 1]]))
x, x_dot = data_linear_combination
constraint_rhs = target_value * np.ones(2)
constraint_lhs = np.zeros((2, x.shape[1] * x_dot.shape[1]))
# Should force corresponding entries of coef_ to be target_value
constraint_lhs[0, 0] = 1
constraint_lhs[1, 3] = 1
model = optimizer(
constraint_lhs=constraint_lhs,
constraint_rhs=constraint_rhs,
constraint_order="feature",
)
model.fit(x, x_dot)
np.testing.assert_allclose(
np.array([model.coef_[0, 0], model.coef_[1, 1]]), target_value, atol=1e-8
)
@pytest.mark.parametrize("optimizer", [ConstrainedSR3, TrappingSR3])
@pytest.mark.parametrize("target_value", [0, -1, 3])
def test_target_format_constraints(data_linear_combination, optimizer, target_value):
x, x_dot = data_linear_combination
constraint_rhs = target_value * np.ones(2)
constraint_lhs = np.zeros((2, x.shape[1] * x_dot.shape[1]))
# Should force corresponding entries of coef_ to be target_value
constraint_lhs[0, 1] = 1
constraint_lhs[1, 4] = 1
model = optimizer(constraint_lhs=constraint_lhs, constraint_rhs=constraint_rhs)
model.fit(x, x_dot)
np.testing.assert_allclose(model.coef_[:, 1], target_value, atol=1e-8)
@pytest.mark.parametrize("thresholds", [0.005, 0.05])
@pytest.mark.parametrize("relax_optim", [False, True])
@pytest.mark.parametrize("noise_levels", [0.0, 0.05, 0.5])
def test_trapping_inequality_constraints(thresholds, relax_optim, noise_levels):
t = np.arange(0, 40, 0.05)
x = odeint(lorenz, [-8, 8, 27], t)
x = x + np.random.normal(0.0, noise_levels, x.shape)
# if order is "feature"
constraint_rhs = np.array([-10.0, -2.0])
constraint_matrix = np.zeros((2, 30))
constraint_matrix[0, 6] = 1.0
constraint_matrix[1, 17] = 1.0
feature_names = ["x", "y", "z"]
opt = TrappingSR3(
threshold=thresholds,
constraint_lhs=constraint_matrix,
constraint_rhs=constraint_rhs,
constraint_order="feature",
inequality_constraints=True,
relax_optim=relax_optim,
)
poly_lib = PolynomialLibrary(degree=2)
model = SINDy(
optimizer=opt,
feature_library=poly_lib,
differentiation_method=FiniteDifference(drop_endpoints=True),
feature_names=feature_names,
)
model.fit(x, t=t[1] - t[0])
assert np.all(
np.dot(constraint_matrix, (model.coefficients()).flatten("F")) <= constraint_rhs
) or np.allclose(
np.dot(constraint_matrix, (model.coefficients()).flatten("F")), constraint_rhs
)
def test_inequality_constraints_reqs():
constraint_rhs = np.array([-10.0, -2.0])
constraint_matrix = np.zeros((2, 30))
constraint_matrix[0, 6] = 1.0
constraint_matrix[1, 17] = 1.0
with pytest.raises(ValueError):
TrappingSR3(
threshold=0.0,
constraint_lhs=constraint_matrix,
constraint_rhs=constraint_rhs,
constraint_order="feature",
inequality_constraints=True,
relax_optim=True,
)
|
[
"numpy.ones",
"pysindy.optimizers.STLSQ",
"numpy.arange",
"pysindy.optimizers.TrappingSR3",
"numpy.linalg.norm",
"numpy.random.normal",
"pytest.mark.parametrize",
"pysindy.PolynomialLibrary",
"sklearn.linear_model.ElasticNet",
"scipy.integrate.odeint",
"pytest.warns",
"pysindy.optimizers.SR3",
"pysindy.SINDy",
"pytest.raises",
"numpy.testing.assert_allclose",
"pysindy.optimizers.ConstrainedSR3",
"sklearn.linear_model.Lasso",
"numpy.testing.assert_array_equal",
"pysindy.feature_library.CustomLibrary",
"pytest.fixture",
"numpy.not_equal",
"numpy.random.standard_normal",
"pysindy.optimizers.SINDyOptimizer",
"numpy.zeros",
"pysindy.FiniteDifference",
"sklearn.utils.validation.check_is_fitted",
"numpy.array",
"numpy.random.rand"
] |
[((1541, 1706), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""cls, support"""', '[(Lasso, True), (STLSQ, True), (SR3, True), (ConstrainedSR3, True), (\n TrappingSR3, True), (DummyLinearModel, False)]'], {}), "('cls, support', [(Lasso, True), (STLSQ, True), (SR3,\n True), (ConstrainedSR3, True), (TrappingSR3, True), (DummyLinearModel, \n False)])\n", (1564, 1706), False, 'import pytest\n'), ((1872, 1939), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['data_derivative_1d', 'data_derivative_2d']"}), "(params=['data_derivative_1d', 'data_derivative_2d'])\n", (1886, 1939), False, 'import pytest\n'), ((3110, 3215), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""kwargs"""', "[{'normalize': True}, {'fit_intercept': True}, {'copy_X': False}]"], {}), "('kwargs', [{'normalize': True}, {'fit_intercept': \n True}, {'copy_X': False}])\n", (3133, 3215), False, 'import pytest\n'), ((3466, 3532), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[STLSQ, SR3, ConstrainedSR3]'], {}), "('optimizer', [STLSQ, SR3, ConstrainedSR3])\n", (3489, 3532), False, 'import pytest\n'), ((3727, 3786), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3])\n", (3750, 3786), False, 'import pytest\n'), ((7616, 7675), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3])\n", (7639, 7675), False, 'import pytest\n'), ((8202, 8261), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3])\n", (8225, 8261), False, 'import pytest\n'), ((8263, 8315), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""thresholder"""', "['L0', 'l1']"], {}), "('thresholder', ['L0', 'l1'])\n", (8286, 8315), False, 'import pytest\n'), ((8740, 8810), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""thresholder"""', "['weighted_l0', 'weighted_l1']"], {}), "('thresholder', ['weighted_l0', 'weighted_l1'])\n", (8763, 8810), False, 'import pytest\n'), ((9169, 9221), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""thresholder"""', "['L0', 'l1']"], {}), "('thresholder', ['L0', 'l1'])\n", (9192, 9221), False, 'import pytest\n'), ((10563, 10622), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3])\n", (10586, 10622), False, 'import pytest\n'), ((11531, 11590), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3])\n", (11554, 11590), False, 'import pytest\n'), ((11981, 12040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3])\n", (12004, 12040), False, 'import pytest\n'), ((12447, 12519), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[SR3, ConstrainedSR3, TrappingSR3]'], {}), "('optimizer', [SR3, ConstrainedSR3, TrappingSR3])\n", (12470, 12519), False, 'import pytest\n'), ((13121, 13188), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[ConstrainedSR3, TrappingSR3]'], {}), "('optimizer', [ConstrainedSR3, TrappingSR3])\n", (13144, 13188), False, 'import pytest\n'), ((13190, 13241), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_value"""', '[0, -1, 3]'], {}), 
"('target_value', [0, -1, 3])\n", (13213, 13241), False, 'import pytest\n'), ((13962, 14029), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""optimizer"""', '[ConstrainedSR3, TrappingSR3]'], {}), "('optimizer', [ConstrainedSR3, TrappingSR3])\n", (13985, 14029), False, 'import pytest\n'), ((14031, 14082), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""target_value"""', '[0, -1, 3]'], {}), "('target_value', [0, -1, 3])\n", (14054, 14082), False, 'import pytest\n'), ((14635, 14687), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""thresholds"""', '[0.005, 0.05]'], {}), "('thresholds', [0.005, 0.05])\n", (14658, 14687), False, 'import pytest\n'), ((14689, 14742), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""relax_optim"""', '[False, True]'], {}), "('relax_optim', [False, True])\n", (14712, 14742), False, 'import pytest\n'), ((14744, 14801), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""noise_levels"""', '[0.0, 0.05, 0.5]'], {}), "('noise_levels', [0.0, 0.05, 0.5])\n", (14767, 14801), False, 'import pytest\n'), ((2371, 2410), 'pysindy.optimizers.SINDyOptimizer', 'SINDyOptimizer', (['optimizer'], {'unbias': '(False)'}), '(optimizer, unbias=False)\n', (2385, 2410), False, 'from pysindy.optimizers import SINDyOptimizer\n'), ((2438, 2458), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['opt'], {}), '(opt)\n', (2453, 2458), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((3348, 3363), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {}), '(**kwargs)\n', (3353, 3363), False, 'from pysindy.optimizers import STLSQ\n'), ((3440, 3462), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['model'], {}), '(model)\n', (3455, 3462), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((4738, 4772), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 9)'], {}), '((10, 9))\n', (4763, 4772), True, 'import numpy as np\n'), ((4785, 4819), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 3)'], {}), '((10, 3))\n', (4810, 4819), True, 'import numpy as np\n'), ((5086, 5120), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 3)'], {}), '((10, 3))\n', (5111, 5120), True, 'import numpy as np\n'), ((5392, 5486), 'pysindy.feature_library.CustomLibrary', 'CustomLibrary', ([], {'library_functions': 'library_functions', 'function_names': 'library_function_names'}), '(library_functions=library_functions, function_names=\n library_function_names)\n', (5405, 5486), False, 'from pysindy.feature_library import CustomLibrary\n'), ((5506, 5527), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {}), '(**params)\n', (5517, 5527), False, 'from pysindy.optimizers import TrappingSR3\n'), ((5540, 5591), 'pysindy.SINDy', 'SINDy', ([], {'optimizer': 'opt', 'feature_library': 'sindy_library'}), '(optimizer=opt, feature_library=sindy_library)\n', (5545, 5591), False, 'from pysindy import SINDy\n'), ((5696, 5718), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['model'], {}), '(model)\n', (5711, 5718), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((5879, 5913), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 3)'], {}), '((10, 3))\n', (5904, 5913), True, 'import numpy as np\n'), ((6418, 6512), 'pysindy.feature_library.CustomLibrary', 'CustomLibrary', ([], {'library_functions': 'library_functions', 'function_names': 'library_function_names'}), '(library_functions=library_functions, function_names=\n 
library_function_names)\n', (6431, 6512), False, 'from pysindy.feature_library import CustomLibrary\n'), ((7723, 7757), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 3)'], {}), '((10, 3))\n', (7748, 7757), True, 'import numpy as np\n'), ((7770, 7804), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 2)'], {}), '((10, 2))\n', (7795, 7804), True, 'import numpy as np\n'), ((7883, 7938), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(x_dot.shape[1], x.shape[1])'], {}), '((x_dot.shape[1], x.shape[1]))\n', (7908, 7938), True, 'import numpy as np\n'), ((8519, 8541), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['model'], {}), '(model)\n', (8534, 8541), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((8663, 8685), 'pysindy.optimizers.SR3', 'SR3', ([], {'thresholder': '"""cAd"""'}), "(thresholder='cAd')\n", (8666, 8685), False, 'from pysindy.optimizers import SR3\n'), ((8714, 8736), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['model'], {}), '(model)\n', (8729, 8736), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((9052, 9114), 'pysindy.optimizers.ConstrainedSR3', 'ConstrainedSR3', ([], {'thresholder': 'thresholder', 'thresholds': 'thresholds'}), '(thresholder=thresholder, thresholds=thresholds)\n', (9066, 9114), False, 'from pysindy.optimizers import ConstrainedSR3\n'), ((9143, 9165), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['model'], {}), '(model)\n', (9158, 9165), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((9367, 9406), 'pysindy.optimizers.ConstrainedSR3', 'ConstrainedSR3', ([], {'thresholder': 'thresholder'}), '(thresholder=thresholder)\n', (9381, 9406), False, 'from pysindy.optimizers import ConstrainedSR3\n'), ((9435, 9457), 'sklearn.utils.validation.check_is_fitted', 'check_is_fitted', (['model'], {}), '(model)\n', (9450, 9457), False, 'from sklearn.utils.validation import check_is_fitted\n'), ((11068, 11162), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['optimizer_trimming.optimizer.trimming_array', 'trimming_array'], {}), '(optimizer_trimming.optimizer.trimming_array,\n trimming_array)\n', (11097, 11162), True, 'import numpy as np\n'), ((11371, 11406), 'numpy.array', 'np.array', (['[[-2.0, 0.0], [0.0, 1.0]]'], {}), '([[-2.0, 0.0], [0.0, 1.0]])\n', (11379, 11406), True, 'import numpy as np\n'), ((11910, 11977), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['model_plain.coef_', 'model_trimming.coef_'], {}), '(model_plain.coef_, model_trimming.coef_)\n', (11936, 11977), True, 'import numpy as np\n'), ((12376, 12443), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['model_plain.coef_', 'model_trimming.coef_'], {}), '(model_plain.coef_, model_trimming.coef_)\n', (12402, 12443), True, 'import numpy as np\n'), ((13499, 13541), 'numpy.zeros', 'np.zeros', (['(2, x.shape[1] * x_dot.shape[1])'], {}), '((2, x.shape[1] * x_dot.shape[1]))\n', (13507, 13541), True, 'import numpy as np\n'), ((14277, 14319), 'numpy.zeros', 'np.zeros', (['(2, x.shape[1] * x_dot.shape[1])'], {}), '((2, x.shape[1] * x_dot.shape[1]))\n', (14285, 14319), True, 'import numpy as np\n'), ((14561, 14632), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['model.coef_[:, 1]', 'target_value'], {'atol': '(1e-08)'}), '(model.coef_[:, 1], target_value, atol=1e-08)\n', (14587, 14632), True, 'import numpy as np\n'), ((14891, 14913), 'numpy.arange', 'np.arange', (['(0)', '(40)', 
'(0.05)'], {}), '(0, 40, 0.05)\n', (14900, 14913), True, 'import numpy as np\n'), ((14922, 14952), 'scipy.integrate.odeint', 'odeint', (['lorenz', '[-8, 8, 27]', 't'], {}), '(lorenz, [-8, 8, 27], t)\n', (14928, 14952), False, 'from scipy.integrate import odeint\n'), ((15059, 15082), 'numpy.array', 'np.array', (['[-10.0, -2.0]'], {}), '([-10.0, -2.0])\n', (15067, 15082), True, 'import numpy as np\n'), ((15107, 15124), 'numpy.zeros', 'np.zeros', (['(2, 30)'], {}), '((2, 30))\n', (15115, 15124), True, 'import numpy as np\n'), ((15240, 15428), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {'threshold': 'thresholds', 'constraint_lhs': 'constraint_matrix', 'constraint_rhs': 'constraint_rhs', 'constraint_order': '"""feature"""', 'inequality_constraints': '(True)', 'relax_optim': 'relax_optim'}), "(threshold=thresholds, constraint_lhs=constraint_matrix,\n constraint_rhs=constraint_rhs, constraint_order='feature',\n inequality_constraints=True, relax_optim=relax_optim)\n", (15251, 15428), False, 'from pysindy.optimizers import TrappingSR3\n'), ((15491, 15518), 'pysindy.PolynomialLibrary', 'PolynomialLibrary', ([], {'degree': '(2)'}), '(degree=2)\n', (15508, 15518), False, 'from pysindy import PolynomialLibrary\n'), ((16026, 16049), 'numpy.array', 'np.array', (['[-10.0, -2.0]'], {}), '([-10.0, -2.0])\n', (16034, 16049), True, 'import numpy as np\n'), ((16074, 16091), 'numpy.zeros', 'np.zeros', (['(2, 30)'], {}), '((2, 30))\n', (16082, 16091), True, 'import numpy as np\n'), ((1056, 1075), 'numpy.ones', 'np.ones', (['x.shape[1]'], {}), '(x.shape[1])\n', (1063, 1075), True, 'import numpy as np\n'), ((2068, 2075), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {}), '()\n', (2073, 2075), False, 'from pysindy.optimizers import STLSQ\n'), ((2085, 2090), 'pysindy.optimizers.SR3', 'SR3', ([], {}), '()\n', (2088, 2090), False, 'from pysindy.optimizers import SR3\n'), ((2100, 2116), 'pysindy.optimizers.ConstrainedSR3', 'ConstrainedSR3', ([], {}), '()\n', (2114, 2116), False, 'from pysindy.optimizers import ConstrainedSR3\n'), ((2126, 2139), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {}), '()\n', (2137, 2139), False, 'from pysindy.optimizers import TrappingSR3\n'), ((2149, 2175), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (2154, 2175), False, 'from sklearn.linear_model import Lasso\n'), ((2185, 2216), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {'fit_intercept': '(False)'}), '(fit_intercept=False)\n', (2195, 2216), False, 'from sklearn.linear_model import ElasticNet\n'), ((2752, 2781), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (2765, 2781), False, 'import pytest\n'), ((2692, 2699), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {}), '()\n', (2697, 2699), False, 'from pysindy.optimizers import STLSQ\n'), ((2701, 2706), 'pysindy.optimizers.SR3', 'SR3', ([], {}), '()\n', (2704, 2706), False, 'from pysindy.optimizers import SR3\n'), ((2956, 2985), 'pytest.raises', 'pytest.raises', (['NotFittedError'], {}), '(NotFittedError)\n', (2969, 2985), False, 'import pytest\n'), ((2867, 2874), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {}), '()\n', (2872, 2874), False, 'from pysindy.optimizers import STLSQ\n'), ((2876, 2881), 'pysindy.optimizers.SR3', 'SR3', ([], {}), '()\n', (2879, 2881), False, 'from pysindy.optimizers import SR3\n'), ((3669, 3694), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3682, 3694), False, 'import pytest\n'), ((3972, 3997), 'pytest.raises', 
'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3985, 3997), False, 'import pytest\n'), ((4507, 4532), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4520, 4532), False, 'import pytest\n'), ((4542, 4563), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {}), '(**params)\n', (4553, 4563), False, 'from pysindy.optimizers import TrappingSR3\n'), ((4829, 4854), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4842, 4854), False, 'import pytest\n'), ((4872, 4893), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {}), '(**params)\n', (4883, 4893), False, 'from pysindy.optimizers import TrappingSR3\n'), ((6531, 6556), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6544, 6556), False, 'import pytest\n'), ((6572, 6593), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {}), '(**params)\n', (6583, 6593), False, 'from pysindy.optimizers import TrappingSR3\n'), ((6610, 6661), 'pysindy.SINDy', 'SINDy', ([], {'optimizer': 'opt', 'feature_library': 'sindy_library'}), '(optimizer=opt, feature_library=sindy_library)\n', (6615, 6661), False, 'from pysindy import SINDy\n'), ((7252, 7272), 'pytest.raises', 'pytest.raises', (['error'], {}), '(error)\n', (7265, 7272), False, 'import pytest\n'), ((7418, 7447), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (7431, 7447), False, 'import pytest\n'), ((7507, 7536), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (7520, 7536), False, 'import pytest\n'), ((8040, 8092), 'numpy.not_equal', 'np.not_equal', (['control_model.coef_', 'guess_model.coef_'], {}), '(control_model.coef_, guess_model.coef_)\n', (8052, 8092), True, 'import numpy as np\n'), ((8954, 8969), 'numpy.ones', 'np.ones', (['(1, 1)'], {}), '((1, 1))\n', (8961, 8969), True, 'import numpy as np\n'), ((9001, 9038), 'numpy.ones', 'np.ones', (['(x_dot.shape[1], x.shape[1])'], {}), '((x_dot.shape[1], x.shape[1]))\n', (9008, 9038), True, 'import numpy as np\n'), ((9604, 9648), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {'threshold': '(0.01)', 'alpha': '(0.1)', 'max_iter': '(1)'}), '(threshold=0.01, alpha=0.1, max_iter=1)\n', (9609, 9648), False, 'from pysindy.optimizers import STLSQ\n'), ((9754, 9798), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {'threshold': '(0.01)', 'alpha': '(0.1)', 'max_iter': '(1)'}), '(threshold=0.01, alpha=0.1, max_iter=1)\n', (9759, 9798), False, 'from pysindy.optimizers import STLSQ\n'), ((10150, 10199), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(0.1)', 'fit_intercept': '(False)', 'max_iter': '(1)'}), '(alpha=0.1, fit_intercept=False, max_iter=1)\n', (10155, 10199), False, 'from sklearn.linear_model import Lasso\n'), ((10305, 10354), 'sklearn.linear_model.Lasso', 'Lasso', ([], {'alpha': '(0.1)', 'fit_intercept': '(False)', 'max_iter': '(1)'}), '(alpha=0.1, fit_intercept=False, max_iter=1)\n', (10310, 10354), False, 'from sklearn.linear_model import Lasso\n'), ((11418, 11460), 'numpy.linalg.norm', 'norm', (['(true_coef - optimizer_trimming.coef_)'], {}), '(true_coef - optimizer_trimming.coef_)\n', (11422, 11460), False, 'from numpy.linalg import norm\n'), ((11463, 11513), 'numpy.linalg.norm', 'norm', (['(true_coef - optimizer_without_trimming.coef_)'], {}), '(true_coef - optimizer_without_trimming.coef_)\n', (11467, 11513), False, 'from numpy.linalg import norm\n'), ((12690, 12722), 'pytest.warns', 'pytest.warns', (['ConvergenceWarning'], {}), '(ConvergenceWarning)\n', (12702, 12722), False, 'import 
pytest\n'), ((13052, 13084), 'pytest.warns', 'pytest.warns', (['ConvergenceWarning'], {}), '(ConvergenceWarning)\n', (13064, 13084), False, 'import pytest\n'), ((12811, 12828), 'pysindy.optimizers.STLSQ', 'STLSQ', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (12816, 12828), False, 'from pysindy.optimizers import STLSQ\n'), ((12838, 12853), 'pysindy.optimizers.SR3', 'SR3', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (12841, 12853), False, 'from pysindy.optimizers import SR3\n'), ((12863, 12889), 'pysindy.optimizers.ConstrainedSR3', 'ConstrainedSR3', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (12877, 12889), False, 'from pysindy.optimizers import ConstrainedSR3\n'), ((12899, 12922), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {'max_iter': '(1)'}), '(max_iter=1)\n', (12910, 12922), False, 'from pysindy.optimizers import TrappingSR3\n'), ((13467, 13477), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (13474, 13477), True, 'import numpy as np\n'), ((13879, 13927), 'numpy.array', 'np.array', (['[model.coef_[0, 0], model.coef_[1, 1]]'], {}), '([model.coef_[0, 0], model.coef_[1, 1]])\n', (13887, 13927), True, 'import numpy as np\n'), ((14245, 14255), 'numpy.ones', 'np.ones', (['(2)'], {}), '(2)\n', (14252, 14255), True, 'import numpy as np\n'), ((14965, 15009), 'numpy.random.normal', 'np.random.normal', (['(0.0)', 'noise_levels', 'x.shape'], {}), '(0.0, noise_levels, x.shape)\n', (14981, 15009), True, 'import numpy as np\n'), ((16170, 16195), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (16183, 16195), False, 'import pytest\n'), ((16205, 16381), 'pysindy.optimizers.TrappingSR3', 'TrappingSR3', ([], {'threshold': '(0.0)', 'constraint_lhs': 'constraint_matrix', 'constraint_rhs': 'constraint_rhs', 'constraint_order': '"""feature"""', 'inequality_constraints': '(True)', 'relax_optim': '(True)'}), "(threshold=0.0, constraint_lhs=constraint_matrix, constraint_rhs\n =constraint_rhs, constraint_order='feature', inequality_constraints=\n True, relax_optim=True)\n", (16216, 16381), False, 'from pysindy.optimizers import TrappingSR3\n'), ((2809, 2824), 'numpy.ones', 'np.ones', (['(1, 3)'], {}), '((1, 3))\n', (2816, 2824), True, 'import numpy as np\n'), ((9877, 9932), 'numpy.linalg.norm', 'norm', (['(optimizer_biased.coef_ - optimizer_unbiased.coef_)'], {}), '(optimizer_biased.coef_ - optimizer_unbiased.coef_)\n', (9881, 9932), False, 'from numpy.linalg import norm\n'), ((9943, 9973), 'numpy.linalg.norm', 'norm', (['optimizer_unbiased.coef_'], {}), '(optimizer_unbiased.coef_)\n', (9947, 9973), False, 'from numpy.linalg import norm\n'), ((10433, 10488), 'numpy.linalg.norm', 'norm', (['(optimizer_biased.coef_ - optimizer_unbiased.coef_)'], {}), '(optimizer_biased.coef_ - optimizer_unbiased.coef_)\n', (10437, 10488), False, 'from numpy.linalg import norm\n'), ((15626, 15663), 'pysindy.FiniteDifference', 'FiniteDifference', ([], {'drop_endpoints': '(True)'}), '(drop_endpoints=True)\n', (15642, 15663), False, 'from pysindy import FiniteDifference\n'), ((4619, 4645), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(9)'], {}), '(3, 3, 3, 9)\n', (4633, 4645), True, 'import numpy as np\n'), ((4656, 4685), 'numpy.random.rand', 'np.random.rand', (['(3)', '(3)', '(3)', '(3)', '(9)'], {}), '(3, 3, 3, 3, 9)\n', (4670, 4685), True, 'import numpy as np\n'), ((4977, 4998), 'numpy.ones', 'np.ones', (['(3, 3, 3, 9)'], {}), '((3, 3, 3, 9))\n', (4984, 4998), True, 'import numpy as np\n'), ((5003, 5027), 'numpy.ones', 'np.ones', (['(3, 3, 3, 3, 9)'], {}), '((3, 3, 3, 3, 
9))\n', (5010, 5027), True, 'import numpy as np\n'), ((5774, 5795), 'numpy.ones', 'np.ones', (['(3, 3, 3, 9)'], {}), '((3, 3, 3, 9))\n', (5781, 5795), True, 'import numpy as np\n'), ((5800, 5824), 'numpy.ones', 'np.ones', (['(3, 3, 3, 3, 9)'], {}), '((3, 3, 3, 3, 9))\n', (5807, 5824), True, 'import numpy as np\n'), ((10500, 10530), 'numpy.linalg.norm', 'norm', (['optimizer_unbiased.coef_'], {}), '(optimizer_unbiased.coef_)\n', (10504, 10530), False, 'from numpy.linalg import norm\n'), ((7155, 7170), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (7162, 7170), True, 'import numpy as np\n')]
|
import warnings
from ast import literal_eval
from datetime import datetime
import numpy as np
from scipy.stats import pearsonr
from sklearn import metrics
from sklearn.utils.multiclass import type_of_target
MULTICLASS_INDICATOR = "multiclass-indicator"
warnings.filterwarnings("ignore")
def get_epoch_time():
return int((datetime.now() - datetime(1970, 1, 1)).total_seconds())
def count_params(trainable_variables):
# to return number of trainable variables. Example: shared.count_params(tf.trainable_variables()))
return np.sum([np.prod(v.get_shape().as_list()) for v in trainable_variables])
def load_id2gt(gt_file):
ids = []
fgt = open(gt_file)
id2gt = dict()
for line in fgt.readlines():
id, gt = line.strip().split("\t") # id is string
id2gt[id] = literal_eval(gt) # gt is array
ids.append(id)
return ids, id2gt
def load_id2path(index_file):
paths = []
fspec = open(index_file)
id2path = dict()
for line in fspec.readlines():
id, path = line.strip().split("\t")
id2path[id] = path
paths.append(path)
return paths, id2path
def type_of_groundtruth(y):
"""
    Get the type of groundtruth data by extending scikit-learn functionality.
    scikit-learn will detect one-hot encoded multiclass data as multilabel-indicator.
    If this is the case, this function returns "multiclass-indicator", which is
currently not used in scikit-learn, and the scikit-learn result otherwise.
Args:
y: numpy array with the groundtruth data
Returns:
target_type: string
Either "multiclass-indicator" or the result of
sklearn.utils.multiclass.type_of_target
"""
scikit_learn_type = type_of_target(y)
if (
scikit_learn_type == "multilabel-indicator"
and np.count_nonzero(y) == y.shape[0]
):
return MULTICLASS_INDICATOR
else:
return scikit_learn_type
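# Editorial illustration (not part of the original module): a one-hot multiclass matrix
# such as [[1, 0, 0], [0, 1, 0]] has exactly one non-zero entry per row, so it is reported
# as "multiclass-indicator"; [[1, 1, 0], [0, 1, 1]] keeps scikit-learn's
# "multilabel-indicator" label.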
def compute_auc(true, estimated):
"""
Calculate macro PR-AUC and macro ROC-AUC using the default scikit-learn parameters.
Multiclass data is currently not supported and ROC-AUC & PR AUC will be NaN.
"""
estimated = np.array(estimated)
true = np.array(true)
if type_of_groundtruth(true) == MULTICLASS_INDICATOR:
pr_auc = np.nan
# if we move to scikit-learn 0.22 we can calculate a roc_auc_score for
# multiclass data like this:
# estimated = estimated.argmax(axis=1)
# roc_auc = metrics.roc_auc_score(true, estimated, multi_class="ovr")
roc_auc = np.nan
else:
pr_auc = metrics.average_precision_score(true, estimated)
roc_auc = metrics.roc_auc_score(true, estimated)
return roc_auc, pr_auc
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def minmax_standarize(x, x_min=-1, x_max=1, headroom=0.1):
return (x - x_min) / ((x_max + headroom) - (x_min - headroom))
def average_predictions(pred_array, id_array, ids, id2gt=None):
# averaging probabilities -> one could also do majority voting
print('Averaging predictions')
y_pred = []
y_true = []
healthy_ids = []
for id in ids:
try:
avg = np.mean(pred_array[np.where(id_array == id)], axis=0)
if np.isnan(avg).any():
print('{} skipped because it contains nans'.format(id))
continue
if np.isposinf(avg).any():
print('{} skipped because it contains pos infs'.format(id))
continue
if np.isneginf(avg).any():
print('{} skipped because it contains neg infs'.format(id))
continue
y_pred.append(avg)
if id2gt:
y_true.append(id2gt[id])
healthy_ids.append(id)
except:
print(id)
if id2gt:
return y_true, y_pred, healthy_ids
else:
return y_pred
def average_predictions_ids(pred_array, id_array, ids):
# averages the predictions and returns the ids of the elements
# that did not fail.
print('Averaging predictions')
y_pred = []
ids_present = []
for id in ids:
try:
avg = np.mean(pred_array[np.where(id_array == id)], axis=0)
if np.isnan(avg).any():
print('{} skipped because it contains nans'.format(id))
continue
if np.isposinf(avg).any():
print('{} skipped because it contains pos infs'.format(id))
continue
if np.isneginf(avg).any():
print('{} skipped because it contains neg infs'.format(id))
continue
y_pred.append(avg)
ids_present.append(id)
except:
print(id)
return y_pred, ids_present
def compute_accuracy(y_true, y_pred):
y_true = np.array(y_true)
y_pred = np.array(y_pred)
print('computing accuracy of {} elements'.format(len(y_true)))
groundtruth_type = type_of_groundtruth(y_true)
if groundtruth_type == "multilabel-indicator":
y_pred = np.round(y_pred)
return metrics.accuracy_score(y_true, y_pred)
elif groundtruth_type == MULTICLASS_INDICATOR:
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
else:
y_true = np.squeeze(y_true)
y_pred = np.round(np.squeeze(y_pred))
return metrics.balanced_accuracy_score(y_true, y_pred)
def compute_pearson_correlation(y_true, y_pred, axis=0):
print(f'computing Pearson Correlation Coefficient of {len(y_true)} elements')
mx = np.mean(y_true,axis=axis)
my = np.mean(y_pred,axis=axis)
xm, ym = y_true - mx, y_pred - my
r_num = np.mean(xm * ym, axis=axis)
r_den = np.std(xm, axis=axis) * np.std(ym, axis=axis)
return r_num / r_den
def compute_ccc(y_true, y_pred, axis=0):
print(f'computing Concordance Correlation Coefficient of {len(y_true)} elements')
# Concordance Correlation Coefficient (CCC)
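    # CCC = 2 * cov(x, y) / (var(x) + var(y) + (mean(x) - mean(y)) ** 2)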
x_mean = np.mean(y_true, axis=axis)
y_mean = np.mean(y_pred, axis=axis)
x_var = np.var(y_true, axis=axis)
y_var = np.var(y_pred, axis=axis)
cov = np.mean((y_true - x_mean) * (y_pred - y_mean))
numerator = 2 * cov
denominator = x_var + y_var + (x_mean - y_mean) ** 2
return numerator / denominator
# R2 score
def compute_r2_score(y_true, y_pred, axis=0):
print(f'computing R2 Score of {len(y_true)} elements')
y_true = np.array(y_true)
y_pred = np.array(y_pred)
ss_residual = np.sum(np.square(y_true - y_pred), axis=0)
ss_total = np.sum(
np.square(y_true - np.mean(y_true)), axis=0
)
r_squared = 1.0 - np.nan_to_num(ss_residual/ss_total)
return r_squared
# Adjusted R2 score
def compute_adjusted_r2_score(y_true, y_pred, p):
# p refers to the number of predictors (i.e for arousal and valence p=2)
print(f'computing Adjusted R2 Score of {len(y_true)} elements')
r_squared = compute_r2_score(y_true, y_pred)
adjusted_r_squared = 1 - (1 - r_squared) * (len(y_true) - 1.0) / (len(y_true) - p - 1.0)
return adjusted_r_squared
def compute_root_mean_squared_error(y_true, y_pred):
    print(f'computing Root Mean Squared Error of {len(y_true)} elements')
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    # squared=False makes scikit-learn return the root of the mean squared error
    return metrics.mean_squared_error(y_true, y_pred, multioutput="raw_values", squared=False)
def compute_mean_squared_error(y_true, y_pred):
    print(f'computing Mean Squared Error of {len(y_true)} elements')
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    return metrics.mean_squared_error(y_true, y_pred, multioutput="raw_values", squared=True)
|
[
"numpy.nan_to_num",
"numpy.argmax",
"sklearn.metrics.accuracy_score",
"numpy.isnan",
"numpy.mean",
"numpy.exp",
"numpy.round",
"numpy.isposinf",
"numpy.std",
"sklearn.utils.multiclass.type_of_target",
"sklearn.metrics.average_precision_score",
"numpy.var",
"sklearn.metrics.mean_squared_error",
"datetime.datetime.now",
"numpy.isneginf",
"numpy.square",
"sklearn.metrics.roc_auc_score",
"datetime.datetime",
"numpy.squeeze",
"numpy.count_nonzero",
"warnings.filterwarnings",
"sklearn.metrics.balanced_accuracy_score",
"numpy.where",
"numpy.array",
"ast.literal_eval"
] |
[((256, 289), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (279, 289), False, 'import warnings\n'), ((1737, 1754), 'sklearn.utils.multiclass.type_of_target', 'type_of_target', (['y'], {}), '(y)\n', (1751, 1754), False, 'from sklearn.utils.multiclass import type_of_target\n'), ((2186, 2205), 'numpy.array', 'np.array', (['estimated'], {}), '(estimated)\n', (2194, 2205), True, 'import numpy as np\n'), ((2217, 2231), 'numpy.array', 'np.array', (['true'], {}), '(true)\n', (2225, 2231), True, 'import numpy as np\n'), ((4850, 4866), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (4858, 4866), True, 'import numpy as np\n'), ((4880, 4896), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (4888, 4896), True, 'import numpy as np\n'), ((5396, 5443), 'sklearn.metrics.balanced_accuracy_score', 'metrics.balanced_accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5427, 5443), False, 'from sklearn import metrics\n'), ((5595, 5621), 'numpy.mean', 'np.mean', (['y_true'], {'axis': 'axis'}), '(y_true, axis=axis)\n', (5602, 5621), True, 'import numpy as np\n'), ((5630, 5656), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': 'axis'}), '(y_pred, axis=axis)\n', (5637, 5656), True, 'import numpy as np\n'), ((5706, 5733), 'numpy.mean', 'np.mean', (['(xm * ym)'], {'axis': 'axis'}), '(xm * ym, axis=axis)\n', (5713, 5733), True, 'import numpy as np\n'), ((6010, 6036), 'numpy.mean', 'np.mean', (['y_true'], {'axis': 'axis'}), '(y_true, axis=axis)\n', (6017, 6036), True, 'import numpy as np\n'), ((6050, 6076), 'numpy.mean', 'np.mean', (['y_pred'], {'axis': 'axis'}), '(y_pred, axis=axis)\n', (6057, 6076), True, 'import numpy as np\n'), ((6090, 6115), 'numpy.var', 'np.var', (['y_true'], {'axis': 'axis'}), '(y_true, axis=axis)\n', (6096, 6115), True, 'import numpy as np\n'), ((6128, 6153), 'numpy.var', 'np.var', (['y_pred'], {'axis': 'axis'}), '(y_pred, axis=axis)\n', (6134, 6153), True, 'import numpy as np\n'), ((6165, 6211), 'numpy.mean', 'np.mean', (['((y_true - x_mean) * (y_pred - y_mean))'], {}), '((y_true - x_mean) * (y_pred - y_mean))\n', (6172, 6211), True, 'import numpy as np\n'), ((6463, 6479), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (6471, 6479), True, 'import numpy as np\n'), ((6493, 6509), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (6501, 6509), True, 'import numpy as np\n'), ((7263, 7279), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (7271, 7279), True, 'import numpy as np\n'), ((7293, 7309), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (7301, 7309), True, 'import numpy as np\n'), ((7321, 7407), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""', 'squared': '(True)'}), "(y_true, y_pred, multioutput='raw_values',\n squared=True)\n", (7347, 7407), False, 'from sklearn import metrics\n'), ((7536, 7552), 'numpy.array', 'np.array', (['y_true'], {}), '(y_true)\n', (7544, 7552), True, 'import numpy as np\n'), ((7566, 7582), 'numpy.array', 'np.array', (['y_pred'], {}), '(y_pred)\n', (7574, 7582), True, 'import numpy as np\n'), ((7594, 7681), 'sklearn.metrics.mean_squared_error', 'metrics.mean_squared_error', (['y_true', 'y_pred'], {'multioutput': '"""raw_values"""', 'squared': '(False)'}), "(y_true, y_pred, multioutput='raw_values',\n squared=False)\n", (7620, 7681), False, 'from sklearn import metrics\n'), ((807, 823), 'ast.literal_eval', 'literal_eval', (['gt'], {}), '(gt)\n', (819, 823), False, 
'from ast import literal_eval\n'), ((2608, 2656), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['true', 'estimated'], {}), '(true, estimated)\n', (2639, 2656), False, 'from sklearn import metrics\n'), ((2675, 2713), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['true', 'estimated'], {}), '(true, estimated)\n', (2696, 2713), False, 'from sklearn import metrics\n'), ((5084, 5100), 'numpy.round', 'np.round', (['y_pred'], {}), '(y_pred)\n', (5092, 5100), True, 'import numpy as np\n'), ((5116, 5154), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (5138, 5154), False, 'from sklearn import metrics\n'), ((5746, 5767), 'numpy.std', 'np.std', (['xm'], {'axis': 'axis'}), '(xm, axis=axis)\n', (5752, 5767), True, 'import numpy as np\n'), ((5770, 5791), 'numpy.std', 'np.std', (['ym'], {'axis': 'axis'}), '(ym, axis=axis)\n', (5776, 5791), True, 'import numpy as np\n'), ((6536, 6562), 'numpy.square', 'np.square', (['(y_true - y_pred)'], {}), '(y_true - y_pred)\n', (6545, 6562), True, 'import numpy as np\n'), ((6675, 6712), 'numpy.nan_to_num', 'np.nan_to_num', (['(ss_residual / ss_total)'], {}), '(ss_residual / ss_total)\n', (6688, 6712), True, 'import numpy as np\n'), ((1828, 1847), 'numpy.count_nonzero', 'np.count_nonzero', (['y'], {}), '(y)\n', (1844, 1847), True, 'import numpy as np\n'), ((2779, 2789), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (2785, 2789), True, 'import numpy as np\n'), ((5223, 5248), 'numpy.argmax', 'np.argmax', (['y_true'], {'axis': '(1)'}), '(y_true, axis=1)\n', (5232, 5248), True, 'import numpy as np\n'), ((5266, 5291), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (5275, 5291), True, 'import numpy as np\n'), ((5319, 5337), 'numpy.squeeze', 'np.squeeze', (['y_true'], {}), '(y_true)\n', (5329, 5337), True, 'import numpy as np\n'), ((5364, 5382), 'numpy.squeeze', 'np.squeeze', (['y_pred'], {}), '(y_pred)\n', (5374, 5382), True, 'import numpy as np\n'), ((6622, 6637), 'numpy.mean', 'np.mean', (['y_true'], {}), '(y_true)\n', (6629, 6637), True, 'import numpy as np\n'), ((330, 344), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (342, 344), False, 'from datetime import datetime\n'), ((347, 367), 'datetime.datetime', 'datetime', (['(1970)', '(1)', '(1)'], {}), '(1970, 1, 1)\n', (355, 367), False, 'from datetime import datetime\n'), ((3209, 3233), 'numpy.where', 'np.where', (['(id_array == id)'], {}), '(id_array == id)\n', (3217, 3233), True, 'import numpy as np\n'), ((3259, 3272), 'numpy.isnan', 'np.isnan', (['avg'], {}), '(avg)\n', (3267, 3272), True, 'import numpy as np\n'), ((3393, 3409), 'numpy.isposinf', 'np.isposinf', (['avg'], {}), '(avg)\n', (3404, 3409), True, 'import numpy as np\n'), ((3534, 3550), 'numpy.isneginf', 'np.isneginf', (['avg'], {}), '(avg)\n', (3545, 3550), True, 'import numpy as np\n'), ((4211, 4235), 'numpy.where', 'np.where', (['(id_array == id)'], {}), '(id_array == id)\n', (4219, 4235), True, 'import numpy as np\n'), ((4261, 4274), 'numpy.isnan', 'np.isnan', (['avg'], {}), '(avg)\n', (4269, 4274), True, 'import numpy as np\n'), ((4395, 4411), 'numpy.isposinf', 'np.isposinf', (['avg'], {}), '(avg)\n', (4406, 4411), True, 'import numpy as np\n'), ((4536, 4552), 'numpy.isneginf', 'np.isneginf', (['avg'], {}), '(avg)\n', (4547, 4552), True, 'import numpy as np\n')]
|
# %%
from logging import critical
from typing import Callable, Tuple, Union
import numpy as np
from enum import Enum, auto
from scipy.stats import norm, t, chi2
# %%
decimal_limit = 2
class TestType(Enum):
# Non-directional
DOUBLE_TAILED = auto()
# directional
LOWER_TAILED = auto()
UPPER_TAILED = auto()
# https://machinelearningmastery.com/critical-values-for-statistical-hypothesis-testing/
# https://dfrieds.com/math/z-tests.html#:~:text=In%20this%20instance%2C%20the%20z,and%20standard%20deviation%20of%201).
# https://reneshbedre.github.io/blog/anova.html
# https://statisticsbyjim.com/hypothesis-testing/one-tailed-two-tailed-hypothesis-tests/
# %%
def get_standard_error(σ: float, n: int) -> float:
return round(σ / np.sqrt(n), decimal_limit)
# def get_standard_error(μ:float, σ:float, n:int)->float:
def get_z_critical_normal(alpha: float) -> float:
# Calculate Zc
probability = 1 - alpha
return round(norm.ppf(probability), decimal_limit)
def get_z_critical_t(alpha: float, df: int) -> float:
# Calculate Zc
probability = 1 - alpha
return round(t.ppf(probability, df), decimal_limit)
def get_z_critical_chi2(alpha: float, df: int) -> float:
# Calculate Zc
probability = 1 - alpha
return round(chi2.ppf(probability, df), decimal_limit)
def prepare_output(LCV, UCV, critical_value, population_mean, z_critical):
msg = (
'"Fail" to reject the null hypothesis'
if LCV <= round(population_mean, decimal_limit) <= UCV
else "Reject the null hypothesis"
)
return f"LCV: {LCV}, UCV: {UCV}, Addition Value: {critical_value}, Zc: {z_critical}, Result: {msg}"
def get_critical_value(
population_mean: float,
population_std: float,
sample_size: int,
alpha: float,
test_type: TestType,
df=None,
critical_value_calculator: Union[
Callable[[float, int], float], Callable[[float], float]
] = get_z_critical_normal,
standard_error_calculator: Callable[[float, int], float] = get_standard_error,
sample_mean: float = None,
) -> Union[float, Tuple[float, float]]:
if test_type == TestType.DOUBLE_TAILED:
alpha = alpha / 2
se = standard_error_calculator(population_std, sample_size)
if df is None:
z_critical = critical_value_calculator(alpha)
else:
z_critical = critical_value_calculator(alpha, df)
critical_value = round(z_critical * se, decimal_limit)
LCV = population_mean - critical_value
UCV = population_mean + critical_value
if sample_mean is None:
sample_mean = population_mean
return prepare_output(LCV, UCV, critical_value, sample_mean, z_critical)
def get_critical_value_with_zc(
z_critical: float,
population_mean: float,
population_std: float,
sample_size: int,
sample_mean: float = None,
) -> float:
se = get_standard_error(population_std, sample_size)
critical_value = round(z_critical * se, decimal_limit)
LCV = population_mean - critical_value
UCV = population_mean + critical_value
if sample_mean is None:
sample_mean = population_mean
return prepare_output(LCV, UCV, critical_value, sample_mean, z_critical)
# %%
get_critical_value_with_zc(2.17, 36, 4, 49)
# %%
get_critical_value_with_zc(2.17, 36, 4, 49, sample_mean=34.6)
# %%
alpha = 0.03
test_type = TestType.DOUBLE_TAILED
population_mean = 36
population_std = 4
sample_size = 49
get_critical_value(population_mean, population_std, sample_size, alpha, test_type)
# %%
# se = get_standard_error(population_std, sample_size)
# sample_mean = 34.5
# z = (sample_mean - population_mean) / se
# z
# %%
alpha = 0.05
test_type = TestType.LOWER_TAILED
population_mean = 350
print(get_z_critical_normal(alpha))
population_std = 90
sample_size = 36
sample_mean = 34.6
x = 370.16
get_critical_value(population_mean, population_std, sample_size, alpha, test_type)
# %%
alpha = 0.03
test_type = TestType.LOWER_TAILED
population_mean = 2.5
print(get_z_critical_normal(alpha))
population_std = 0.6
sample_size = 100
sample_mean = 2.6
get_critical_value(
population_mean,
population_std,
sample_size,
alpha,
test_type,
sample_mean=sample_mean,
)
# %%
alpha = 0.03
test_type = TestType.LOWER_TAILED
population_mean = 2.5
population_std = 0.6
sample_size = 1000
sample_mean = 2.6
get_critical_value(
population_mean,
population_std,
sample_size,
alpha,
test_type,
sample_mean=sample_mean,
)
# %%
alpha = 0.02
test_type = TestType.DOUBLE_TAILED
population_mean = 60
population_std = 10.7
sample_size = 100
sample_mean = 62.6
get_critical_value(
population_mean,
population_std,
sample_size,
alpha,
test_type,
sample_mean=sample_mean,
)
# %%
|
[
"scipy.stats.norm.ppf",
"scipy.stats.chi2.ppf",
"enum.auto",
"scipy.stats.t.ppf",
"numpy.sqrt"
] |
[((264, 270), 'enum.auto', 'auto', ([], {}), '()\n', (268, 270), False, 'from enum import Enum, auto\n'), ((310, 316), 'enum.auto', 'auto', ([], {}), '()\n', (314, 316), False, 'from enum import Enum, auto\n'), ((337, 343), 'enum.auto', 'auto', ([], {}), '()\n', (341, 343), False, 'from enum import Enum, auto\n'), ((994, 1015), 'scipy.stats.norm.ppf', 'norm.ppf', (['probability'], {}), '(probability)\n', (1002, 1015), False, 'from scipy.stats import norm, t, chi2\n'), ((1158, 1180), 'scipy.stats.t.ppf', 't.ppf', (['probability', 'df'], {}), '(probability, df)\n', (1163, 1180), False, 'from scipy.stats import norm, t, chi2\n'), ((1326, 1351), 'scipy.stats.chi2.ppf', 'chi2.ppf', (['probability', 'df'], {}), '(probability, df)\n', (1334, 1351), False, 'from scipy.stats import norm, t, chi2\n'), ((783, 793), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (790, 793), True, 'import numpy as np\n')]
|
# test solver
import numpy as np
from MLEK.main.solver import solver
from MLEK.main.utils import irfft
def V_gen(nbasis, V0):
hamilton_mat = np.zeros((nbasis, nbasis), dtype=np.complex64)
np.fill_diagonal(hamilton_mat[1:, :-1], V0*(-0.25))
Vq = np.zeros(nbasis, dtype=np.complex64)
Vq[0], Vq[1] = -0.5*V0, -0.25*V0
return hamilton_mat, Vq
nk = 100
nbasis = 10
V0 = 10
hamilton_mat, Vq = V_gen(nbasis, V0)
mu = 10
T, mu, dens_q = solver(nk, nbasis, mu, hamilton_mat)
kpoints = np.linspace(0, np.pi, nk)
print(dens_q[0])
X = np.linspace(0, 1, 100)
dens_x = irfft(dens_q, 100)
# print(delta_En_k)
# # print(Vq[1])
# print(dens_q[0])
# print((dens_q[0]**3)*(np.pi**2)/6)
# print(T)
omega = 2*np.pi*np.sqrt(V0)
f = lambda x: np.sqrt(omega/np.pi)*np.exp(-omega*x**2)
y = f(X)
import matplotlib.pyplot as plt
plt.plot(X, dens_x, 'b')
plt.plot(X, y, 'r')
plt.show()
|
[
"numpy.fill_diagonal",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"MLEK.main.solver.solver",
"numpy.zeros",
"MLEK.main.utils.irfft",
"numpy.exp",
"numpy.linspace",
"numpy.sqrt"
] |
[((452, 488), 'MLEK.main.solver.solver', 'solver', (['nk', 'nbasis', 'mu', 'hamilton_mat'], {}), '(nk, nbasis, mu, hamilton_mat)\n', (458, 488), False, 'from MLEK.main.solver import solver\n'), ((500, 525), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'nk'], {}), '(0, np.pi, nk)\n', (511, 525), True, 'import numpy as np\n'), ((548, 570), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (559, 570), True, 'import numpy as np\n'), ((580, 598), 'MLEK.main.utils.irfft', 'irfft', (['dens_q', '(100)'], {}), '(dens_q, 100)\n', (585, 598), False, 'from MLEK.main.utils import irfft\n'), ((830, 854), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'dens_x', '"""b"""'], {}), "(X, dens_x, 'b')\n", (838, 854), True, 'import matplotlib.pyplot as plt\n'), ((855, 874), 'matplotlib.pyplot.plot', 'plt.plot', (['X', 'y', '"""r"""'], {}), "(X, y, 'r')\n", (863, 874), True, 'import matplotlib.pyplot as plt\n'), ((875, 885), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (883, 885), True, 'import matplotlib.pyplot as plt\n'), ((147, 193), 'numpy.zeros', 'np.zeros', (['(nbasis, nbasis)'], {'dtype': 'np.complex64'}), '((nbasis, nbasis), dtype=np.complex64)\n', (155, 193), True, 'import numpy as np\n'), ((198, 249), 'numpy.fill_diagonal', 'np.fill_diagonal', (['hamilton_mat[1:, :-1]', '(V0 * -0.25)'], {}), '(hamilton_mat[1:, :-1], V0 * -0.25)\n', (214, 249), True, 'import numpy as np\n'), ((259, 295), 'numpy.zeros', 'np.zeros', (['nbasis'], {'dtype': 'np.complex64'}), '(nbasis, dtype=np.complex64)\n', (267, 295), True, 'import numpy as np\n'), ((721, 732), 'numpy.sqrt', 'np.sqrt', (['V0'], {}), '(V0)\n', (728, 732), True, 'import numpy as np\n'), ((747, 769), 'numpy.sqrt', 'np.sqrt', (['(omega / np.pi)'], {}), '(omega / np.pi)\n', (754, 769), True, 'import numpy as np\n'), ((768, 791), 'numpy.exp', 'np.exp', (['(-omega * x ** 2)'], {}), '(-omega * x ** 2)\n', (774, 791), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 11:48:32 2021
@author: surajitrana
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_barchart():
x = np.array(["Apple", "Samsung", "IBM", "Intel"])
y = np.array([1000, 560, 900, 678])
plt.xlabel("Brands")
plt.ylabel("Sales (in billions USD)")
plt.title("Sales of brands over FY1-2021")
plt.bar(x, y, color="hotpink", width=0.3)
plt.show()
if __name__ == '__main__':
plot_barchart()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.bar",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((197, 243), 'numpy.array', 'np.array', (["['Apple', 'Samsung', 'IBM', 'Intel']"], {}), "(['Apple', 'Samsung', 'IBM', 'Intel'])\n", (205, 243), True, 'import numpy as np\n'), ((252, 283), 'numpy.array', 'np.array', (['[1000, 560, 900, 678]'], {}), '([1000, 560, 900, 678])\n', (260, 283), True, 'import numpy as np\n'), ((289, 309), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Brands"""'], {}), "('Brands')\n", (299, 309), True, 'import matplotlib.pyplot as plt\n'), ((314, 351), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Sales (in billions USD)"""'], {}), "('Sales (in billions USD)')\n", (324, 351), True, 'import matplotlib.pyplot as plt\n'), ((356, 398), 'matplotlib.pyplot.title', 'plt.title', (['"""Sales of brands over FY1-2021"""'], {}), "('Sales of brands over FY1-2021')\n", (365, 398), True, 'import matplotlib.pyplot as plt\n'), ((403, 444), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'y'], {'color': '"""hotpink"""', 'width': '(0.3)'}), "(x, y, color='hotpink', width=0.3)\n", (410, 444), True, 'import matplotlib.pyplot as plt\n'), ((449, 459), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (457, 459), True, 'import matplotlib.pyplot as plt\n')]
|
import numpy as np
import scipy.io as spio
# Utility functions to initialize the problem
from Grid.GridProcessing import Grid
from Shapes.ShapesFunctions import *
# Specify the file that includes dynamic systems
from dynamics.DubinsCar4D_HRI import *
# Plot options
from plot_options import *
# Solver core
from solver import HJSolver
import math
## Note: this is adapted from user_definer.py and will try to visualize the value function at the end.
# For this scenario the visualization does not work yet and will raise an error;
# however, the final value function is still saved to disk before the error is raised.
""" USER INTERFACES
- Define grid
- Generate initial values for grid using shape functions
- Time length for computations
- Initialize plotting option
- Call HJSolver function
"""
# find the implicit surface of the Robot's avoid set:
def ShapeRobotAvoid(xs,params):
# find the implicit surface of the Robot's avoid set:
# -x_r + lgt_lb <= 0 (data1), and
# x_r - lgt_ub <= 0 (data2), and
# y_R - y_H - lat_bd <= 0 (data3), and
# -y_R + y_H - lat_bd <= 0 (data4)
# state vector = [x_r, y_R, y_H, v_r]
#
# NOTICE:
# This function assumes zero sublevel set, i.e. negative inside,
# positive outside. Add a negative sign if using this as an avoid set.
# set specifications
lgt_lb = params['avoid']['lgt_lb']
lgt_ub = params['avoid']['lgt_ub']
lat_bd = params['avoid']['lat_bd']
# data1: -x_r + lgt_lb <= 0
data1 = -xs[0] + lgt_lb
# data2: x_r - lgt_ub <= 0
data2 = xs[0] - lgt_ub
# data3: y_R - y_H - lat_bd <= 0
data3 = xs[1] - xs[2] - lat_bd
# data4: -y_R + y_H - lat_bd <= 0
data4 = -xs[1] + xs[2] - lat_bd
# the final data is just the intersection of the four
data = Intersection(data1, data2)
data = Intersection(data, data3)
data = Intersection(data, data4)
return(data)
'''
Defining parameters for the scenario
'''
# dictionary keeping track of extra arguments for the solver
extraArgs = {}
extraArgs['obstacles'] = None
# dictionary tracking parameters of the problem
params = {}
# road width
params['rd_bd_min'] = -3.7
params['rd_bd_max'] = 3.7
# relative longitudinal distance
params['rd_len_lb'] = -18
params['rd_len_ub'] = 12
# desired cruising speed
params['vDes'] = 30
# relative velocity bounds
params['v_rel_lb'] = -10
params['v_rel_ub'] = 10
# target set specs
params['xr_tar_overtake'] = 10
params['xr_tar_lanekeep'] = params['rd_len_lb'] + 3
# avoid set specs
params['avoid'] ={'lgt_lb': -5.5, 'lgt_ub': 5.5, 'lat_bd':2.0}
# input bounds and model parameters
params['accMax_R'] = 3
params['vLatMax_R'] = 3
params['accMax_H'] = 1
params['vLatMax_H'] = 1
params['accMax_R_sh'] = 3
params['vLatMax_R_sh'] = 3
params['accMax_H_sh'] = 1
params['vLatMax_H_sh'] = 1
params['talpha'] = 0.01
'''
Defining the grid for the problem
'''
# states x_r y_R y_H v_r
HJ_grid_min = np.array([params['rd_len_lb'], params['rd_bd_min'], params['rd_bd_min'], params['v_rel_lb']])
HJ_grid_max = np.array([params['rd_len_ub'], params['rd_bd_max'], params['rd_bd_max'], params['v_rel_ub']])
HJ_dims = 4 # number of dimensions
HJ_N = np.array([41, 15, 15, 31]) # number of grid points per dimension
HJ_pdDims = [] # periodic dimensions
# g = Grid(np.array([min, max, num_dim, pts_each_dim, pDim=[])
g = Grid(HJ_grid_min, HJ_grid_max , HJ_dims, HJ_N, HJ_pdDims)
# optimized_dp lacks the "xs" field for their grid objects, constructing here
xs = np.empty([4,HJ_N[0],HJ_N[1],HJ_N[2],HJ_N[3]])
for l in range(HJ_N[3]):
for k in range(HJ_N[2]):
for j in range(HJ_N[1]):
xs[0][:,j,k,l] = g.vs[0][:,0,0,0]
for i in range(HJ_N[0]):
xs[1][i,:,k,l] = g.vs[1][0,:,0,0]
for j in range(HJ_N[1]):
for i in range(HJ_N[0]):
xs[2][i,j,:,l] = g.vs[2][0,0,:,0]
for k in range(HJ_N[2]):
for j in range(HJ_N[1]):
for i in range(HJ_N[0]):
xs[3][i,j,k,:] = g.vs[3][0,0,0,:]
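# The loops above broadcast each 1-D grid axis in g.vs into the full 4-D state grid xs.
# An equivalent, more compact construction (illustrative alternative, not from the original):
#   xs = np.stack(np.meshgrid(g.vs[0][:, 0, 0, 0], g.vs[1][0, :, 0, 0],
#                              g.vs[2][0, 0, :, 0], g.vs[3][0, 0, 0, :], indexing='ij'))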
'''
making target and avoid sets
'''
inf = np.inf
# going off road boundaries - Robot
rd_bd_left_R = ShapeRectangle(g, [-inf, params['rd_bd_max']-0.5, -inf, -inf], [inf, inf, inf, inf])
rd_bd_right_R = ShapeRectangle(g, [-inf, -inf, -inf, -inf], [inf, params['rd_bd_min']+0.5, inf, inf])
D_compl_R = Union(rd_bd_left_R, rd_bd_right_R)
# going off road boundaries - Human
rd_bd_left_H = ShapeRectangle(g, [-inf, -inf, params['rd_bd_max'], -inf], [inf, inf, inf, inf])
rd_bd_right_H = ShapeRectangle(g, [-inf, -inf, -inf, -inf], [inf, inf, params['rd_bd_min'], inf])
D_compl_H = Union(rd_bd_left_H, rd_bd_right_H)
# avoid set - Robot
HJ_avoid = ShapeRobotAvoid(xs, params)
HJ_avoid = Union(HJ_avoid, D_compl_R)
# target set - Robot
# overtake
target_ot = ShapeRectangle(g, [params['xr_tar_overtake'], 0, -inf, -inf], [inf, params['rd_bd_max'], inf, inf])
# lanekeep
target_lk = ShapeRectangle(g, [-inf, params['rd_bd_min'], -inf, -inf], [params['xr_tar_lanekeep'], params['rd_bd_max'], inf, inf])
HJ_target = Union(target_ot, target_lk)
HJ_target = Union(HJ_target, D_compl_H)
'''
compute the Reach-Avoid set
'''
uMode = "min"
dMode = "max"
HJ_minwith = "minVWithVInit"
my_car = DubinsCar4D_HRI([0,0,0,0], params['accMax_R_sh'], params['accMax_H_sh'], params['vLatMax_R_sh'], params['vLatMax_H_sh'], params['talpha'], uMode, dMode)
# Look-back length and time step
lookback_length = 15.0 #15.0
t_step = 0.05
small_number = 1e-5
tau = np.arange(start=0, stop=lookback_length + small_number, step=t_step)
#po2 = PlotOptions("3d_plot", [0,1,2], [])
"""
Assign one of the following strings to `compMethod` to specify the characteristics of computation
"none" -> compute Backward Reachable Set
"minVWithV0" -> compute Backward Reachable Tube
"maxVWithVInit" -> compute max V over time
"minVWithVInit" compute min V over time
"""
extraArgs['obstacles'] = HJ_avoid
# HJSolver(dynamics object, grid, initial value function, time length, system objectives, plotting options, extra arguments)
HJSolver(my_car, g, HJ_target, tau, HJ_minwith, None, extraArgs)
|
[
"Grid.GridProcessing.Grid",
"numpy.empty",
"numpy.array",
"numpy.arange",
"solver.HJSolver"
] |
[((3021, 3118), 'numpy.array', 'np.array', (["[params['rd_len_lb'], params['rd_bd_min'], params['rd_bd_min'], params[\n 'v_rel_lb']]"], {}), "([params['rd_len_lb'], params['rd_bd_min'], params['rd_bd_min'],\n params['v_rel_lb']])\n", (3029, 3118), True, 'import numpy as np\n'), ((3129, 3226), 'numpy.array', 'np.array', (["[params['rd_len_ub'], params['rd_bd_max'], params['rd_bd_max'], params[\n 'v_rel_ub']]"], {}), "([params['rd_len_ub'], params['rd_bd_max'], params['rd_bd_max'],\n params['v_rel_ub']])\n", (3137, 3226), True, 'import numpy as np\n'), ((3267, 3293), 'numpy.array', 'np.array', (['[41, 15, 15, 31]'], {}), '([41, 15, 15, 31])\n', (3275, 3293), True, 'import numpy as np\n'), ((3438, 3494), 'Grid.GridProcessing.Grid', 'Grid', (['HJ_grid_min', 'HJ_grid_max', 'HJ_dims', 'HJ_N', 'HJ_pdDims'], {}), '(HJ_grid_min, HJ_grid_max, HJ_dims, HJ_N, HJ_pdDims)\n', (3442, 3494), False, 'from Grid.GridProcessing import Grid\n'), ((3581, 3630), 'numpy.empty', 'np.empty', (['[4, HJ_N[0], HJ_N[1], HJ_N[2], HJ_N[3]]'], {}), '([4, HJ_N[0], HJ_N[1], HJ_N[2], HJ_N[3]])\n', (3589, 3630), True, 'import numpy as np\n'), ((5490, 5558), 'numpy.arange', 'np.arange', ([], {'start': '(0)', 'stop': '(lookback_length + small_number)', 'step': 't_step'}), '(start=0, stop=lookback_length + small_number, step=t_step)\n', (5499, 5558), True, 'import numpy as np\n'), ((6044, 6108), 'solver.HJSolver', 'HJSolver', (['my_car', 'g', 'HJ_target', 'tau', 'HJ_minwith', 'None', 'extraArgs'], {}), '(my_car, g, HJ_target, tau, HJ_minwith, None, extraArgs)\n', (6052, 6108), False, 'from solver import HJSolver\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import pyworld
import pysptk
from pysptk.synthesis import MLSADF
class Synthesizer(object):
"""
Speech synthesizer with several acoustic features
Parameters
----------
fs: int, optional
Sampling frequency
Default set to 16000
fftl: int, optional
Frame Length of STFT
Default set to 1024
shiftms: int, optional
Shift size for STFT
Default set to 5
"""
def __init__(self, fs=16000, fftl=1024, shiftms=5):
self.fs = fs
self.fftl = fftl
self.shiftms = shiftms
return
def synthesis(self, f0, mcep, ap, rmcep=None, alpha=0.42):
"""synthesis generates waveform from F0, mcep, aperiodicity
Parameters
----------
f0 : array, shape (`T`, `1`)
array of F0 sequence
mcep : array, shape (`T`, `dim`)
array of mel-cepstrum sequence
ap : array, shape (`T`, `fftlen / 2 + 1`) or (`T`, `dim_codeap`)
array of aperiodicity or code aperiodicity
rmcep : array, optional, shape (`T`, `dim`)
array of reference mel-cepstrum sequence
Default set to None
        alpha : float, optional
            Parameter of all-pass transfer function
Default set to 0.42
Returns
----------
wav: array,
            Synthesized waveform
"""
if rmcep is not None:
# power modification
mcep = mod_power(mcep, rmcep, alpha=alpha)
if ap.shape[1] < self.fftl // 2 + 1:
# decode codeap to ap
ap = pyworld.decode_aperiodicity(ap, self.fs, self.fftl)
# mcep into spc
spc = pysptk.mc2sp(mcep, alpha, self.fftl)
# generate waveform using world vocoder with f0, spc, ap
wav = pyworld.synthesize(f0, spc, ap,
self.fs, frame_period=self.shiftms)
return wav
def synthesis_diff(self, x, diffmcep, rmcep=None, alpha=0.42):
"""filtering with a differential mel-cesptrum
Parameters
----------
x : array, shape (`samples`)
array of waveform sequence
diffmcep : array, shape (`T`, `dim`)
array of differential mel-cepstrum sequence
rmcep : array, shape (`T`, `dim`)
array of reference mel-cepstrum sequence
Default set to None
alpha : float, optional
            Parameter of all-pass transfer function
Default set to 0.42
Return
----------
wav: array, shape (`samples`)
            Synthesized waveform
"""
x = x.astype(np.float64)
dim = diffmcep.shape[1] - 1
shiftl = int(self.fs / 1000 * self.shiftms)
if rmcep is not None:
# power modification
diffmcep = mod_power(rmcep + diffmcep, rmcep, alpha=alpha) - rmcep
b = np.apply_along_axis(pysptk.mc2b, 1, diffmcep, alpha)
assert np.isfinite(b).all()
mlsa_fil = pysptk.synthesis.Synthesizer(
MLSADF(dim, alpha=alpha), shiftl)
wav = mlsa_fil.synthesis(x, b)
return wav
def synthesis_spc(self, f0, spc, ap):
"""synthesis generates waveform from F0, mcep, ap
Parameters
----------
f0 : array, shape (`T`, `1`)
array of F0 sequence
spc : array, shape (`T`, `fftl // 2 + 1`)
            array of spectral envelope sequence
ap : array, shape (`T`, `fftl // 2 + 1`)
array of aperiodicity
Return
------
wav: vector, shape (`samples`)
            Synthesized waveform
"""
# generate waveform using world vocoder with f0, spc, ap
wav = pyworld.synthesize(f0, spc, ap,
self.fs, frame_period=self.shiftms)
return wav
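# Illustrative usage sketch (added for clarity, not part of the original file). It assumes a
# 1-D float64 waveform `wav` sampled at 16 kHz and uses pyworld's analysis functions to obtain
# F0, spectral envelope and aperiodicity before resynthesis:
#   f0, t = pyworld.harvest(wav, 16000, frame_period=5)
#   spc = pyworld.cheaptrick(wav, f0, t, 16000)
#   ap = pyworld.d4c(wav, f0, t, 16000)
#   out = Synthesizer(fs=16000, fftl=1024, shiftms=5).synthesis_spc(f0, spc, ap)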
def mod_power(cvmcep, rmcep, alpha=0.42, irlen=1024):
"""Power modification based on inpulse responce
Parameters
----------
cvmcep : array, shape (`T`, `dim`)
array of converted mel-cepstrum
rmcep : array, shape (`T`, `dim`)
array of reference mel-cepstrum
alpha : float, optional
        All-pass filter transfer function
Default set to 0.42
irlen : int, optional
Length for IIR filter
Default set to 1024
Return
------
modified_cvmcep : array, shape (`T`, `dim`)
array of power modified converted mel-cepstrum
"""
if rmcep.shape != cvmcep.shape:
raise ValueError("The shapes of the converted and \
reference mel-cepstrum are different: \
{} / {}".format(cvmcep.shape, rmcep.shape))
cv_e = pysptk.mc2e(cvmcep, alpha=alpha, irlen=irlen)
r_e = pysptk.mc2e(rmcep, alpha=alpha, irlen=irlen)
dpow = np.log(r_e / cv_e) / 2
modified_cvmcep = np.copy(cvmcep)
modified_cvmcep[:, 0] += dpow
return modified_cvmcep
|
[
"pyworld.synthesize",
"numpy.log",
"numpy.copy",
"pysptk.synthesis.MLSADF",
"pyworld.decode_aperiodicity",
"numpy.isfinite",
"numpy.apply_along_axis",
"pysptk.mc2sp",
"pysptk.mc2e"
] |
[((4755, 4800), 'pysptk.mc2e', 'pysptk.mc2e', (['cvmcep'], {'alpha': 'alpha', 'irlen': 'irlen'}), '(cvmcep, alpha=alpha, irlen=irlen)\n', (4766, 4800), False, 'import pysptk\n'), ((4811, 4855), 'pysptk.mc2e', 'pysptk.mc2e', (['rmcep'], {'alpha': 'alpha', 'irlen': 'irlen'}), '(rmcep, alpha=alpha, irlen=irlen)\n', (4822, 4855), False, 'import pysptk\n'), ((4914, 4929), 'numpy.copy', 'np.copy', (['cvmcep'], {}), '(cvmcep)\n', (4921, 4929), True, 'import numpy as np\n'), ((1742, 1778), 'pysptk.mc2sp', 'pysptk.mc2sp', (['mcep', 'alpha', 'self.fftl'], {}), '(mcep, alpha, self.fftl)\n', (1754, 1778), False, 'import pysptk\n'), ((1859, 1926), 'pyworld.synthesize', 'pyworld.synthesize', (['f0', 'spc', 'ap', 'self.fs'], {'frame_period': 'self.shiftms'}), '(f0, spc, ap, self.fs, frame_period=self.shiftms)\n', (1877, 1926), False, 'import pyworld\n'), ((2959, 3011), 'numpy.apply_along_axis', 'np.apply_along_axis', (['pysptk.mc2b', '(1)', 'diffmcep', 'alpha'], {}), '(pysptk.mc2b, 1, diffmcep, alpha)\n', (2978, 3011), True, 'import numpy as np\n'), ((3778, 3845), 'pyworld.synthesize', 'pyworld.synthesize', (['f0', 'spc', 'ap', 'self.fs'], {'frame_period': 'self.shiftms'}), '(f0, spc, ap, self.fs, frame_period=self.shiftms)\n', (3796, 3845), False, 'import pyworld\n'), ((4868, 4886), 'numpy.log', 'np.log', (['(r_e / cv_e)'], {}), '(r_e / cv_e)\n', (4874, 4886), True, 'import numpy as np\n'), ((1651, 1702), 'pyworld.decode_aperiodicity', 'pyworld.decode_aperiodicity', (['ap', 'self.fs', 'self.fftl'], {}), '(ap, self.fs, self.fftl)\n', (1678, 1702), False, 'import pyworld\n'), ((3110, 3134), 'pysptk.synthesis.MLSADF', 'MLSADF', (['dim'], {'alpha': 'alpha'}), '(dim, alpha=alpha)\n', (3116, 3134), False, 'from pysptk.synthesis import MLSADF\n'), ((3027, 3041), 'numpy.isfinite', 'np.isfinite', (['b'], {}), '(b)\n', (3038, 3041), True, 'import numpy as np\n')]
|
import numpy as np
from taped.util import (
DFLT_SR,
DFLT_SAMPLE_WIDTH,
DFLT_CHK_SIZE,
DFLT_STREAM_BUF_SIZE_S,
waveform_to_bytes,
)
from taped.scrap.audio_pokes import live_wf_ctx
######################################################################################################
# Example applications
from itertools import islice
import pyaudio
from time import sleep
import soundfile as sf
from io import BytesIO
def asis(wf):
return wf
def reverse_and_print(wf):
print('reversed sounds like this...')
return wf[::-1]
def listen_and_shout(
transform_wf=asis,
every_seconds=1,
input_device_index=None,
sr=DFLT_SR,
sample_width=DFLT_SAMPLE_WIDTH,
chk_size=DFLT_CHK_SIZE,
stream_buffer_size_s=DFLT_STREAM_BUF_SIZE_S,
):
"""
:param transform_wf: Callable that will be called on recorded waveform before outputting to speakers
    :param every_seconds: Number of seconds of audio to record before each playback chunk
:param input_device_index: Index of Input Device to use. Unspecified (or None) uses default device.
:param sr: Specifies the desired sample rate (in Hz)
:param sample_width: Sample width in bytes (1, 2, 3, or 4)
:param chk_size:
:param stream_buffer_size_s: How many seconds of data to keep in the buffer (i.e. how far in the past you can see)
"""
# Create an interface to PortAudio
p = pyaudio.PyAudio()
if sample_width != 2:
from warnings import warn
warn("I've never seen it work with anything than sample_width=2")
# 'output = True' indicates that the sound will be played rather than recorded
stream = p.open(
format=sample_width,
channels=1,
rate=int(sr / sample_width), # why? I don't know. I guess unit is bytes here?
output=True,
)
with live_wf_ctx(
input_device_index,
sr=sr,
sample_width=sample_width,
chk_size=chk_size,
stream_buffer_size_s=stream_buffer_size_s,
) as wf_gen:
while True:
try:
wf = list(islice(wf_gen, int(sr * every_seconds)))
b = waveform_to_bytes(transform_wf(wf), sr, sample_width)
stream.write(b)
except KeyboardInterrupt:
print('KeyboardInterrupt... Closing down')
break
# Close and terminate the stream
stream.close()
p.terminate()
def vol(wf):
return np.std(np.abs(wf))
def print_vol_num(wf):
print(f'{vol(wf):0.04f}')
def print_vol(wf, char='-', gain=2, saturation_vol=99):
log_vol = int(min(saturation_vol, max(1, gain * np.std(np.abs(wf)) / 100)))
print(f'{char * log_vol}')
def push_sound_through_a_pipe(
callback=print_vol_num,
every_seconds=1,
input_device_index=None,
sr=DFLT_SR,
sample_width=DFLT_SAMPLE_WIDTH,
chk_size=DFLT_CHK_SIZE,
stream_buffer_size_s=DFLT_STREAM_BUF_SIZE_S,
):
"""
    :param callback: Callable that will be called on each chunk of recorded waveform
    :param every_seconds: Number of seconds of audio passed to the callback on each iteration
:param input_device_index: Index of Input Device to use. Unspecified (or None) uses default device.
:param sr: Specifies the desired sample rate (in Hz)
:param sample_width: Sample width in bytes (1, 2, 3, or 4)
:param chk_size:
:param stream_buffer_size_s: How many seconds of data to keep in the buffer (i.e. how far in the past you can see)
"""
with live_wf_ctx(
input_device_index,
sr=sr,
sample_width=sample_width,
chk_size=chk_size,
stream_buffer_size_s=stream_buffer_size_s,
) as wf_gen:
while True:
try:
callback(list(islice(wf_gen, int(sr * every_seconds))))
except KeyboardInterrupt:
print('KeyboardInterrupt... Closing down')
break
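# Illustrative usage (added for clarity; not in the original module):
#   push_sound_through_a_pipe(callback=print_vol, every_seconds=0.5)   # crude volume meter
#   listen_and_shout(transform_wf=reverse_and_print, every_seconds=1)  # echo the mic back, reversed
# Both loops run until interrupted with Ctrl-C (KeyboardInterrupt).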
|
[
"warnings.warn",
"taped.scrap.audio_pokes.live_wf_ctx",
"pyaudio.PyAudio",
"numpy.abs"
] |
[((1362, 1379), 'pyaudio.PyAudio', 'pyaudio.PyAudio', ([], {}), '()\n', (1377, 1379), False, 'import pyaudio\n'), ((1450, 1515), 'warnings.warn', 'warn', (['"""I\'ve never seen it work with anything than sample_width=2"""'], {}), '("I\'ve never seen it work with anything than sample_width=2")\n', (1454, 1515), False, 'from warnings import warn\n'), ((1793, 1925), 'taped.scrap.audio_pokes.live_wf_ctx', 'live_wf_ctx', (['input_device_index'], {'sr': 'sr', 'sample_width': 'sample_width', 'chk_size': 'chk_size', 'stream_buffer_size_s': 'stream_buffer_size_s'}), '(input_device_index, sr=sr, sample_width=sample_width, chk_size=\n chk_size, stream_buffer_size_s=stream_buffer_size_s)\n', (1804, 1925), False, 'from taped.scrap.audio_pokes import live_wf_ctx\n'), ((2416, 2426), 'numpy.abs', 'np.abs', (['wf'], {}), '(wf)\n', (2422, 2426), True, 'import numpy as np\n'), ((3427, 3559), 'taped.scrap.audio_pokes.live_wf_ctx', 'live_wf_ctx', (['input_device_index'], {'sr': 'sr', 'sample_width': 'sample_width', 'chk_size': 'chk_size', 'stream_buffer_size_s': 'stream_buffer_size_s'}), '(input_device_index, sr=sr, sample_width=sample_width, chk_size=\n chk_size, stream_buffer_size_s=stream_buffer_size_s)\n', (3438, 3559), False, 'from taped.scrap.audio_pokes import live_wf_ctx\n'), ((2600, 2610), 'numpy.abs', 'np.abs', (['wf'], {}), '(wf)\n', (2606, 2610), True, 'import numpy as np\n')]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loaders for Graph Agreement Models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import pickle
import sys
from gam.data.dataset import Dataset
from gam.data.dataset import PlanetoidDataset
from gam.data.preprocessing import convert_image
from gam.data.preprocessing import split_train_val_unlabeled
import networkx as nx
import numpy as np
from scipy import sparse as sp
import tensorflow_datasets as tfds
def load_data_tf_datasets(dataset_name, target_num_train_per_class,
target_num_val, seed):
"""Load and preprocess data from tensorflow_datasets."""
logging.info('Loading and preprocessing data from tensorflow datasets...')
# Load train data.
ds = tfds.load(dataset_name, split=tfds.Split.TRAIN, batch_size=-1)
ds = tfds.as_numpy(ds)
train_inputs, train_labels = ds['image'], ds['label']
# Load test data.
ds = tfds.load(dataset_name, split=tfds.Split.TEST, batch_size=-1)
ds = tfds.as_numpy(ds)
test_inputs, test_labels = ds['image'], ds['label']
# Remove extra dimensions of size 1.
train_labels = np.squeeze(train_labels)
test_labels = np.squeeze(test_labels)
logging.info('Splitting data...')
data = split_train_val_unlabeled(train_inputs, train_labels,
target_num_train_per_class, target_num_val,
seed)
train_inputs = data[0]
train_labels = data[1]
val_inputs = data[2]
val_labels = data[3]
unlabeled_inputs = data[4]
unlabeled_labels = data[5]
logging.info('Converting data to Dataset format...')
data = Dataset.build_from_splits(
name=dataset_name,
inputs_train=train_inputs,
labels_train=train_labels,
inputs_val=val_inputs,
labels_val=val_labels,
inputs_test=test_inputs,
labels_test=test_labels,
inputs_unlabeled=unlabeled_inputs,
labels_unlabeled=unlabeled_labels,
feature_preproc_fn=convert_image)
return data
def load_data_realistic_ssl(dataset_name, data_path, label_map_path):
"""Loads data from the `ealistic Evaluation of Deep SSL Algorithms`."""
logging.info('Loading data from pickle at %s.', data_path)
train_set, validation_set, test_set = pickle.load(open(data_path, 'rb'))
train_inputs = train_set['images']
train_labels = train_set['labels']
val_inputs = validation_set['images']
val_labels = validation_set['labels']
test_inputs = test_set['images']
test_labels = test_set['labels']
  # Load the label map that specifies which training labels are available.
train_indices = json.load(open(label_map_path, 'r'))
train_indices = [
int(key.encode('ascii', 'ignore')) for key in train_indices['values']
]
train_indices = np.asarray(train_indices)
# Select the loaded train indices, and make the rest unlabeled.
unlabeled_mask = np.ones((train_inputs.shape[0],), dtype=np.bool)
unlabeled_mask[train_indices] = False
unlabeled_inputs = train_inputs[unlabeled_mask]
unlabeled_labels = train_labels[unlabeled_mask]
train_inputs = train_inputs[train_indices]
train_labels = train_labels[train_indices]
# Select a feature preprocessing function, depending on the dataset.
feature_preproc_fn = ((lambda image: image)
if dataset_name == 'cifar10' else convert_image)
data = Dataset.build_from_splits(
name=dataset_name,
inputs_train=train_inputs,
labels_train=train_labels,
inputs_val=val_inputs,
labels_val=val_labels,
inputs_test=test_inputs,
labels_test=test_labels,
inputs_unlabeled=unlabeled_inputs,
labels_unlabeled=unlabeled_labels,
feature_preproc_fn=feature_preproc_fn)
return data
def load_from_planetoid_files(dataset_name, path):
"""Loads Planetoid data in GCN format, as released with the GCN code.
This function is adapted from https://github.com/tkipf/gcn.
This function assumes that the following files can be found at the location
specified by `path`:
ind.dataset_str.x => the feature vectors of the training instances
as scipy.sparse.csr.csr_matrix object.
ind.dataset_str.tx => the feature vectors of the test instances as
scipy.sparse.csr.csr_matrix object.
ind.dataset_str.allx => the feature vectors of both labeled and
unlabeled training instances (a superset of
ind.dataset_str.x) as
scipy.sparse.csr.csr_matrix object.
ind.dataset_str.y => the one-hot labels of the labeled training
instances as numpy.ndarray object.
ind.dataset_str.ty => the one-hot labels of the test instances as
numpy.ndarray object.
ind.dataset_str.ally => the labels for instances in
ind.dataset_str.allx as numpy.ndarray object.
ind.dataset_str.graph => a dict in the format
{index: [index_of_neighbor_nodes]} as
collections.defaultdict object.
ind.dataset_str.test.index => the indices of test instances in graph, for
the inductive setting as list object.
Args:
dataset_name: A string representing the dataset name (e.g., `cora`).
path: Path to the directory containing the files.
Returns:
All data input files loaded (as well the training/test data).
"""
def _sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def _parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def _load_file(name):
"""Load from data file."""
filename = 'ind.{}.{}'.format(dataset_name, name)
filename = os.path.join(path, filename)
with open(filename, 'rb') as f:
if sys.version_info > (3, 0):
return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg
else:
return pickle.load(f)
x = _load_file('x')
y = _load_file('y')
tx = _load_file('tx')
ty = _load_file('ty')
allx = _load_file('allx')
ally = _load_file('ally')
graph = _load_file('graph')
filename = 'ind.{}.test.index'.format(dataset_name)
filename = os.path.join(path, filename)
test_idx_reorder = _parse_index_file(filename)
test_idx_range = np.sort(test_idx_reorder)
if dataset_name == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph).
# Find isolated nodes, add them as zero-vecs into the right position.
test_idx_range_full = range(
min(test_idx_reorder),
max(test_idx_reorder) + 1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y) + 500)
train_mask = _sample_mask(idx_train, labels.shape[0])
val_mask = _sample_mask(idx_val, labels.shape[0])
test_mask = _sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return (adj, features, y_train, y_val, y_test, train_mask, val_mask,
test_mask, labels)
def load_data_planetoid(name, path, splits_path=None, row_normalize=False):
"""Load Planetoid data."""
if splits_path is None:
# Load from file in Planetoid format.
(adj, features, _, _, _, train_mask, val_mask, test_mask,
labels) = load_from_planetoid_files(name, path)
else:
# Otherwise load from a path where we saved a pickle with random splits.
logging.info('Loading from splits path: %s', splits_path)
(adj, features, _, _, _, train_mask, val_mask, test_mask,
labels) = pickle.load(open(splits_path, 'rb'))
return PlanetoidDataset(
name,
adj,
features,
train_mask,
val_mask,
test_mask,
labels,
row_normalize=row_normalize)
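# Illustrative call (added for clarity): assuming the Planetoid files (e.g. ind.cora.*) are
# stored under `path`, a row-normalized Cora dataset can be loaded with
#   data = load_data_planetoid(name='cora', path=path, row_normalize=True)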
|
[
"gam.data.dataset.PlanetoidDataset",
"tensorflow_datasets.load",
"gam.data.dataset.Dataset.build_from_splits",
"networkx.from_dict_of_lists",
"tensorflow_datasets.as_numpy",
"scipy.sparse.vstack",
"numpy.asarray",
"numpy.zeros",
"numpy.ones",
"logging.info",
"numpy.sort",
"pickle.load",
"numpy.array",
"gam.data.preprocessing.split_train_val_unlabeled",
"numpy.squeeze",
"os.path.join",
"numpy.vstack"
] |
[((1277, 1351), 'logging.info', 'logging.info', (['"""Loading and preprocessing data from tensorflow datasets..."""'], {}), "('Loading and preprocessing data from tensorflow datasets...')\n", (1289, 1351), False, 'import logging\n'), ((1380, 1442), 'tensorflow_datasets.load', 'tfds.load', (['dataset_name'], {'split': 'tfds.Split.TRAIN', 'batch_size': '(-1)'}), '(dataset_name, split=tfds.Split.TRAIN, batch_size=-1)\n', (1389, 1442), True, 'import tensorflow_datasets as tfds\n'), ((1450, 1467), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (1463, 1467), True, 'import tensorflow_datasets as tfds\n'), ((1551, 1612), 'tensorflow_datasets.load', 'tfds.load', (['dataset_name'], {'split': 'tfds.Split.TEST', 'batch_size': '(-1)'}), '(dataset_name, split=tfds.Split.TEST, batch_size=-1)\n', (1560, 1612), True, 'import tensorflow_datasets as tfds\n'), ((1620, 1637), 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['ds'], {}), '(ds)\n', (1633, 1637), True, 'import tensorflow_datasets as tfds\n'), ((1749, 1773), 'numpy.squeeze', 'np.squeeze', (['train_labels'], {}), '(train_labels)\n', (1759, 1773), True, 'import numpy as np\n'), ((1790, 1813), 'numpy.squeeze', 'np.squeeze', (['test_labels'], {}), '(test_labels)\n', (1800, 1813), True, 'import numpy as np\n'), ((1817, 1850), 'logging.info', 'logging.info', (['"""Splitting data..."""'], {}), "('Splitting data...')\n", (1829, 1850), False, 'import logging\n'), ((1860, 1967), 'gam.data.preprocessing.split_train_val_unlabeled', 'split_train_val_unlabeled', (['train_inputs', 'train_labels', 'target_num_train_per_class', 'target_num_val', 'seed'], {}), '(train_inputs, train_labels,\n target_num_train_per_class, target_num_val, seed)\n', (1885, 1967), False, 'from gam.data.preprocessing import split_train_val_unlabeled\n'), ((2191, 2243), 'logging.info', 'logging.info', (['"""Converting data to Dataset format..."""'], {}), "('Converting data to Dataset format...')\n", (2203, 2243), False, 'import logging\n'), ((2253, 2569), 'gam.data.dataset.Dataset.build_from_splits', 'Dataset.build_from_splits', ([], {'name': 'dataset_name', 'inputs_train': 'train_inputs', 'labels_train': 'train_labels', 'inputs_val': 'val_inputs', 'labels_val': 'val_labels', 'inputs_test': 'test_inputs', 'labels_test': 'test_labels', 'inputs_unlabeled': 'unlabeled_inputs', 'labels_unlabeled': 'unlabeled_labels', 'feature_preproc_fn': 'convert_image'}), '(name=dataset_name, inputs_train=train_inputs,\n labels_train=train_labels, inputs_val=val_inputs, labels_val=val_labels,\n inputs_test=test_inputs, labels_test=test_labels, inputs_unlabeled=\n unlabeled_inputs, labels_unlabeled=unlabeled_labels, feature_preproc_fn\n =convert_image)\n', (2278, 2569), False, 'from gam.data.dataset import Dataset\n'), ((2775, 2833), 'logging.info', 'logging.info', (['"""Loading data from pickle at %s."""', 'data_path'], {}), "('Loading data from pickle at %s.', data_path)\n", (2787, 2833), False, 'import logging\n'), ((3380, 3405), 'numpy.asarray', 'np.asarray', (['train_indices'], {}), '(train_indices)\n', (3390, 3405), True, 'import numpy as np\n'), ((3492, 3540), 'numpy.ones', 'np.ones', (['(train_inputs.shape[0],)'], {'dtype': 'np.bool'}), '((train_inputs.shape[0],), dtype=np.bool)\n', (3499, 3540), True, 'import numpy as np\n'), ((3972, 4293), 'gam.data.dataset.Dataset.build_from_splits', 'Dataset.build_from_splits', ([], {'name': 'dataset_name', 'inputs_train': 'train_inputs', 'labels_train': 'train_labels', 'inputs_val': 'val_inputs', 'labels_val': 'val_labels', 
'inputs_test': 'test_inputs', 'labels_test': 'test_labels', 'inputs_unlabeled': 'unlabeled_inputs', 'labels_unlabeled': 'unlabeled_labels', 'feature_preproc_fn': 'feature_preproc_fn'}), '(name=dataset_name, inputs_train=train_inputs,\n labels_train=train_labels, inputs_val=val_inputs, labels_val=val_labels,\n inputs_test=test_inputs, labels_test=test_labels, inputs_unlabeled=\n unlabeled_inputs, labels_unlabeled=unlabeled_labels, feature_preproc_fn\n =feature_preproc_fn)\n', (3997, 4293), False, 'from gam.data.dataset import Dataset\n'), ((7075, 7103), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (7087, 7103), False, 'import os\n'), ((7172, 7197), 'numpy.sort', 'np.sort', (['test_idx_reorder'], {}), '(test_idx_reorder)\n', (7179, 7197), True, 'import numpy as np\n'), ((7959, 7980), 'numpy.vstack', 'np.vstack', (['(ally, ty)'], {}), '((ally, ty))\n', (7968, 7980), True, 'import numpy as np\n'), ((8321, 8343), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (8329, 8343), True, 'import numpy as np\n'), ((8354, 8376), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (8362, 8376), True, 'import numpy as np\n'), ((8388, 8410), 'numpy.zeros', 'np.zeros', (['labels.shape'], {}), '(labels.shape)\n', (8396, 8410), True, 'import numpy as np\n'), ((9211, 9322), 'gam.data.dataset.PlanetoidDataset', 'PlanetoidDataset', (['name', 'adj', 'features', 'train_mask', 'val_mask', 'test_mask', 'labels'], {'row_normalize': 'row_normalize'}), '(name, adj, features, train_mask, val_mask, test_mask,\n labels, row_normalize=row_normalize)\n', (9227, 9322), False, 'from gam.data.dataset import PlanetoidDataset\n'), ((6232, 6243), 'numpy.zeros', 'np.zeros', (['l'], {}), '(l)\n', (6240, 6243), True, 'import numpy as np\n'), ((6273, 6302), 'numpy.array', 'np.array', (['mask'], {'dtype': 'np.bool'}), '(mask, dtype=np.bool)\n', (6281, 6302), True, 'import numpy as np\n'), ((6594, 6622), 'os.path.join', 'os.path.join', (['path', 'filename'], {}), '(path, filename)\n', (6606, 6622), False, 'import os\n'), ((7917, 7945), 'networkx.from_dict_of_lists', 'nx.from_dict_of_lists', (['graph'], {}), '(graph)\n', (7938, 7945), True, 'import networkx as nx\n'), ((9029, 9086), 'logging.info', 'logging.info', (['"""Loading from splits path: %s"""', 'splits_path'], {}), "('Loading from splits path: %s', splits_path)\n", (9041, 9086), False, 'import logging\n'), ((7797, 7818), 'scipy.sparse.vstack', 'sp.vstack', (['(allx, tx)'], {}), '((allx, tx))\n', (7806, 7818), True, 'from scipy import sparse as sp\n'), ((6710, 6743), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (6721, 6743), False, 'import pickle\n'), ((6813, 6827), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6824, 6827), False, 'import pickle\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, dataset.shape[1]-1:dataset.shape[1]].values
#Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
#Fitting SVR to the dataset
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf')
regressor.fit(X, y)
#Predicting a new result
y_pred = sc_y.inverse_transform(regressor.predict(sc_X.transform(np.reshape([6.5], (-1, 1)))))
#Visualizing the SVR results
X_grid = np.arange(min(sc_X.inverse_transform(X)), max(sc_X.inverse_transform(X)), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red')
plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X)), color = 'blue')
plt.scatter(6.5, y_pred, color = 'green')
plt.show()
|
[
"sklearn.svm.SVR",
"sklearn.preprocessing.StandardScaler",
"matplotlib.pyplot.show",
"pandas.read_csv",
"matplotlib.pyplot.scatter",
"numpy.reshape"
] |
[((82, 118), 'pandas.read_csv', 'pd.read_csv', (['"""Position_Salaries.csv"""'], {}), "('Position_Salaries.csv')\n", (93, 118), True, 'import pandas as pd\n'), ((290, 306), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (304, 306), False, 'from sklearn.preprocessing import StandardScaler\n'), ((314, 330), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (328, 330), False, 'from sklearn.preprocessing import StandardScaler\n'), ((452, 469), 'sklearn.svm.SVR', 'SVR', ([], {'kernel': '"""rbf"""'}), "(kernel='rbf')\n", (455, 469), False, 'from sklearn.svm import SVR\n'), ((952, 991), 'matplotlib.pyplot.scatter', 'plt.scatter', (['(6.5)', 'y_pred'], {'color': '"""green"""'}), "(6.5, y_pred, color='green')\n", (963, 991), True, 'import matplotlib.pyplot as plt\n'), ((994, 1004), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1002, 1004), True, 'import matplotlib.pyplot as plt\n'), ((583, 609), 'numpy.reshape', 'np.reshape', (['[6.5]', '(-1, 1)'], {}), '([6.5], (-1, 1))\n', (593, 609), True, 'import numpy as np\n')]
|
import numpy as np
import torch
from scipy import signal
import math
import cv2
import random
class Transform:
def __init__(self):
pass
def add_noise(self, signal, noise_amount):
"""
adding noise
"""
signal = signal.T
noise = (0.4 ** 0.5) * np.random.normal(1, noise_amount, np.shape(signal)[0])
noise = noise[:,None]
noised_signal = signal + noise
noised_signal = noised_signal.T
# print(noised_signal.shape)
return noised_signal
def add_noise_with_SNR(self,signal, noise_amount):
"""
adding noise
created using: https://stackoverflow.com/a/53688043/10700812
"""
signal = signal[0]
target_snr_db = noise_amount # 20
x_watts = signal ** 2 # Calculate signal power and convert to dB
sig_avg_watts = np.mean(x_watts)
sig_avg_db = 10 * np.log10(sig_avg_watts) # Calculate noise then convert to watts
noise_avg_db = sig_avg_db - target_snr_db
noise_avg_watts = 10 ** (noise_avg_db / 10)
mean_noise = 0
noise_volts = np.random.normal(mean_noise, np.sqrt(noise_avg_watts),
len(x_watts)) # Generate an sample of white noise
noised_signal = signal + noise_volts # noise added signal
noised_signal = noised_signal[None,:]
# print(noised_signal.shape)
return noised_signal
def scaled(self,signal, factor_list):
""""
scale the signal
"""
factor = round(np.random.uniform(factor_list[0],factor_list[1]),2)
signal[0] = 1 / (1 + np.exp(-signal[0]))
# print(signal.max())
return signal
def negate(self,signal):
"""
negate the signal
"""
signal[0] = signal[0] * (-1)
return signal
def hor_filp(self,signal):
"""
flipped horizontally
"""
hor_flipped = np.flip(signal,axis=1)
return hor_flipped
def permute(self,signal, pieces):
"""
signal: numpy array (batch x window)
pieces: number of segments along time
"""
signal = signal.T
        pieces = int(np.ceil(np.shape(signal)[0] / (np.shape(signal)[0] // pieces)).tolist())  # round up (ceil)
piece_length = int(np.shape(signal)[0] // pieces)
sequence = list(range(0, pieces))
np.random.shuffle(sequence)
permuted_signal = np.reshape(signal[:(np.shape(signal)[0] // pieces * pieces)],
(pieces, piece_length)).tolist()
tail = signal[(np.shape(signal)[0] // pieces * pieces):]
permuted_signal = np.asarray(permuted_signal)[sequence]
permuted_signal = np.concatenate(permuted_signal, axis=0)
permuted_signal = np.concatenate((permuted_signal,tail[:,0]), axis=0)
permuted_signal = permuted_signal[:,None]
permuted_signal = permuted_signal.T
return permuted_signal
def cutout_resize(self,signal,pieces):
"""
signal: numpy array (batch x window)
pieces: number of segments along time
cutout 1 piece
"""
signal = signal.T
        pieces = int(np.ceil(np.shape(signal)[0] / (np.shape(signal)[0] // pieces)).tolist())  # round up (ceil)
piece_length = int(np.shape(signal)[0] // pieces)
import random
sequence = []
cutout = random.randint(0, pieces)
# print(cutout)
# sequence1 = list(range(0, cutout))
# sequence2 = list(range(int(cutout + 1), pieces))
# sequence = np.hstack((sequence1, sequence2))
for i in range(pieces):
if i == cutout:
pass
else:
sequence.append(i)
# print(sequence)
cutout_signal = np.reshape(signal[:(np.shape(signal)[0] // pieces * pieces)],
(pieces, piece_length)).tolist()
tail = signal[(np.shape(signal)[0] // pieces * pieces):]
cutout_signal = np.asarray(cutout_signal)[sequence]
cutout_signal = np.hstack(cutout_signal)
cutout_signal = np.concatenate((cutout_signal, tail[:, 0]), axis=0)
cutout_signal = cv2.resize(cutout_signal, (1, 3072), interpolation=cv2.INTER_LINEAR)
cutout_signal = cutout_signal.T
return cutout_signal
def cutout_zero(self,signal,pieces):
"""
signal: numpy array (batch x window)
pieces: number of segments along time
cutout 1 piece
"""
signal = signal.T
ones = np.ones((np.shape(signal)[0],np.shape(signal)[1]))
# print(ones.shape)
# assert False
        pieces = int(np.ceil(np.shape(signal)[0] / (np.shape(signal)[0] // pieces)).tolist())  # round up (ceil)
piece_length = int(np.shape(signal)[0] // pieces)
cutout = random.randint(1, pieces)
cutout_signal = np.reshape(signal[:(np.shape(signal)[0] // pieces * pieces)],
(pieces, piece_length)).tolist()
ones_pieces = np.reshape(ones[:(np.shape(signal)[0] // pieces * pieces)],
(pieces, piece_length)).tolist()
tail = signal[(np.shape(signal)[0] // pieces * pieces):]
cutout_signal = np.asarray(cutout_signal)
ones_pieces = np.asarray(ones_pieces)
for i in range(pieces):
if i == cutout:
ones_pieces[i]*=0
cutout_signal = cutout_signal * ones_pieces
cutout_signal = np.hstack(cutout_signal)
cutout_signal = np.concatenate((cutout_signal, tail[:, 0]), axis=0)
cutout_signal = cutout_signal[:,None]
cutout_signal = cutout_signal.T
return cutout_signal
# mic
def crop_resize(self, signal, size):
signal = signal.T
size = signal.shape[0] * size
size = int(size)
start = random.randint(0, signal.shape[0]-size)
crop_signal = signal[start:start + size,:]
# print(crop_signal.shape)
crop_signal = cv2.resize(crop_signal, (1, 3072), interpolation=cv2.INTER_LINEAR)
# print(crop_signal.shape)
crop_signal = crop_signal.T
return crop_signal
def move_avg(self,a,n, mode="same"):
# a = a.T
result = np.convolve(a[0], np.ones((n,)) / n, mode=mode)
return result[None,:]
def bandpass_filter(self, x, order, cutoff, fs=100):
result = np.zeros((x.shape[0], x.shape[1]))
w1 = 2 * cutoff[0] / int(fs)
w2 = 2 * cutoff[1] / int(fs)
        b, a = signal.butter(order, [w1, w2], btype='bandpass')  # configure the Butterworth band-pass filter ('order' is the filter order)
result = signal.filtfilt(b, a, x, axis=1)
# print(result.shape)
return result
def lowpass_filter(self, x, order, cutoff, fs=100):
result = np.zeros((x.shape[0], x.shape[1]))
w1 = 2 * cutoff[0] / int(fs)
# w2 = 2 * cutoff[1] / fs
        b, a = signal.butter(order, w1, btype='lowpass')  # configure the Butterworth low-pass filter ('order' is the filter order)
result = signal.filtfilt(b, a, x, axis=1)
# print(result.shape)
return result
def highpass_filter(self, x, order, cutoff, fs=100):
result = np.zeros((x.shape[0], x.shape[1]))
w1 = 2 * cutoff[0] / int(fs)
# w2 = 2 * cutoff[1] / fs
        b, a = signal.butter(order, w1, btype='highpass')  # configure the Butterworth high-pass filter ('order' is the filter order)
result = signal.filtfilt(b, a, x, axis=1)
# print(result.shape)
return result
def time_warp(self,signal, sampling_freq, pieces, stretch_factor, squeeze_factor):
"""
signal: numpy array (batch x window)
sampling freq
pieces: number of segments along time
stretch factor
squeeze factor
"""
signal = signal.T
total_time = np.shape(signal)[0] // sampling_freq
segment_time = total_time / pieces
sequence = list(range(0, pieces))
stretch = np.random.choice(sequence, math.ceil(len(sequence) / 2), replace=False)
squeeze = list(set(sequence).difference(set(stretch)))
initialize = True
for i in sequence:
orig_signal = signal[int(i * np.floor(segment_time * sampling_freq)):int(
(i + 1) * np.floor(segment_time * sampling_freq))]
orig_signal = orig_signal.reshape(np.shape(orig_signal)[0], 1)
if i in stretch:
output_shape = int(np.ceil(np.shape(orig_signal)[0] * stretch_factor))
new_signal = cv2.resize(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)
if initialize == True:
time_warped = new_signal
initialize = False
else:
time_warped = np.vstack((time_warped, new_signal))
elif i in squeeze:
output_shape = int(np.ceil(np.shape(orig_signal)[0] * squeeze_factor))
new_signal = cv2.resize(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)
if initialize == True:
time_warped = new_signal
initialize = False
else:
time_warped = np.vstack((time_warped, new_signal))
time_warped = cv2.resize(time_warped, (1,3072), interpolation=cv2.INTER_LINEAR)
time_warped = time_warped.T
return time_warped
if __name__ == '__main__':
from transform import Transform
import matplotlib.pyplot as plt
Trans = Transform()
input = np.zeros((1,3072))
input = Trans.add_noise(input,10)
plt.subplot(211)
plt.plot(input[0])
# print(input.shape)
# output = Trans.cutout_resize(input,10)
order = random.randint(3, 10)
cutoff = random.uniform(5, 20)
    output = Trans.lowpass_filter(input, order, [2, 15])  # the class has no generic filter method; use one of the *_filter methods
plt.subplot(212)
plt.plot(output[0])
plt.savefig('filter.png')
# print(output.shape)
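    # Other augmentations from Transform can be exercised the same way (illustrative,
    # assuming `input` keeps its (1, 3072) shape):
    #   permuted = Trans.permute(input, pieces=10)   # shuffle time segments
    #   flipped = Trans.hor_filp(input)              # horizontal flip
    #   negated = Trans.negate(input)                # sign inversion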
|
[
"numpy.floor",
"numpy.ones",
"numpy.shape",
"numpy.mean",
"numpy.exp",
"random.randint",
"numpy.log10",
"scipy.signal.butter",
"cv2.resize",
"numpy.random.shuffle",
"numpy.asarray",
"transform.Transform",
"numpy.hstack",
"numpy.concatenate",
"numpy.vstack",
"matplotlib.pyplot.subplot",
"numpy.random.uniform",
"numpy.flip",
"matplotlib.pyplot.plot",
"random.uniform",
"scipy.signal.filtfilt",
"numpy.zeros",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] |
[((9807, 9818), 'transform.Transform', 'Transform', ([], {}), '()\n', (9816, 9818), False, 'from transform import Transform\n'), ((9832, 9851), 'numpy.zeros', 'np.zeros', (['(1, 3072)'], {}), '((1, 3072))\n', (9840, 9851), True, 'import numpy as np\n'), ((9895, 9911), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (9906, 9911), True, 'import matplotlib.pyplot as plt\n'), ((9917, 9935), 'matplotlib.pyplot.plot', 'plt.plot', (['input[0]'], {}), '(input[0])\n', (9925, 9935), True, 'import matplotlib.pyplot as plt\n'), ((10023, 10044), 'random.randint', 'random.randint', (['(3)', '(10)'], {}), '(3, 10)\n', (10037, 10044), False, 'import random\n'), ((10059, 10080), 'random.uniform', 'random.uniform', (['(5)', '(20)'], {}), '(5, 20)\n', (10073, 10080), False, 'import random\n'), ((10151, 10167), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (10162, 10167), True, 'import matplotlib.pyplot as plt\n'), ((10173, 10192), 'matplotlib.pyplot.plot', 'plt.plot', (['output[0]'], {}), '(output[0])\n', (10181, 10192), True, 'import matplotlib.pyplot as plt\n'), ((10198, 10223), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""filter.png"""'], {}), "('filter.png')\n", (10209, 10223), True, 'import matplotlib.pyplot as plt\n'), ((906, 922), 'numpy.mean', 'np.mean', (['x_watts'], {}), '(x_watts)\n', (913, 922), True, 'import numpy as np\n'), ((2034, 2057), 'numpy.flip', 'np.flip', (['signal'], {'axis': '(1)'}), '(signal, axis=1)\n', (2041, 2057), True, 'import numpy as np\n'), ((2486, 2513), 'numpy.random.shuffle', 'np.random.shuffle', (['sequence'], {}), '(sequence)\n', (2503, 2513), True, 'import numpy as np\n'), ((2836, 2875), 'numpy.concatenate', 'np.concatenate', (['permuted_signal'], {'axis': '(0)'}), '(permuted_signal, axis=0)\n', (2850, 2875), True, 'import numpy as np\n'), ((2903, 2956), 'numpy.concatenate', 'np.concatenate', (['(permuted_signal, tail[:, 0])'], {'axis': '(0)'}), '((permuted_signal, tail[:, 0]), axis=0)\n', (2917, 2956), True, 'import numpy as np\n'), ((3559, 3584), 'random.randint', 'random.randint', (['(0)', 'pieces'], {}), '(0, pieces)\n', (3573, 3584), False, 'import random\n'), ((4256, 4280), 'numpy.hstack', 'np.hstack', (['cutout_signal'], {}), '(cutout_signal)\n', (4265, 4280), True, 'import numpy as np\n'), ((4306, 4357), 'numpy.concatenate', 'np.concatenate', (['(cutout_signal, tail[:, 0])'], {'axis': '(0)'}), '((cutout_signal, tail[:, 0]), axis=0)\n', (4320, 4357), True, 'import numpy as np\n'), ((4385, 4453), 'cv2.resize', 'cv2.resize', (['cutout_signal', '(1, 3072)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(cutout_signal, (1, 3072), interpolation=cv2.INTER_LINEAR)\n', (4395, 4453), False, 'import cv2\n'), ((5079, 5104), 'random.randint', 'random.randint', (['(1)', 'pieces'], {}), '(1, pieces)\n', (5093, 5104), False, 'import random\n'), ((5508, 5533), 'numpy.asarray', 'np.asarray', (['cutout_signal'], {}), '(cutout_signal)\n', (5518, 5533), True, 'import numpy as np\n'), ((5557, 5580), 'numpy.asarray', 'np.asarray', (['ones_pieces'], {}), '(ones_pieces)\n', (5567, 5580), True, 'import numpy as np\n'), ((5758, 5782), 'numpy.hstack', 'np.hstack', (['cutout_signal'], {}), '(cutout_signal)\n', (5767, 5782), True, 'import numpy as np\n'), ((5808, 5859), 'numpy.concatenate', 'np.concatenate', (['(cutout_signal, tail[:, 0])'], {'axis': '(0)'}), '((cutout_signal, tail[:, 0]), axis=0)\n', (5822, 5859), True, 'import numpy as np\n'), ((6142, 6183), 'random.randint', 'random.randint', (['(0)', '(signal.shape[0] - size)'], {}), 
'(0, signal.shape[0] - size)\n', (6156, 6183), False, 'import random\n'), ((6295, 6361), 'cv2.resize', 'cv2.resize', (['crop_signal', '(1, 3072)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(crop_signal, (1, 3072), interpolation=cv2.INTER_LINEAR)\n', (6305, 6361), False, 'import cv2\n'), ((6703, 6737), 'numpy.zeros', 'np.zeros', (['(x.shape[0], x.shape[1])'], {}), '((x.shape[0], x.shape[1]))\n', (6711, 6737), True, 'import numpy as np\n'), ((6830, 6878), 'scipy.signal.butter', 'signal.butter', (['order', '[w1, w2]'], {'btype': '"""bandpass"""'}), "(order, [w1, w2], btype='bandpass')\n", (6843, 6878), False, 'from scipy import signal\n'), ((6917, 6949), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'x'], {'axis': '(1)'}), '(b, a, x, axis=1)\n', (6932, 6949), False, 'from scipy import signal\n'), ((7083, 7117), 'numpy.zeros', 'np.zeros', (['(x.shape[0], x.shape[1])'], {}), '((x.shape[0], x.shape[1]))\n', (7091, 7117), True, 'import numpy as np\n'), ((7207, 7248), 'scipy.signal.butter', 'signal.butter', (['order', 'w1'], {'btype': '"""lowpass"""'}), "(order, w1, btype='lowpass')\n", (7220, 7248), False, 'from scipy import signal\n'), ((7287, 7319), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'x'], {'axis': '(1)'}), '(b, a, x, axis=1)\n', (7302, 7319), False, 'from scipy import signal\n'), ((7454, 7488), 'numpy.zeros', 'np.zeros', (['(x.shape[0], x.shape[1])'], {}), '((x.shape[0], x.shape[1]))\n', (7462, 7488), True, 'import numpy as np\n'), ((7578, 7620), 'scipy.signal.butter', 'signal.butter', (['order', 'w1'], {'btype': '"""highpass"""'}), "(order, w1, btype='highpass')\n", (7591, 7620), False, 'from scipy import signal\n'), ((7659, 7691), 'scipy.signal.filtfilt', 'signal.filtfilt', (['b', 'a', 'x'], {'axis': '(1)'}), '(b, a, x, axis=1)\n', (7674, 7691), False, 'from scipy import signal\n'), ((9559, 9625), 'cv2.resize', 'cv2.resize', (['time_warped', '(1, 3072)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(time_warped, (1, 3072), interpolation=cv2.INTER_LINEAR)\n', (9569, 9625), False, 'import cv2\n'), ((950, 973), 'numpy.log10', 'np.log10', (['sig_avg_watts'], {}), '(sig_avg_watts)\n', (958, 973), True, 'import numpy as np\n'), ((1195, 1219), 'numpy.sqrt', 'np.sqrt', (['noise_avg_watts'], {}), '(noise_avg_watts)\n', (1202, 1219), True, 'import numpy as np\n'), ((1619, 1668), 'numpy.random.uniform', 'np.random.uniform', (['factor_list[0]', 'factor_list[1]'], {}), '(factor_list[0], factor_list[1])\n', (1636, 1668), True, 'import numpy as np\n'), ((2771, 2798), 'numpy.asarray', 'np.asarray', (['permuted_signal'], {}), '(permuted_signal)\n', (2781, 2798), True, 'import numpy as np\n'), ((4193, 4218), 'numpy.asarray', 'np.asarray', (['cutout_signal'], {}), '(cutout_signal)\n', (4203, 4218), True, 'import numpy as np\n'), ((1701, 1719), 'numpy.exp', 'np.exp', (['(-signal[0])'], {}), '(-signal[0])\n', (1707, 1719), True, 'import numpy as np\n'), ((6564, 6577), 'numpy.ones', 'np.ones', (['(n,)'], {}), '((n,))\n', (6571, 6577), True, 'import numpy as np\n'), ((8081, 8097), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (8089, 8097), True, 'import numpy as np\n'), ((8794, 8868), 'cv2.resize', 'cv2.resize', (['orig_signal', '(1, output_shape)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)\n', (8804, 8868), False, 'import cv2\n'), ((357, 373), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (365, 373), True, 'import numpy as np\n'), ((2401, 2417), 'numpy.shape', 'np.shape', (['signal'], {}), 
'(signal)\n', (2409, 2417), True, 'import numpy as np\n'), ((3462, 3478), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (3470, 3478), True, 'import numpy as np\n'), ((4800, 4816), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (4808, 4816), True, 'import numpy as np\n'), ((4820, 4836), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (4828, 4836), True, 'import numpy as np\n'), ((5026, 5042), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (5034, 5042), True, 'import numpy as np\n'), ((8617, 8638), 'numpy.shape', 'np.shape', (['orig_signal'], {}), '(orig_signal)\n', (8625, 8638), True, 'import numpy as np\n'), ((9053, 9089), 'numpy.vstack', 'np.vstack', (['(time_warped, new_signal)'], {}), '((time_warped, new_signal))\n', (9062, 9089), True, 'import numpy as np\n'), ((9240, 9314), 'cv2.resize', 'cv2.resize', (['orig_signal', '(1, output_shape)'], {'interpolation': 'cv2.INTER_LINEAR'}), '(orig_signal, (1, output_shape), interpolation=cv2.INTER_LINEAR)\n', (9250, 9314), False, 'import cv2\n'), ((9499, 9535), 'numpy.vstack', 'np.vstack', (['(time_warped, new_signal)'], {}), '((time_warped, new_signal))\n', (9508, 9535), True, 'import numpy as np\n'), ((2702, 2718), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (2710, 2718), True, 'import numpy as np\n'), ((4124, 4140), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (4132, 4140), True, 'import numpy as np\n'), ((5439, 5455), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (5447, 5455), True, 'import numpy as np\n'), ((8457, 8495), 'numpy.floor', 'np.floor', (['(segment_time * sampling_freq)'], {}), '(segment_time * sampling_freq)\n', (8465, 8495), True, 'import numpy as np\n'), ((8529, 8567), 'numpy.floor', 'np.floor', (['(segment_time * sampling_freq)'], {}), '(segment_time * sampling_freq)\n', (8537, 8567), True, 'import numpy as np\n'), ((2302, 2318), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (2310, 2318), True, 'import numpy as np\n'), ((3361, 3377), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (3369, 3377), True, 'import numpy as np\n'), ((4925, 4941), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (4933, 4941), True, 'import numpy as np\n'), ((8720, 8741), 'numpy.shape', 'np.shape', (['orig_signal'], {}), '(orig_signal)\n', (8728, 8741), True, 'import numpy as np\n'), ((2325, 2341), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (2333, 2341), True, 'import numpy as np\n'), ((3384, 3400), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (3392, 3400), True, 'import numpy as np\n'), ((4948, 4964), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (4956, 4964), True, 'import numpy as np\n'), ((9166, 9187), 'numpy.shape', 'np.shape', (['orig_signal'], {}), '(orig_signal)\n', (9174, 9187), True, 'import numpy as np\n'), ((2563, 2579), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (2571, 2579), True, 'import numpy as np\n'), ((3985, 4001), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (3993, 4001), True, 'import numpy as np\n'), ((5150, 5166), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (5158, 5166), True, 'import numpy as np\n'), ((5304, 5320), 'numpy.shape', 'np.shape', (['signal'], {}), '(signal)\n', (5312, 5320), True, 'import numpy as np\n')]
|
import numpy
from obspy import Stream, UTCDateTime
from obspy.clients.neic.client import Client
from geomagio import TimeseriesUtility
from geomagio.edge import SNCL
class MockMiniSeedClient(Client):
"""replaces default obspy miniseed client's get_waveforms method to return trace of ones
Note: includes 'return_empty' parameter to simulate situations where no data is received
"""
def __init__(self, return_empty: bool = False):
self.return_empty = return_empty
def get_waveforms(
self,
network: str,
station: str,
location: str,
channel: str,
starttime: UTCDateTime,
endtime: UTCDateTime,
):
if self.return_empty:
return Stream()
sncl = SNCL(
station=station,
network=network,
channel=channel,
location=location,
)
trace = TimeseriesUtility.create_empty_trace(
starttime=starttime,
endtime=endtime,
observatory=station,
channel=channel,
type=sncl.data_type,
interval=sncl.interval,
network=network,
station=station,
location=location,
)
trace.data = numpy.ones(trace.stats.npts)
return Stream([trace])
class MisalignedMiniSeedClient(MockMiniSeedClient):
"""mock client that adds an offset value to endtime"""
def __init__(self, return_empty: bool = False, increment: int = 1):
super().__init__(return_empty=return_empty)
self.increment = increment
self.offset = 0
def get_waveforms(
self,
network: str,
station: str,
location: str,
channel: str,
starttime: UTCDateTime,
endtime: UTCDateTime,
):
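        # Each call shifts the requested end time by an offset that grows with
        # every request, simulating responses that drift progressively out of
        # alignment with the requested window.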
endtime = endtime + self.offset
self.offset = self.offset + self.increment
return super().get_waveforms(
network, station, location, channel, starttime, endtime
)
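# Illustrative usage sketch (not part of the original module). The network,
# station, location and channel codes below are assumptions chosen only for
# demonstration; substitute any SNCL combination that geomagio accepts.
if __name__ == "__main__":
    start = UTCDateTime("2020-01-01T00:00:00Z")
    client = MockMiniSeedClient()
    stream = client.get_waveforms("NT", "BOU", "R0", "UFH", start, start + 60)
    print(len(stream), stream[0].stats.npts)  # one trace filled with ones
    empty = MockMiniSeedClient(return_empty=True)
    print(len(empty.get_waveforms("NT", "BOU", "R0", "UFH", start, start + 60)))  # 0 traces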
|
[
"geomagio.TimeseriesUtility.create_empty_trace",
"geomagio.edge.SNCL",
"numpy.ones",
"obspy.Stream"
] |
[((761, 835), 'geomagio.edge.SNCL', 'SNCL', ([], {'station': 'station', 'network': 'network', 'channel': 'channel', 'location': 'location'}), '(station=station, network=network, channel=channel, location=location)\n', (765, 835), False, 'from geomagio.edge import SNCL\n'), ((911, 1130), 'geomagio.TimeseriesUtility.create_empty_trace', 'TimeseriesUtility.create_empty_trace', ([], {'starttime': 'starttime', 'endtime': 'endtime', 'observatory': 'station', 'channel': 'channel', 'type': 'sncl.data_type', 'interval': 'sncl.interval', 'network': 'network', 'station': 'station', 'location': 'location'}), '(starttime=starttime, endtime=endtime,\n observatory=station, channel=channel, type=sncl.data_type, interval=\n sncl.interval, network=network, station=station, location=location)\n', (947, 1130), False, 'from geomagio import TimeseriesUtility\n'), ((1262, 1290), 'numpy.ones', 'numpy.ones', (['trace.stats.npts'], {}), '(trace.stats.npts)\n', (1272, 1290), False, 'import numpy\n'), ((1306, 1321), 'obspy.Stream', 'Stream', (['[trace]'], {}), '([trace])\n', (1312, 1321), False, 'from obspy import Stream, UTCDateTime\n'), ((737, 745), 'obspy.Stream', 'Stream', ([], {}), '()\n', (743, 745), False, 'from obspy import Stream, UTCDateTime\n')]
|
import numpy as np
from yaglm.metrics.base import Scorer
from yaglm.autoassign import autoassign
from yaglm.config.penalty import Lasso, ElasticNet
from yaglm.utils import count_support
from yaglm.extmath import log_binom
class InfoCriteria(Scorer):
"""
    Computes information criteria for GLM model selection. Note this returns the negative of the information criterion, so larger values indicate a better fit.
Parameters
----------
crit: str
        Which information criterion to use. Must be one of ['aic', 'bic', 'ebic'].
gamma: float, str
The gamma argument to ebic()
zero_tol: float, str
The zero tolerance for support counting. See yaglm.utils.count_support()
"""
@autoassign
def __init__(self, crit='ebic', gamma='default', zero_tol=1e-6): pass
@property
def name(self):
return self.crit
# TODO: currently ignores sample_weight
def __call__(self, estimator, X, y, sample_weight=None):
"""
Returns the negative information criteria.
Parameters
----------
estimator: Estimator
The fit estimator to score.
X: array-like, shape (n_samples, n_features)
The covariate data to used for scoring.
y: array-like, shape (n_samples, ) or (n_samples, n_responses)
The response data to used for scoring.
sample_weight: None, array-like (n_samples, )
(Optional) Sample weight to use for scoring.
Output
------
scores: float
The negative information criteria score so that larger values indicate better model fit.
"""
# formatting
if not isinstance(estimator.fit_penalty_, (Lasso, ElasticNet)):
raise NotImplementedError("Information criteria is currently only"
" supported for Lasso and Elastic Net.")
# compute data log-likelihood
log_lik = estimator.sample_log_liks(X=X, y=y).sum()
n_samples = X.shape[0]
if self.crit in ['aic', 'bic']:
dof = estimator.inferencer_.dof_
if dof is None:
raise NotImplementedError("The estimator does not currently"
"support estimating the degrees of"
" freedom.")
if self.crit == 'aic':
return -aic(log_lik=log_lik, n_samples=n_samples, dof=dof)
elif self.crit == 'bic':
return -bic(log_lik=log_lik, n_samples=n_samples, dof=dof)
elif self.crit == 'ebic':
n_support = count_support(estimator.coef_, zero_tol=self.zero_tol)
n_features = estimator.inferencer_.X_shape_[1]
return -ebic(log_lik=log_lik,
n_samples=n_samples, n_features=n_features,
n_support=n_support,
gamma=self.gamma,
fit_intercept=estimator.fit_intercept)
else:
raise NotImplementedError("crit must be on of "
"['aic', 'bic', 'ebic'], "
" not {}".format(self.crit))
def bic(log_lik, n_samples, dof):
"""
Calculates the Bayesian Information Criterion.
Parameters
----------
log_lik: float
The observed data log-likelihood.
n_samples: int
Number of samples.
dof: int
Number of degrees of freedom.
Output
------
    bic: float
"""
return - 2 * log_lik + np.log(n_samples) * dof
def aic(log_lik, n_samples, dof):
"""
Calculates the Akaike Information Criterion.
Parameters
----------
log_lik: float
The observed data log-likelihood.
n_samples: int
Number of samples.
dof: int
Number of degrees of freedom.
Output
------
    aic: float
"""
return - 2 * log_lik + 2 * dof
# TODO: how to generalize this for more general DoF estimates. Both for the formula and the default gamma.
def ebic(log_lik, n_samples, n_features, n_support, gamma='default',
fit_intercept=True):
"""
Calculates the Extended Bayesian Information Criterion defined as
-2 log(Lik) + n_support * log(n_samples) + 2 * gamma log(|model_space|)
where |model_space| = (n_features choose n_support).
Parameters
----------
log_lik: float
The observed data log-likelihood.
n_samples: int
Number of samples.
n_features: int
Number of features.
n_support: int
Number of non-zero coefficient elements.
gamma: str or float
If a number, must be between 0 and 1 inclusive. If gamma='default' then we use gamma = 1 - 0.5 * log(n_samples) / log(n_features) as suggested in (Chen and Chen, 2008).
fit_intercept: bool
Whether or not an intercept was included in the model.
Output
------
ebic: float
References
----------
<NAME>. and <NAME>., 2008. Extended Bayesian information criteria for model selection with large model spaces. Biometrika, 95(3), pp.759-771.
"""
    # augment n_features if there was an intercept
if fit_intercept:
n_features = n_features + 1
n_support = n_support + 1
# maybe compute default
if gamma == 'default':
# default formula from Section 5 of (Chen and Chen, 2008)
gamma = 1 - 0.5 * (np.log(n_samples) / np.log(n_features))
gamma = np.clip(gamma, a_min=0, a_max=1)
# check gamma
assert gamma >= 0 and gamma <= 1, "Gamma should be in [0, 1]"
# log of model space log (n_features choose n_support)
log_model_size = log_binom(n=n_features, k=n_support)
return bic(log_lik=log_lik, n_samples=n_samples, dof=n_support) + \
2 * gamma * log_model_size
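# Illustrative usage sketch (not part of the original module): comparing the
# three criteria on arbitrary made-up numbers, purely to show the call signatures.
if __name__ == "__main__":
    log_lik, n, p, k = -120.0, 200, 50, 5  # log-lik, samples, features, support size
    print("AIC :", aic(log_lik=log_lik, n_samples=n, dof=k))
    print("BIC :", bic(log_lik=log_lik, n_samples=n, dof=k))
    print("EBIC:", ebic(log_lik=log_lik, n_samples=n, n_features=p, n_support=k))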
|
[
"yaglm.utils.count_support",
"numpy.log",
"yaglm.extmath.log_binom",
"numpy.clip"
] |
[((5733, 5769), 'yaglm.extmath.log_binom', 'log_binom', ([], {'n': 'n_features', 'k': 'n_support'}), '(n=n_features, k=n_support)\n', (5742, 5769), False, 'from yaglm.extmath import log_binom\n'), ((5534, 5566), 'numpy.clip', 'np.clip', (['gamma'], {'a_min': '(0)', 'a_max': '(1)'}), '(gamma, a_min=0, a_max=1)\n', (5541, 5566), True, 'import numpy as np\n'), ((3604, 3621), 'numpy.log', 'np.log', (['n_samples'], {}), '(n_samples)\n', (3610, 3621), True, 'import numpy as np\n'), ((2659, 2713), 'yaglm.utils.count_support', 'count_support', (['estimator.coef_'], {'zero_tol': 'self.zero_tol'}), '(estimator.coef_, zero_tol=self.zero_tol)\n', (2672, 2713), False, 'from yaglm.utils import count_support\n'), ((5478, 5495), 'numpy.log', 'np.log', (['n_samples'], {}), '(n_samples)\n', (5484, 5495), True, 'import numpy as np\n'), ((5498, 5516), 'numpy.log', 'np.log', (['n_features'], {}), '(n_features)\n', (5504, 5516), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
import numpy as np
"""
Input:
    Q_tab : Tabular Q (numpy matrix |S| by |A|)
    env : an environment object (e.g. env = Maze())
    isMaze : True for maze environments (default); the code also handles False for plain square grid worlds
    arrow : True if you want to plot arrows
"""
def value_plot(Q_tab, env, isMaze = True, arrow = True):
    direction={0:(0,-0.4),1:(0,0.4),2:(-0.4,0),3:(0.4,0)} #(x,y) coordinate
V = np.max(Q_tab,axis=1)
best_action = np.argmax(Q_tab,axis=1)
if isMaze:
idx2cell = env.idx2cell
        for i in range(8):
f,ax = plt.subplots()
y_mat = np.zeros(env.dim)
            for j in range(len(idx2cell)):
pos = idx2cell[j]
y_mat[pos[0], pos[1]] = V[8*j+i]
if arrow:
a = best_action[8*j+i]
ax.arrow(pos[1], pos[0], direction[a][0], direction[a][1],
head_width=0.05, head_length=0.1, fc='r', ec='r')
y_mat[env.goal_pos] = max(V)+0.1
ax.imshow(y_mat,cmap='gray')
else:
n = int(np.sqrt(len(V)))
tab = np.zeros((n,n))
        for r in range(n):
            for c in range(n):
if not(r==(n-1)and c==(n-1)):
tab[r,c] = V[n*c+r]
if arrow:
d = direction[best_action[n*c+r]]
plt.arrow(c,r,d[0],d[1], head_width=0.05, head_length=0.1, fc='r', ec='r')
tab[env.goal_pos] = max(V[:-1])+0.1
plt.imshow(tab,cmap='gray')
plt.show()
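# Illustrative usage sketch (not part of the original helper), exercising the
# non-maze branch on a hypothetical 4x4 grid world with a random tabular Q.
# DummyEnv is an assumption; only its goal_pos attribute is used by this branch.
if __name__ == "__main__":
    class DummyEnv(object):
        goal_pos = (3, 3)
    Q = np.random.rand(16, 4)  # |S| = 16 states, |A| = 4 actions
    value_plot(Q, DummyEnv(), isMaze=False, arrow=True)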
|
[
"matplotlib.pyplot.show",
"numpy.argmax",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.max",
"matplotlib.pyplot.arrow",
"matplotlib.pyplot.subplots"
] |
[((379, 400), 'numpy.max', 'np.max', (['Q_tab'], {'axis': '(1)'}), '(Q_tab, axis=1)\n', (385, 400), True, 'import numpy as np\n'), ((418, 442), 'numpy.argmax', 'np.argmax', (['Q_tab'], {'axis': '(1)'}), '(Q_tab, axis=1)\n', (427, 442), True, 'import numpy as np\n'), ((1516, 1526), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1524, 1526), True, 'import matplotlib.pyplot as plt\n'), ((1082, 1098), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (1090, 1098), True, 'import numpy as np\n'), ((1483, 1511), 'matplotlib.pyplot.imshow', 'plt.imshow', (['tab'], {'cmap': '"""gray"""'}), "(tab, cmap='gray')\n", (1493, 1511), True, 'import matplotlib.pyplot as plt\n'), ((536, 550), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (548, 550), True, 'import matplotlib.pyplot as plt\n'), ((571, 588), 'numpy.zeros', 'np.zeros', (['env.dim'], {}), '(env.dim)\n', (579, 588), True, 'import numpy as np\n'), ((1356, 1433), 'matplotlib.pyplot.arrow', 'plt.arrow', (['c', 'r', 'd[0]', 'd[1]'], {'head_width': '(0.05)', 'head_length': '(0.1)', 'fc': '"""r"""', 'ec': '"""r"""'}), "(c, r, d[0], d[1], head_width=0.05, head_length=0.1, fc='r', ec='r')\n", (1365, 1433), True, 'import matplotlib.pyplot as plt\n')]
|
#!/usr/bin/env python3
import numpy as np
import pickle
import sklearn.metrics
import sklearn.preprocessing
import sklearn.feature_selection
import sklearn.svm
import utils
def main():
metadata = utils.get_metadata()
settings = utils.get_settings('probablygood.gavin.json')
settings['R_SEED'] = None
# settings['SUBJECTS'] = ['Patient_2']
scaler = sklearn.preprocessing.StandardScaler()
thresh = sklearn.feature_selection.VarianceThreshold()
# selector = sklearn.feature_selection.SelectKBest()
classifier = sklearn.svm.SVC(probability=True)
pipe = sklearn.pipeline.Pipeline([('scl', scaler),
('thr', thresh),
# ('sel', selector),
('cls', classifier)])
output = {}
data = utils.get_data(settings)
da = utils.DataAssembler(settings, data, metadata)
global_results = {}
for subject in list(settings['SUBJECTS']) + ['global']:
global_results[subject] = {}
for i in range(10):
print("iteration {0}".format(i))
for subject in settings['SUBJECTS']:
print(subject)
X, y = da.build_training(subject)
# cv = utils.Sequence_CV(da.training_segments, metadata)
train, test, train_results, test_results = fit_and_return_parts_and_results(
da,
metadata,
pipe,
X,
y)
output.update({subject: {'train': train,
'test': test,
'train_results': train_results,
'test_results': test_results}})
# with open('raw_cv_data.pickle', 'wb') as fh:
# pickle.dump(output, fh)
summary_stats = mean_var_calc(output)
for subject in settings['SUBJECTS']:
for t in summary_stats[subject]:
try:
global_results[subject][t] += [summary_stats[subject][t]]
except KeyError:
global_results[subject][t] = [summary_stats[subject][t]]
print(global_results)
for subject in settings['SUBJECTS']:
for t in global_results[subject]:
meanscore = np.mean(global_results[subject][t])
varscore = np.var(global_results[subject][t])
print("For {0} mean {1} was "
"{2} with sigma {3}".format(subject, t, meanscore, varscore))
with open('summary_stats.pickle', 'wb') as fh:
pickle.dump(global_results, fh)
def fit_and_return_parts_and_results(da, metadata, pipe, X, y):
    '''
    Fit the pipeline across cross-validation folds and return the fold
    partitions together with the per-fold train/test prediction results.
    '''
train_results = []
test_results = []
train_partition = []
test_partition = []
cv = utils.Sequence_CV(da.training_segments, metadata)
    for train, test in cv:
        # keep the fold indices so the returned partitions are not left empty
        train_partition.append(train)
        test_partition.append(test)
        weight = len(y[train]) / sum(y[train])
weights = [weight if i == 1 else 1 for i in y[train]]
pipe.fit(X[train], y[train], cls__sample_weight=weights)
ptest = pipe.predict_proba(X[test])
ptrain = pipe.predict_proba(X[train])
# train_score = sklearn.metrics.roc_auc_score(y[train], p_train[:,1])
# test_score = sklearn.metrics.roc_auc_score(y[test], p_test[:,1])
# store subject predictions and true labels
train_results.append(np.hstack([y[train][np.newaxis].T,
ptrain[:, 1][np.newaxis].T]))
test_results.append(np.hstack([y[test][np.newaxis].T,
ptest[:, 1][np.newaxis].T]))
return train_partition, test_partition, train_results, test_results
def mean_var_calc(output):
summary_stats = {}
global_train = []
global_test = []
for subject in output.keys():
train_results = np.vstack(output[subject]['train_results'])
trainscore = sklearn.metrics.roc_auc_score(train_results[:, 0],
train_results[:, 1])
global_train.append(train_results)
test_results = np.vstack(output[subject]['test_results'])
testscore = sklearn.metrics.roc_auc_score(test_results[:, 0],
test_results[:, 1])
global_test.append(test_results)
# mean = np.mean(output[subject]['results'])
# var = np.var(output[subject]['results'])
summary_stats.update({subject: {'trainscore': trainscore,
'testscore': testscore}})
global_train = np.vstack(global_train[:])
globaltrainscore = sklearn.metrics.roc_auc_score(global_train[:, 0],
global_train[:, 1])
global_test = np.vstack(global_test[:])
globaltestscore = sklearn.metrics.roc_auc_score(global_test[:, 0],
global_test[:, 1])
summary_stats['global'] = {'trainscore': globaltrainscore,
'testscore': globaltestscore}
return summary_stats
if __name__ == '__main__':
main()
|
[
"utils.get_settings",
"pickle.dump",
"utils.DataAssembler",
"utils.get_metadata",
"utils.Sequence_CV",
"numpy.hstack",
"numpy.mean",
"utils.get_data",
"numpy.var",
"numpy.vstack"
] |
[((204, 224), 'utils.get_metadata', 'utils.get_metadata', ([], {}), '()\n', (222, 224), False, 'import utils\n'), ((240, 285), 'utils.get_settings', 'utils.get_settings', (['"""probablygood.gavin.json"""'], {}), "('probablygood.gavin.json')\n", (258, 285), False, 'import utils\n'), ((869, 893), 'utils.get_data', 'utils.get_data', (['settings'], {}), '(settings)\n', (883, 893), False, 'import utils\n'), ((903, 948), 'utils.DataAssembler', 'utils.DataAssembler', (['settings', 'data', 'metadata'], {}), '(settings, data, metadata)\n', (922, 948), False, 'import utils\n'), ((3104, 3153), 'utils.Sequence_CV', 'utils.Sequence_CV', (['da.training_segments', 'metadata'], {}), '(da.training_segments, metadata)\n', (3121, 3153), False, 'import utils\n'), ((4878, 4904), 'numpy.vstack', 'np.vstack', (['global_train[:]'], {}), '(global_train[:])\n', (4887, 4904), True, 'import numpy as np\n'), ((5069, 5094), 'numpy.vstack', 'np.vstack', (['global_test[:]'], {}), '(global_test[:])\n', (5078, 5094), True, 'import numpy as np\n'), ((2817, 2848), 'pickle.dump', 'pickle.dump', (['global_results', 'fh'], {}), '(global_results, fh)\n', (2828, 2848), False, 'import pickle\n'), ((4144, 4187), 'numpy.vstack', 'np.vstack', (["output[subject]['train_results']"], {}), "(output[subject]['train_results'])\n", (4153, 4187), True, 'import numpy as np\n'), ((4399, 4441), 'numpy.vstack', 'np.vstack', (["output[subject]['test_results']"], {}), "(output[subject]['test_results'])\n", (4408, 4441), True, 'import numpy as np\n'), ((2541, 2576), 'numpy.mean', 'np.mean', (['global_results[subject][t]'], {}), '(global_results[subject][t])\n', (2548, 2576), True, 'import numpy as np\n'), ((2600, 2634), 'numpy.var', 'np.var', (['global_results[subject][t]'], {}), '(global_results[subject][t])\n', (2606, 2634), True, 'import numpy as np\n'), ((3682, 3745), 'numpy.hstack', 'np.hstack', (['[y[train][np.newaxis].T, ptrain[:, 1][np.newaxis].T]'], {}), '([y[train][np.newaxis].T, ptrain[:, 1][np.newaxis].T])\n', (3691, 3745), True, 'import numpy as np\n'), ((3815, 3876), 'numpy.hstack', 'np.hstack', (['[y[test][np.newaxis].T, ptest[:, 1][np.newaxis].T]'], {}), '([y[test][np.newaxis].T, ptest[:, 1][np.newaxis].T])\n', (3824, 3876), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2019 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Unit tests for the nowcasting.OpticalFlow plugin """
import unittest
from datetime import datetime, timedelta
import iris
import numpy as np
from iris.coords import DimCoord
from iris.exceptions import InvalidCubeError
from iris.tests import IrisTest
from improver.nowcasting.optical_flow import OpticalFlow
from improver.tests.set_up_test_cubes import set_up_variable_cube
from improver.utilities.warnings_handler import ManageWarnings
class Test__init__(IrisTest):
"""Test OpticalFlow class initialisation"""
def test_basic(self):
"""Test initialisation and types"""
plugin = OpticalFlow()
self.assertIsInstance(plugin.data_smoothing_radius_km, float)
self.assertIsInstance(plugin.data_smoothing_method, str)
self.assertIsInstance(plugin.iterations, int)
self.assertIsInstance(plugin.point_weight, float)
self.assertIsNone(plugin.data1)
self.assertIsNone(plugin.data2)
self.assertIsNone(plugin.shape)
class Test__repr__(IrisTest):
"""Test string representation"""
def test_basic(self):
"""Test string representation"""
expected_string = ('<OpticalFlow: data_smoothing_radius_km: 14.0, '
'data_smoothing_method: box, iterations: 100, '
'point_weight: 0.1, metadata_dict: {}>')
result = str(OpticalFlow())
self.assertEqual(result, expected_string)
class Test_makekernel(IrisTest):
"""Test makekernel function"""
def test_basic(self):
"""Test for correct output type"""
result = OpticalFlow().makekernel(2)
self.assertIsInstance(result, np.ndarray)
def test_values(self):
"""Test output values"""
expected_output = np.array([[0., 0., 0., 0., 0.],
[0., 0.0625, 0.1250, 0.0625, 0.],
[0., 0.1250, 0.2500, 0.1250, 0.],
[0., 0.0625, 0.1250, 0.0625, 0.],
[0., 0., 0., 0., 0.]])
result = OpticalFlow().makekernel(2)
self.assertArrayAlmostEqual(result, expected_output)
class OpticalFlowUtilityTest(IrisTest):
"""Class with shared plugin definition for small utility tests"""
def setUp(self):
"""Set up dummy plugin and populate data members"""
self.plugin = OpticalFlow()
self.plugin.data1 = np.array([[1., 2., 3., 4., 5.],
[0., 1., 2., 3., 4.],
[0., 0., 1., 2., 3.]])
self.plugin.data2 = np.array([[0., 1., 2., 3., 4.],
[0., 0., 1., 2., 3.],
[0., 0., 0., 1., 2.]])
self.plugin.shape = self.plugin.data1.shape
class Test_interp_to_midpoint(OpticalFlowUtilityTest):
"""Test interp_to_midpoint averaging function"""
def test_basic(self):
"""Test result is of correct type and shape"""
result = self.plugin.interp_to_midpoint(self.plugin.data1)
self.assertIsInstance(result, np.ndarray)
self.assertSequenceEqual(result.shape, (2, 4))
def test_values(self):
"""Test output values"""
expected_output = np.array([[1., 2., 3., 4.],
[0.25, 1., 2., 3.]])
result = self.plugin.interp_to_midpoint(self.plugin.data1)
self.assertArrayAlmostEqual(result, expected_output)
def test_first_axis(self):
"""Test averaging over first axis"""
expected_output = np.array([[0.5, 1.5, 2.5, 3.5, 4.5],
[0.0, 0.5, 1.5, 2.5, 3.5]])
result = self.plugin.interp_to_midpoint(self.plugin.data1, axis=0)
self.assertArrayAlmostEqual(result, expected_output)
def test_second_axis(self):
"""Test averaging over second axis"""
expected_output = np.array([[1.5, 2.5, 3.5, 4.5],
[0.5, 1.5, 2.5, 3.5],
[0.0, 0.5, 1.5, 2.5]])
result = self.plugin.interp_to_midpoint(self.plugin.data1, axis=1)
self.assertArrayAlmostEqual(result, expected_output)
def test_array_too_small(self):
"""Test returns empty array if averaging over an axis of length 1"""
small_array = self.plugin.data1[0, :].reshape((1, 5))
result = self.plugin.interp_to_midpoint(small_array)
self.assertEqual(result.size, 0.)
def test_small_array_single_axis(self):
"""Test sensible output if averaging over one valid axis"""
expected_output = np.array([[1.5, 2.5, 3.5, 4.5]])
small_array = self.plugin.data1[0, :].reshape((1, 5))
result = self.plugin.interp_to_midpoint(small_array, axis=1)
self.assertArrayAlmostEqual(result, expected_output)
class Test__partial_derivative_spatial(OpticalFlowUtilityTest):
"""Test _partial_derivative_spatial function"""
def test_basic(self):
"""Test for correct output type and shape"""
result = self.plugin._partial_derivative_spatial(axis=0)
self.assertIsInstance(result, np.ndarray)
self.assertSequenceEqual(result.shape, self.plugin.shape)
def test_first_axis(self):
"""Test output values for axis=0"""
expected_output = np.array([[-0.1875, -0.4375, -0.5, -0.5, -0.25],
[-0.2500, -0.6875, -0.9375, -1.0, -0.50],
[-0.0625, -0.2500, -0.4375, -0.5, -0.25]])
result = self.plugin._partial_derivative_spatial(axis=0)
self.assertArrayAlmostEqual(result, expected_output)
def test_second_axis(self):
"""Test output values for axis=1"""
expected_output = np.array([[0.1875, 0.4375, 0.5000, 0.5, 0.25],
[0.2500, 0.6875, 0.9375, 1.0, 0.50],
[0.0625, 0.2500, 0.4375, 0.5, 0.25]])
result = self.plugin._partial_derivative_spatial(axis=1)
self.assertArrayAlmostEqual(result, expected_output)
class Test__partial_derivative_temporal(OpticalFlowUtilityTest):
"""Test _partial_derivative_temporal function"""
def test_basic(self):
"""Test for correct output type and shape"""
result = self.plugin._partial_derivative_temporal()
self.assertIsInstance(result, np.ndarray)
self.assertSequenceEqual(result.shape, self.plugin.shape)
def test_values(self):
"""Test output values. Note this is NOT the same function as
_partial_derivative_spatial(axis=0), the output arrays are the same
as a result of the choice of data."""
expected_output = np.array([[-0.1875, -0.4375, -0.5, -0.5, -0.25],
[-0.2500, -0.6875, -0.9375, -1.0, -0.50],
[-0.0625, -0.2500, -0.4375, -0.5, -0.25]])
result = self.plugin._partial_derivative_temporal()
self.assertArrayAlmostEqual(result, expected_output)
class Test__make_subboxes(OpticalFlowUtilityTest):
"""Test _make_subboxes function"""
def test_basic(self):
"""Test for correct output types"""
self.plugin.boxsize = 2
boxes, weights = self.plugin._make_subboxes(self.plugin.data1)
self.assertIsInstance(boxes, list)
self.assertIsInstance(boxes[0], np.ndarray)
self.assertIsInstance(weights, np.ndarray)
def test_box_list(self):
"""Test function carves up array as expected"""
expected_boxes = \
[np.array([[1., 2.], [0., 1.]]), np.array([[3., 4.], [2., 3.]]),
np.array([[5.], [4.]]), np.array([[0., 0.]]),
np.array([[1., 2.]]), np.array([[3.]])]
self.plugin.boxsize = 2
boxes, _ = self.plugin._make_subboxes(self.plugin.data1)
for box, ebox in zip(boxes, expected_boxes):
self.assertArrayAlmostEqual(box, ebox)
def test_weights_values(self):
"""Test output weights values"""
expected_weights = np.array([0.54216664, 0.95606307, 0.917915, 0.,
0.46473857, 0.54216664])
self.plugin.boxsize = 2
_, weights = self.plugin._make_subboxes(self.plugin.data1)
self.assertArrayAlmostEqual(weights, expected_weights)
class OpticalFlowDisplacementTest(IrisTest):
"""Class with shared plugin definition for smoothing and regridding
tests"""
def setUp(self):
"""Define input matrices and dummy plugin"""
self.umat = np.array([[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.],
[2., 1., 1., 0., 0.],
[3., 2., 1., 1., 0.]])
self.vmat = np.array([[3., 2., 1., 0., 0.],
[2., 1., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.]])
self.weights = 0.3*np.multiply(self.umat, self.vmat)
self.plugin = OpticalFlow(iterations=20)
self.plugin.data_smoothing_radius = 3
self.plugin.boxsize = 3
# NOTE data dimensions are NOT exact multiples of box size
self.plugin.data1 = np.zeros((11, 14))
self.plugin.shape = self.plugin.data1.shape
class Test__box_to_grid(OpticalFlowDisplacementTest):
"""Test _box_to_grid function"""
def test_basic(self):
"""Test for correct output types"""
umat = self.plugin._box_to_grid(self.umat)
self.assertIsInstance(umat, np.ndarray)
self.assertSequenceEqual(umat.shape, (11, 14))
def test_values(self):
"""Test output matrix values"""
expected_umat = np.array(
[[1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[2., 2., 2., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[2., 2., 2., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[2., 2., 2., 1., 1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[3., 3., 3., 2., 2., 2., 1., 1., 1., 1., 1., 1., 0., 0.],
[3., 3., 3., 2., 2., 2., 1., 1., 1., 1., 1., 1., 0., 0.]])
umat = self.plugin._box_to_grid(self.umat)
self.assertArrayAlmostEqual(umat, expected_umat)
class Test_smooth(OpticalFlowDisplacementTest):
"""Test simple smooth function"""
def test_basic(self):
"""Test for correct output types"""
output = self.plugin.smooth(self.umat, 2)
self.assertIsInstance(output, np.ndarray)
def test_box_smooth(self):
"""Test smooth over square box (default)"""
expected_output = np.array([[0.84, 0.60, 0.36, 0.12, 0.04],
[1.20, 0.92, 0.60, 0.28, 0.12],
[1.56, 1.24, 0.84, 0.44, 0.20],
[1.92, 1.56, 1.08, 0.60, 0.28]])
output = self.plugin.smooth(self.umat, 2)
self.assertArrayAlmostEqual(output, expected_output)
def test_kernel_smooth(self):
"""Test smooth over circular kernel"""
expected_output = np.array([[0.8125, 0.3750, 0.0625, 0., 0.],
[1.1250, 0.7500, 0.3125, 0.0625, 0.],
[1.8125, 1.3125, 0.7500, 0.3125, 0.0625],
[2.5000, 1.8125, 1.1250, 0.6250, 0.1875]])
output = self.plugin.smooth(self.umat, 2, method='kernel')
self.assertArrayAlmostEqual(output, expected_output)
def test_null_behaviour(self):
"""Test smooth with a kernel radius of 1 has no effect"""
output = self.plugin.smooth(self.umat, 1, method='kernel')
self.assertArrayAlmostEqual(output, self.umat)
class Test__smart_smooth(OpticalFlowDisplacementTest):
"""Test _smart_smooth function"""
def test_basic(self):
"""Test for correct output types"""
umat = self.plugin._smart_smooth(self.umat, self.umat, self.weights)
self.assertIsInstance(umat, np.ndarray)
self.assertSequenceEqual(umat.shape, self.umat.shape)
def test_values(self):
"""Test output matrices have expected values"""
expected_umat = np.array([[1., 1., 1., 0., 0.],
[1.25352113, 1.19354839, 1., 0.08333333, 0.],
[1.48780488, 1.50000000, 1., 1.00000000, 1.],
[2., 2., 1., 1., 1.]])
umat = self.plugin._smart_smooth(self.umat, self.umat, self.weights)
self.assertArrayAlmostEqual(umat, expected_umat)
class Test__smooth_advection_fields(OpticalFlowDisplacementTest):
"""Test smoothing of advection displacements"""
def test_basic(self):
"""Test for correct output types"""
vmat = self.plugin._smooth_advection_fields(self.vmat,
self.weights)
self.assertIsInstance(vmat, np.ndarray)
self.assertSequenceEqual(vmat.shape, (11, 14))
def test_values(self):
"""Test output matrices have expected values"""
first_row_v = np.array(
[2.451711, 2.451711, 2.451711, 2.341303, 2.341303, 2.341303,
2.028805, 2.028805, 2.028805, 1.694845, 1.694845, 1.694845,
1.503583, 1.503583])
vmat = self.plugin._smooth_advection_fields(self.vmat,
self.weights)
self.assertArrayAlmostEqual(vmat[0], first_row_v)
class Test_solve_for_uv(IrisTest):
"""Test solve_for_uv function"""
def setUp(self):
"""Define input matrices"""
self.I_xy = np.array([[2., 3.],
[1., -2.]])
self.I_t = np.array([-8., 3.])
def test_basic(self):
"""Test for correct output types"""
u, v = OpticalFlow().solve_for_uv(self.I_xy, self.I_t)
self.assertIsInstance(u, float)
self.assertIsInstance(v, float)
def test_values(self):
"""Test output values"""
u, v = OpticalFlow().solve_for_uv(self.I_xy, self.I_t)
self.assertAlmostEqual(u, 1.)
self.assertAlmostEqual(v, 2.)
class Test_extreme_value_check(IrisTest):
"""Test extreme_value_check function"""
def setUp(self):
"""Define some test velocity matrices"""
self.umat = 0.2*np.arange(12).reshape((3, 4))
self.vmat = -0.1*np.ones((3, 4), dtype=float)
self.weights = np.full((3, 4), 0.5)
def test_basic(self):
"""Test for correct output types"""
OpticalFlow().extreme_value_check(self.umat, self.vmat, self.weights)
self.assertIsInstance(self.umat, np.ndarray)
self.assertIsInstance(self.vmat, np.ndarray)
self.assertIsInstance(self.weights, np.ndarray)
def test_values(self):
"""Test extreme data values are infilled with zeros"""
expected_umat = np.array([[0., 0.2, 0.4, 0.6],
[0.8, 0., 0., 0.],
[0., 0., 0., 0.]])
expected_vmat = np.array([[-0.1, -0.1, -0.1, -0.1],
[-0.1, 0., 0., 0.],
[0., 0., 0., 0.]])
expected_weights = np.array([[0.5, 0.5, 0.5, 0.5],
[0.5, 0., 0., 0.],
[0., 0., 0., 0.]])
OpticalFlow().extreme_value_check(self.umat, self.vmat, self.weights)
self.assertArrayAlmostEqual(self.umat, expected_umat)
self.assertArrayAlmostEqual(self.vmat, expected_vmat)
self.assertArrayAlmostEqual(self.weights, expected_weights)
def test_null_behaviour(self):
"""Test reasonable data values are preserved"""
umat = 0.5*np.ones((3, 4), dtype=float)
expected_umat = np.copy(umat)
expected_vmat = np.copy(self.vmat)
expected_weights = np.copy(self.weights)
OpticalFlow().extreme_value_check(umat, self.vmat, self.weights)
self.assertArrayAlmostEqual(umat, expected_umat)
self.assertArrayAlmostEqual(self.vmat, expected_vmat)
self.assertArrayAlmostEqual(self.weights, expected_weights)
class Test_calculate_displacement_vectors(IrisTest):
"""Test calculation of advection displacement vectors"""
def setUp(self):
"""Set up plugin options and input rainfall-like matrices that produce
non-singular outputs. Large matrices with zeros are needed for the
smoothing algorithms to behave sensibly."""
self.plugin = OpticalFlow(iterations=20)
self.plugin.data_smoothing_radius = 3
self.plugin.boxsize = 3
rainfall_block = np.array([[1., 1., 1., 1., 1., 1., 1.],
[1., 2., 2., 2., 2., 1., 1.],
[1., 2., 3., 3., 2., 1., 1.],
[1., 2., 3., 3., 2., 1., 1.],
[1., 2., 2., 2., 2., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]])
first_input = np.zeros((10, 10), dtype=np.float32)
first_input[1:8, 2:9] = rainfall_block
self.plugin.data1 = first_input
self.plugin.shape = first_input.shape
second_input = np.zeros((10, 10), dtype=np.float32)
second_input[2:9, 1:8] = rainfall_block
self.plugin.data2 = second_input
self.partial_dx = self.plugin._partial_derivative_spatial(axis=1)
self.partial_dy = self.plugin._partial_derivative_spatial(axis=0)
self.partial_dt = self.plugin._partial_derivative_temporal()
def test_basic(self):
"""Test outputs are of the correct type"""
umat, _ = self.plugin.calculate_displacement_vectors(
self.partial_dx, self.partial_dy, self.partial_dt)
self.assertIsInstance(umat, np.ndarray)
self.assertSequenceEqual(umat.shape, self.plugin.shape)
def test_values(self):
"""Test output values"""
umat, vmat = self.plugin.calculate_displacement_vectors(
self.partial_dx, self.partial_dy, self.partial_dt)
self.assertAlmostEqual(np.mean(umat), np.float32(-0.124607998))
self.assertAlmostEqual(np.mean(vmat), np.float32(0.124607998))
class Test__zero_advection_velocities_warning(IrisTest):
"""Test the _zero_advection_velocities_warning."""
def setUp(self):
"""Set up arrays of advection velocities"""
self.plugin = OpticalFlow()
rain = np.ones((3, 3))
self.rain_mask = np.where(rain > 0)
@ManageWarnings(record=True)
def test_warning_raised(self, warning_list=None):
"""Test that a warning is raised if an excess number of zero values
are present within the input array."""
greater_than_10_percent_zeroes_array = (
np.array([[3., 5., 7.],
[0., 2., 1.],
[1., 1., 1.]]))
warning_msg = "cells within the domain have zero advection"
self.plugin._zero_advection_velocities_warning(
greater_than_10_percent_zeroes_array, self.rain_mask)
self.assertTrue(any(item.category == UserWarning
for item in warning_list))
self.assertTrue(any(warning_msg in str(item)
for item in warning_list))
@ManageWarnings(record=True)
def test_no_warning_raised_if_no_zeroes(self, warning_list=None):
"""Test that no warning is raised if the number of zero values in the
array is below the threshold used to define an excessive number of
zero values."""
nonzero_array = np.array([[3., 5., 7.],
[2., 2., 1.],
[1., 1., 1.]])
self.plugin._zero_advection_velocities_warning(nonzero_array,
self.rain_mask)
self.assertTrue(len(warning_list) == 0)
@ManageWarnings(record=True)
def test_no_warning_raised_if_fewer_zeroes_than_threshold(
self, warning_list=None):
"""Test that no warning is raised if the number of zero values in the
array is below the threshold used to define an excessive number of
zero values when at least one zero exists within the array."""
rain = np.ones((5, 5))
less_than_10_percent_zeroes_array = (
np.array([[1., 3., 5., 7., 1.],
[0., 2., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1.]]))
self.plugin._zero_advection_velocities_warning(
less_than_10_percent_zeroes_array, np.where(rain > 0))
self.assertTrue(len(warning_list) == 0)
@ManageWarnings(record=True)
def test_no_warning_raised_for_modified_threshold(
self, warning_list=None):
"""Test that no warning is raised if the number of zero values in the
array is below the threshold used to define an excessive number of
zero values when the threshold is modified."""
less_than_30_percent_zeroes_array = (
np.array([[3., 5., 7.],
[0., 2., 1.],
[0., 1., 1.]]))
self.plugin._zero_advection_velocities_warning(
less_than_30_percent_zeroes_array, self.rain_mask,
zero_vel_threshold=0.3)
self.assertTrue(len(warning_list) == 0)
@ManageWarnings(record=True)
def test_no_warning_raised_outside_rain(self, warning_list=None):
"""Test warning ignores zeros outside the rain area mask"""
rain = np.array([[0, 0, 1],
[0, 1, 1],
[1, 1, 1]])
wind = np.array([[0, 0, 1],
[0, 1, 1],
[1, 1, 1]])
self.plugin._zero_advection_velocities_warning(
wind, np.where(rain > 0))
self.assertTrue(len(warning_list) == 0)
class Test_process_dimensionless(IrisTest):
"""Test the process_dimensionless method"""
def setUp(self):
"""Set up plugin options and input rainfall-like matrices that produce
non-singular outputs. Large matrices with zeros are needed for the
smoothing algorithms to behave sensibly."""
self.plugin = OpticalFlow(iterations=20)
self.plugin.boxsize = 3
self.smoothing_kernel = 3
rainfall_block = np.array([[1., 1., 1., 1., 1., 1., 1.],
[1., 2., 2., 2., 2., 1., 1.],
[1., 2., 3., 3., 2., 1., 1.],
[1., 2., 3., 3., 2., 1., 1.],
[1., 2., 2., 2., 2., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]])
self.first_input = np.zeros((16, 16), dtype=np.float32)
self.first_input[1:8, 2:9] = rainfall_block
self.second_input = np.zeros((16, 16), dtype=np.float32)
self.second_input[2:9, 1:8] = rainfall_block
def test_basic(self):
"""Test outputs are of the correct type and value"""
ucomp, vcomp = self.plugin.process_dimensionless(
self.first_input, self.second_input, 0, 1, self.smoothing_kernel)
self.assertIsInstance(ucomp, np.ndarray)
self.assertIsInstance(vcomp, np.ndarray)
self.assertAlmostEqual(np.mean(ucomp), 0.97735876)
self.assertAlmostEqual(np.mean(vcomp), -0.97735894)
def test_axis_inversion(self):
"""Test inverting x and y axis indices gives the correct result"""
ucomp, vcomp = self.plugin.process_dimensionless(
self.first_input, self.second_input, 1, 0, self.smoothing_kernel)
self.assertAlmostEqual(np.mean(ucomp), -0.97735894)
self.assertAlmostEqual(np.mean(vcomp), 0.97735876)
class Test_process(IrisTest):
"""Test the process method"""
def setUp(self):
"""Set up plugin and input rainfall-like cubes"""
self.plugin = OpticalFlow(iterations=20)
self.plugin.data_smoothing_radius_km = np.float32(6.)
coord_points = 2000*np.arange(16, dtype=np.float32) # in metres
rainfall_block = np.array([[1., 1., 1., 1., 1., 1., 1.],
[1., 2., 2., 2., 2., 1., 1.],
[1., 2., 3., 3., 2., 1., 1.],
[1., 2., 3., 3., 2., 1., 1.],
[1., 2., 2., 2., 2., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1.]],
dtype=np.float32)
data1 = np.zeros((16, 16), dtype=np.float32)
data1[1:8, 2:9] = rainfall_block
self.cube1 = set_up_variable_cube(
data1, name="rainfall_rate", units="mm h-1",
spatial_grid="equalarea", time=datetime(2018, 2, 20, 4, 0),
frt=datetime(2018, 2, 20, 4, 0))
self.cube1.coord(axis='x').points = coord_points
self.cube1.coord(axis='y').points = coord_points
data2 = np.zeros((16, 16), dtype=np.float32)
data2[2:9, 1:8] = rainfall_block
self.cube2 = set_up_variable_cube(
data2, name="rainfall_rate", units="mm h-1",
spatial_grid="equalarea", time=datetime(2018, 2, 20, 4, 15),
frt=datetime(2018, 2, 20, 4, 15))
self.cube2.coord(axis='x').points = coord_points
self.cube2.coord(axis='y').points = coord_points
def test_basic(self):
"""Test correct output types and metadata"""
ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
for cube in [ucube, vcube]:
self.assertIsInstance(cube, iris.cube.Cube)
self.assertEqual(cube.coord("time")[0],
self.cube2.coord("time")[0])
self.assertEqual(cube.units, "m s-1")
self.assertIn("precipitation_advection", cube.name())
self.assertIn("velocity", cube.name())
def test_metadata(self):
"""Test correct output types and metadata"""
metadata_dict = {"attributes": {
"mosg__grid_version": "1.0.0",
"mosg__model_configuration": "nc_det",
"source": "Met Office Nowcast",
"institution": "Met Office",
"title": "Nowcast on UK 2 km Standard Grid"}}
plugin = OpticalFlow(iterations=20, metadata_dict=metadata_dict)
plugin.data_smoothing_radius_km = 6.
ucube, vcube = plugin.process(self.cube1, self.cube2, boxsize=3)
for cube in [ucube, vcube]:
self.assertEqual(cube.attributes, metadata_dict["attributes"])
def test_values(self):
"""Test velocity values are as expected (in m/s)"""
ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
self.assertAlmostEqual(
np.mean(ucube.data), -2.1719084)
self.assertAlmostEqual(np.mean(vcube.data), 2.1719084)
def test_values_with_precip_rate_in_m_per_s(self):
"""Test velocity values are as expected (in m/s) when the input
precipitation rates are in units of m/s rather than the expected
mm/hr."""
self.cube1.convert_units('m s-1')
self.cube2.convert_units('m s-1')
ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
self.assertAlmostEqual(
np.mean(ucube.data), -2.1719084)
self.assertAlmostEqual(np.mean(vcube.data), 2.1719084)
def test_values_with_masked_data(self):
"""Test velocity values are as expected when masked cubes are used as
input to the tests. This test is to capture behaviour whereby mask
fill values were being used as valid data. This resulted in far from
correct velocities being calculated by the optical flow code. Notably
the velocity fields did not reflect the position of precipitation in
the input precipitation fields, and the returned velocities were too
low.
        In this test masked cubes are used alongside comparable unmasked cubes
        in which the fill values are included in the field. We expect
the results to be different, with higher velocities returned for the
masked cubes.
"""
mask = np.zeros((16, 16))
mask[:2, :] = 1
mask[:, :2] = 1
# Ensure the masked data points contain a high fill value.
data1 = self.cube1.data
data2 = self.cube2.data
data1[:2, :] = 1.0E36
data1[:, :2] = 1.0E36
data2[:2, :] = 1.0E36
data2[:, :2] = 1.0E36
masked1 = np.ma.MaskedArray(self.cube1.data, mask=mask)
masked2 = np.ma.MaskedArray(self.cube2.data, mask=mask)
masked_cube1 = self.cube1.copy(data=masked1)
masked_cube2 = self.cube2.copy(data=masked2)
unmasked_cube1 = self.cube1.copy(data=data1)
unmasked_cube2 = self.cube2.copy(data=data2)
ucube_masked, vcube_masked = self.plugin.process(
masked_cube1, masked_cube2, boxsize=3)
ucube_unmasked, vcube_unmasked = self.plugin.process(
unmasked_cube1, unmasked_cube2, boxsize=3)
self.assertAlmostEqual(
np.mean(ucube_masked.data), -1.4995803)
self.assertAlmostEqual(
np.mean(vcube_masked.data), 1.4995805)
self.assertAlmostEqual(
np.mean(ucube_unmasked.data), -0.2869996)
self.assertAlmostEqual(
np.mean(vcube_unmasked.data), 0.28699964)
def test_error_for_unconvertable_units(self):
"""Test that an exception is raised if the input precipitation cubes
have units that cannot be converted to mm/hr."""
self.cube1.units = 'm'
self.cube2.units = 'm'
msg = "Input data are in units that cannot be converted to mm/hr"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(self.cube1, self.cube2, boxsize=3)
def test_input_cubes_unchanged(self):
"""Test the input precipitation rate cubes are unchanged by use in the
optical flow plugin. One of the cubes is converted to rates in ms-1
before use to ensure the cube remains in these units despite the
default working units within optical flow being mm/hr."""
self.cube1.convert_units("m s-1")
cube1_ref = self.cube1.copy()
cube2_ref = self.cube2.copy()
_, _ = self.plugin.process(self.cube1, self.cube2, boxsize=3)
self.assertEqual(self.cube1, cube1_ref)
self.assertEqual(self.cube2, cube2_ref)
def test_decrease_time_interval(self):
"""Test that decreasing the time interval between radar frames below
15 minutes does not alter the smoothing radius. To test this the time
        interval is halved, which should give velocities identical to those in
        the values test above multiplied by a factor of two."""
time_unit = self.cube2.coord("time").units
new_time = time_unit.num2date(self.cube2.coord("time").points[0])
new_time -= timedelta(seconds=450)
self.cube2.remove_coord("time")
time_coord = DimCoord(time_unit.date2num(new_time),
standard_name="time", units=time_unit)
self.cube2.add_aux_coord(time_coord)
ucube, vcube = self.plugin.process(self.cube1, self.cube2, boxsize=3)
self.assertAlmostEqual(
np.mean(ucube.data), -2.1719084 * 2.)
self.assertAlmostEqual(np.mean(vcube.data), 2.1719084 * 2.)
def test_increase_time_interval(self):
"""Test that increasing the time interval between radar frames above
15 minutes leads to an increase in the data smoothing radius. In this
test this will result in a smoothing radius larger than the box size,
which is not allowed and will raise an exception. The updated radius
value in this case is 12 km (6 grid squares), exceeding the 3 square
box size."""
time_unit = self.cube2.coord("time").units
new_time = time_unit.num2date(self.cube2.coord("time").points[0])
new_time += timedelta(seconds=900)
self.cube2.remove_coord("time")
time_coord = DimCoord(time_unit.date2num(new_time),
standard_name="time", units=time_unit)
self.cube2.add_aux_coord(time_coord)
msg = "Box size ([0-9]+) too small"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(self.cube1, self.cube2, boxsize=3)
def test_error_small_kernel(self):
"""Test failure if data smoothing radius is too small"""
self.plugin.data_smoothing_radius_km = 3.
msg = "Input data smoothing radius 1 too small "
with self.assertRaisesRegex(ValueError, msg):
_ = self.plugin.process(self.cube1, self.cube2)
def test_error_small_box(self):
"""Test failure if box size is smaller than data smoothing radius"""
msg = "Box size 2 too small"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(self.cube1, self.cube2, boxsize=2)
def test_error_unmatched_coords(self):
"""Test failure if cubes are provided on unmatched grids"""
cube2 = self.cube2.copy()
for ax in ["x", "y"]:
cube2.coord(axis=ax).points = 4*np.arange(16)
msg = "Input cubes on unmatched grids"
with self.assertRaisesRegex(InvalidCubeError, msg):
_ = self.plugin.process(self.cube1, cube2)
def test_error_no_time_difference(self):
"""Test failure if two cubes are provided with the same time"""
msg = "Expected positive time difference "
with self.assertRaisesRegex(InvalidCubeError, msg):
_ = self.plugin.process(self.cube1, self.cube1)
def test_error_negative_time_difference(self):
"""Test failure if cubes are provided in the wrong order"""
msg = "Expected positive time difference "
with self.assertRaisesRegex(InvalidCubeError, msg):
_ = self.plugin.process(self.cube2, self.cube1)
@ManageWarnings(record=True)
def test_warning_zero_inputs(self, warning_list=None):
"""Test code raises a warning and sets advection velocities to zero
if there is no rain in the input cubes."""
null_data = np.zeros(self.cube1.shape)
cube1 = self.cube1.copy(data=null_data)
cube2 = self.cube2.copy(data=null_data)
ucube, vcube = self.plugin.process(cube1, cube2)
warning_msg = "No non-zero data in input fields"
self.assertTrue(any(item.category == UserWarning
for item in warning_list))
self.assertTrue(any(warning_msg in str(item)
for item in warning_list))
self.assertArrayAlmostEqual(ucube.data, null_data)
self.assertArrayAlmostEqual(vcube.data, null_data)
def test_error_nonmatching_inputs(self):
"""Test failure if cubes are of different data types"""
self.cube1.rename("snowfall_rate")
msg = "Input cubes contain different data types"
with self.assertRaisesRegex(ValueError, msg):
self.plugin.process(self.cube1, self.cube2)
@ManageWarnings(record=True)
def test_warning_nonprecip_inputs(self, warning_list=None):
"""Test code raises a warning if input cubes have
non-rain variable names"""
self.cube1.rename("snowfall_rate")
self.cube2.rename("snowfall_rate")
_, _ = self.plugin.process(self.cube1, self.cube2, boxsize=3)
warning_msg = "Input data are of non-precipitation type"
self.assertTrue(any(item.category == UserWarning
for item in warning_list))
self.assertTrue(any(warning_msg in str(item)
for item in warning_list))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"numpy.full",
"numpy.multiply",
"numpy.copy",
"numpy.float32",
"improver.nowcasting.optical_flow.OpticalFlow",
"numpy.zeros",
"numpy.ones",
"numpy.ma.MaskedArray",
"datetime.datetime",
"numpy.where",
"numpy.array",
"datetime.timedelta",
"numpy.mean",
"improver.utilities.warnings_handler.ManageWarnings",
"numpy.arange"
] |
[((20613, 20640), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (20627, 20640), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((21393, 21420), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (21407, 21420), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((22008, 22035), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (22022, 22035), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((22837, 22864), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (22851, 22864), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((23531, 23558), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (23545, 23558), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((36316, 36343), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (36330, 36343), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((37451, 37478), 'improver.utilities.warnings_handler.ManageWarnings', 'ManageWarnings', ([], {'record': '(True)'}), '(record=True)\n', (37465, 37478), False, 'from improver.utilities.warnings_handler import ManageWarnings\n'), ((38110, 38125), 'unittest.main', 'unittest.main', ([], {}), '()\n', (38123, 38125), False, 'import unittest\n'), ((2274, 2287), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (2285, 2287), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((3419, 3594), 'numpy.array', 'np.array', (['[[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0625, 0.125, 0.0625, 0.0], [0.0, 0.125,\n 0.25, 0.125, 0.0], [0.0, 0.0625, 0.125, 0.0625, 0.0], [0.0, 0.0, 0.0, \n 0.0, 0.0]]'], {}), '([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0625, 0.125, 0.0625, 0.0], [\n 0.0, 0.125, 0.25, 0.125, 0.0], [0.0, 0.0625, 0.125, 0.0625, 0.0], [0.0,\n 0.0, 0.0, 0.0, 0.0]])\n', (3427, 3594), True, 'import numpy as np\n'), ((4042, 4055), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (4053, 4055), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((4084, 4180), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0, 4.0, 5.0], [0.0, 1.0, 2.0, 3.0, 4.0], [0.0, 0.0, 1.0, 2.0,\n 3.0]]'], {}), '([[1.0, 2.0, 3.0, 4.0, 5.0], [0.0, 1.0, 2.0, 3.0, 4.0], [0.0, 0.0, \n 1.0, 2.0, 3.0]])\n', (4092, 4180), True, 'import numpy as np\n'), ((4266, 4362), 'numpy.array', 'np.array', (['[[0.0, 1.0, 2.0, 3.0, 4.0], [0.0, 0.0, 1.0, 2.0, 3.0], [0.0, 0.0, 0.0, 1.0,\n 2.0]]'], {}), '([[0.0, 1.0, 2.0, 3.0, 4.0], [0.0, 0.0, 1.0, 2.0, 3.0], [0.0, 0.0, \n 0.0, 1.0, 2.0]])\n', (4274, 4362), True, 'import numpy as np\n'), ((4923, 4978), 'numpy.array', 'np.array', (['[[1.0, 2.0, 3.0, 4.0], [0.25, 1.0, 2.0, 3.0]]'], {}), '([[1.0, 2.0, 3.0, 4.0], [0.25, 1.0, 2.0, 3.0]])\n', (4931, 4978), True, 'import numpy as np\n'), ((5239, 5303), 'numpy.array', 'np.array', (['[[0.5, 1.5, 2.5, 3.5, 4.5], [0.0, 0.5, 1.5, 2.5, 3.5]]'], {}), '([[0.5, 1.5, 2.5, 3.5, 4.5], [0.0, 0.5, 1.5, 2.5, 3.5]])\n', (5247, 5303), True, 'import numpy as np\n'), ((5581, 5657), 'numpy.array', 'np.array', (['[[1.5, 2.5, 3.5, 4.5], [0.5, 1.5, 2.5, 3.5], 
[0.0, 0.5, 1.5, 2.5]]'], {}), '([[1.5, 2.5, 3.5, 4.5], [0.5, 1.5, 2.5, 3.5], [0.0, 0.5, 1.5, 2.5]])\n', (5589, 5657), True, 'import numpy as np\n'), ((6284, 6316), 'numpy.array', 'np.array', (['[[1.5, 2.5, 3.5, 4.5]]'], {}), '([[1.5, 2.5, 3.5, 4.5]])\n', (6292, 6316), True, 'import numpy as np\n'), ((6990, 7123), 'numpy.array', 'np.array', (['[[-0.1875, -0.4375, -0.5, -0.5, -0.25], [-0.25, -0.6875, -0.9375, -1.0, -\n 0.5], [-0.0625, -0.25, -0.4375, -0.5, -0.25]]'], {}), '([[-0.1875, -0.4375, -0.5, -0.5, -0.25], [-0.25, -0.6875, -0.9375, \n -1.0, -0.5], [-0.0625, -0.25, -0.4375, -0.5, -0.25]])\n', (6998, 7123), True, 'import numpy as np\n'), ((7425, 7543), 'numpy.array', 'np.array', (['[[0.1875, 0.4375, 0.5, 0.5, 0.25], [0.25, 0.6875, 0.9375, 1.0, 0.5], [\n 0.0625, 0.25, 0.4375, 0.5, 0.25]]'], {}), '([[0.1875, 0.4375, 0.5, 0.5, 0.25], [0.25, 0.6875, 0.9375, 1.0, 0.5\n ], [0.0625, 0.25, 0.4375, 0.5, 0.25]])\n', (7433, 7543), True, 'import numpy as np\n'), ((8367, 8500), 'numpy.array', 'np.array', (['[[-0.1875, -0.4375, -0.5, -0.5, -0.25], [-0.25, -0.6875, -0.9375, -1.0, -\n 0.5], [-0.0625, -0.25, -0.4375, -0.5, -0.25]]'], {}), '([[-0.1875, -0.4375, -0.5, -0.5, -0.25], [-0.25, -0.6875, -0.9375, \n -1.0, -0.5], [-0.0625, -0.25, -0.4375, -0.5, -0.25]])\n', (8375, 8500), True, 'import numpy as np\n'), ((9713, 9786), 'numpy.array', 'np.array', (['[0.54216664, 0.95606307, 0.917915, 0.0, 0.46473857, 0.54216664]'], {}), '([0.54216664, 0.95606307, 0.917915, 0.0, 0.46473857, 0.54216664])\n', (9721, 9786), True, 'import numpy as np\n'), ((10212, 10335), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0], [2.0, 1.0, 1.0, 0.0,\n 0.0], [3.0, 2.0, 1.0, 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0], [2.0, 1.0, \n 1.0, 0.0, 0.0], [3.0, 2.0, 1.0, 1.0, 0.0]])\n', (10220, 10335), True, 'import numpy as np\n'), ((10422, 10545), 'numpy.array', 'np.array', (['[[3.0, 2.0, 1.0, 0.0, 0.0], [2.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0,\n 0.0], [0.0, 0.0, 0.0, 1.0, 0.0]]'], {}), '([[3.0, 2.0, 1.0, 0.0, 0.0], [2.0, 1.0, 0.0, 0.0, 0.0], [1.0, 0.0, \n 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0, 0.0]])\n', (10430, 10545), True, 'import numpy as np\n'), ((10695, 10721), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {'iterations': '(20)'}), '(iterations=20)\n', (10706, 10721), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((10895, 10913), 'numpy.zeros', 'np.zeros', (['(11, 14)'], {}), '((11, 14))\n', (10903, 10913), True, 'import numpy as np\n'), ((11376, 12222), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0],\n [3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0],\n [3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0]]'], {}), '([[1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,\n 0.0], [3.0, 3.0, 3.0, 2.0, 2.0, 2.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0,\n 0.0]])\n', (11384, 12222), True, 'import numpy as np\n'), ((12646, 12784), 'numpy.array', 'np.array', (['[[0.84, 0.6, 0.36, 0.12, 0.04], [1.2, 0.92, 0.6, 0.28, 0.12], [1.56, 1.24, \n 0.84, 0.44, 0.2], [1.92, 1.56, 1.08, 0.6, 0.28]]'], {}), '([[0.84, 0.6, 0.36, 0.12, 0.04], [1.2, 0.92, 0.6, 0.28, 0.12], [\n 1.56, 1.24, 0.84, 0.44, 0.2], [1.92, 1.56, 1.08, 0.6, 0.28]])\n', (12654, 12784), True, 'import numpy as np\n'), ((13113, 13281), 'numpy.array', 'np.array', (['[[0.8125, 0.375, 0.0625, 0.0, 0.0], [1.125, 0.75, 0.3125, 0.0625, 0.0], [\n 1.8125, 1.3125, 0.75, 0.3125, 0.0625], [2.5, 1.8125, 1.125, 0.625, 0.1875]]'], {}), '([[0.8125, 0.375, 0.0625, 0.0, 0.0], [1.125, 0.75, 0.3125, 0.0625, \n 0.0], [1.8125, 1.3125, 0.75, 0.3125, 0.0625], [2.5, 1.8125, 1.125, \n 0.625, 0.1875]])\n', (13121, 13281), True, 'import numpy as np\n'), ((14202, 14357), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 0.0, 0.0], [1.25352113, 1.19354839, 1.0, 0.08333333, 0.0],\n [1.48780488, 1.5, 1.0, 1.0, 1.0], [2.0, 2.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 0.0, 0.0], [1.25352113, 1.19354839, 1.0, \n 0.08333333, 0.0], [1.48780488, 1.5, 1.0, 1.0, 1.0], [2.0, 2.0, 1.0, 1.0,\n 1.0]])\n', (14210, 14357), True, 'import numpy as np\n'), ((15114, 15274), 'numpy.array', 'np.array', (['[2.451711, 2.451711, 2.451711, 2.341303, 2.341303, 2.341303, 2.028805, \n 2.028805, 2.028805, 1.694845, 1.694845, 1.694845, 1.503583, 1.503583]'], {}), '([2.451711, 2.451711, 2.451711, 2.341303, 2.341303, 2.341303, \n 2.028805, 2.028805, 2.028805, 1.694845, 1.694845, 1.694845, 1.503583, \n 1.503583])\n', (15122, 15274), True, 'import numpy as np\n'), ((15643, 15678), 'numpy.array', 'np.array', (['[[2.0, 3.0], [1.0, -2.0]]'], {}), '([[2.0, 3.0], [1.0, -2.0]])\n', (15651, 15678), True, 'import numpy as np\n'), ((15724, 15745), 'numpy.array', 'np.array', (['[-8.0, 3.0]'], {}), '([-8.0, 3.0])\n', (15732, 15745), True, 'import numpy as np\n'), ((16448, 16468), 'numpy.full', 'np.full', (['(3, 4)', '(0.5)'], {}), '((3, 4), 0.5)\n', (16455, 16468), True, 'import numpy as np\n'), ((16895, 16971), 'numpy.array', 'np.array', (['[[0.0, 0.2, 0.4, 0.6], [0.8, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.0, 0.2, 0.4, 0.6], [0.8, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]])\n', (16903, 16971), True, 'import numpy as np\n'), ((17056, 17142), 'numpy.array', 'np.array', (['[[-0.1, -0.1, -0.1, -0.1], [-0.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]'], {}), '([[-0.1, -0.1, -0.1, -0.1], [-0.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, \n 0.0]])\n', (17064, 17142), True, 'import numpy as np\n'), ((17226, 17302), 'numpy.array', 'np.array', (['[[0.5, 0.5, 0.5, 0.5], [0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]'], {}), '([[0.5, 0.5, 0.5, 0.5], [0.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]])\n', (17234, 17302), True, 'import numpy as np\n'), ((17804, 17817), 'numpy.copy', 'np.copy', (['umat'], {}), '(umat)\n', 
(17811, 17817), True, 'import numpy as np\n'), ((17842, 17860), 'numpy.copy', 'np.copy', (['self.vmat'], {}), '(self.vmat)\n', (17849, 17860), True, 'import numpy as np\n'), ((17888, 17909), 'numpy.copy', 'np.copy', (['self.weights'], {}), '(self.weights)\n', (17895, 17909), True, 'import numpy as np\n'), ((18538, 18564), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {'iterations': '(20)'}), '(iterations=20)\n', (18549, 18564), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((18669, 18953), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0],\n [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0\n ], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \n 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, \n 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, \n 2.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0], [1.0, 1.0, 1.0, \n 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]])\n', (18677, 18953), True, 'import numpy as np\n'), ((19123, 19159), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {'dtype': 'np.float32'}), '((10, 10), dtype=np.float32)\n', (19131, 19159), True, 'import numpy as np\n'), ((19317, 19353), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {'dtype': 'np.float32'}), '((10, 10), dtype=np.float32)\n', (19325, 19353), True, 'import numpy as np\n'), ((20518, 20531), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (20529, 20531), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((20547, 20562), 'numpy.ones', 'np.ones', (['(3, 3)'], {}), '((3, 3))\n', (20554, 20562), True, 'import numpy as np\n'), ((20588, 20606), 'numpy.where', 'np.where', (['(rain > 0)'], {}), '(rain > 0)\n', (20596, 20606), True, 'import numpy as np\n'), ((20879, 20940), 'numpy.array', 'np.array', (['[[3.0, 5.0, 7.0], [0.0, 2.0, 1.0], [1.0, 1.0, 1.0]]'], {}), '([[3.0, 5.0, 7.0], [0.0, 2.0, 1.0], [1.0, 1.0, 1.0]])\n', (20887, 20940), True, 'import numpy as np\n'), ((21692, 21753), 'numpy.array', 'np.array', (['[[3.0, 5.0, 7.0], [2.0, 2.0, 1.0], [1.0, 1.0, 1.0]]'], {}), '([[3.0, 5.0, 7.0], [2.0, 2.0, 1.0], [1.0, 1.0, 1.0]])\n', (21700, 21753), True, 'import numpy as np\n'), ((22376, 22391), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (22383, 22391), True, 'import numpy as np\n'), ((22450, 22600), 'numpy.array', 'np.array', (['[[1.0, 3.0, 5.0, 7.0, 1.0], [0.0, 2.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0,\n 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 3.0, 5.0, 7.0, 1.0], [0.0, 2.0, 1.0, 1.0, 1.0], [1.0, 1.0, \n 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0]])\n', (22458, 22600), True, 'import numpy as np\n'), ((23224, 23285), 'numpy.array', 'np.array', (['[[3.0, 5.0, 7.0], [0.0, 2.0, 1.0], [0.0, 1.0, 1.0]]'], {}), '([[3.0, 5.0, 7.0], [0.0, 2.0, 1.0], [0.0, 1.0, 1.0]])\n', (23232, 23285), True, 'import numpy as np\n'), ((23712, 23755), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 1], [1, 1, 1]]'], {}), '([[0, 0, 1], [0, 1, 1], [1, 1, 1]])\n', (23720, 23755), True, 'import numpy as np\n'), ((23821, 23864), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 1], [1, 1, 1]]'], {}), '([[0, 0, 1], [0, 1, 1], [1, 1, 1]])\n', (23829, 23864), True, 'import numpy as np\n'), ((24403, 24429), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {'iterations': '(20)'}), 
'(iterations=20)\n', (24414, 24429), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((24522, 24806), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0],\n [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0\n ], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \n 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, \n 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, \n 2.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0], [1.0, 1.0, 1.0, \n 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]])\n', (24530, 24806), True, 'import numpy as np\n'), ((24981, 25017), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {'dtype': 'np.float32'}), '((16, 16), dtype=np.float32)\n', (24989, 25017), True, 'import numpy as np\n'), ((25099, 25135), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {'dtype': 'np.float32'}), '((16, 16), dtype=np.float32)\n', (25107, 25135), True, 'import numpy as np\n'), ((26164, 26190), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {'iterations': '(20)'}), '(iterations=20)\n', (26175, 26190), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((26238, 26253), 'numpy.float32', 'np.float32', (['(6.0)'], {}), '(6.0)\n', (26248, 26253), True, 'import numpy as np\n'), ((26352, 26659), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0],\n [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0\n ], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, \n 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, \n 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, 2.0, 1.0, 1.0], [1.0, 2.0, 3.0, 3.0, \n 2.0, 1.0, 1.0], [1.0, 2.0, 2.0, 2.0, 2.0, 1.0, 1.0], [1.0, 1.0, 1.0, \n 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]], dtype=np.float32\n )\n', (26360, 26659), True, 'import numpy as np\n'), ((26852, 26888), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {'dtype': 'np.float32'}), '((16, 16), dtype=np.float32)\n', (26860, 26888), True, 'import numpy as np\n'), ((27278, 27314), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {'dtype': 'np.float32'}), '((16, 16), dtype=np.float32)\n', (27286, 27314), True, 'import numpy as np\n'), ((28594, 28649), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {'iterations': '(20)', 'metadata_dict': 'metadata_dict'}), '(iterations=20, metadata_dict=metadata_dict)\n', (28605, 28649), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((30502, 30520), 'numpy.zeros', 'np.zeros', (['(16, 16)'], {}), '((16, 16))\n', (30510, 30520), True, 'import numpy as np\n'), ((30840, 30885), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['self.cube1.data'], {'mask': 'mask'}), '(self.cube1.data, mask=mask)\n', (30857, 30885), True, 'import numpy as np\n'), ((30904, 30949), 'numpy.ma.MaskedArray', 'np.ma.MaskedArray', (['self.cube2.data'], {'mask': 'mask'}), '(self.cube2.data, mask=mask)\n', (30921, 30949), True, 'import numpy as np\n'), ((33271, 33293), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(450)'}), '(seconds=450)\n', (33280, 33293), False, 'from datetime import datetime, timedelta\n'), ((34334, 34356), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(900)'}), '(seconds=900)\n', (34343, 34356), False, 'from datetime import datetime, 
timedelta\n'), ((36550, 36576), 'numpy.zeros', 'np.zeros', (['self.cube1.shape'], {}), '(self.cube1.shape)\n', (36558, 36576), True, 'import numpy as np\n'), ((3032, 3045), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (3043, 3045), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((9232, 9266), 'numpy.array', 'np.array', (['[[1.0, 2.0], [0.0, 1.0]]'], {}), '([[1.0, 2.0], [0.0, 1.0]])\n', (9240, 9266), True, 'import numpy as np\n'), ((9264, 9298), 'numpy.array', 'np.array', (['[[3.0, 4.0], [2.0, 3.0]]'], {}), '([[3.0, 4.0], [2.0, 3.0]])\n', (9272, 9298), True, 'import numpy as np\n'), ((9309, 9333), 'numpy.array', 'np.array', (['[[5.0], [4.0]]'], {}), '([[5.0], [4.0]])\n', (9317, 9333), True, 'import numpy as np\n'), ((9333, 9355), 'numpy.array', 'np.array', (['[[0.0, 0.0]]'], {}), '([[0.0, 0.0]])\n', (9341, 9355), True, 'import numpy as np\n'), ((9368, 9390), 'numpy.array', 'np.array', (['[[1.0, 2.0]]'], {}), '([[1.0, 2.0]])\n', (9376, 9390), True, 'import numpy as np\n'), ((9390, 9407), 'numpy.array', 'np.array', (['[[3.0]]'], {}), '([[3.0]])\n', (9398, 9407), True, 'import numpy as np\n'), ((10639, 10672), 'numpy.multiply', 'np.multiply', (['self.umat', 'self.vmat'], {}), '(self.umat, self.vmat)\n', (10650, 10672), True, 'import numpy as np\n'), ((16396, 16424), 'numpy.ones', 'np.ones', (['(3, 4)'], {'dtype': 'float'}), '((3, 4), dtype=float)\n', (16403, 16424), True, 'import numpy as np\n'), ((17751, 17779), 'numpy.ones', 'np.ones', (['(3, 4)'], {'dtype': 'float'}), '((3, 4), dtype=float)\n', (17758, 17779), True, 'import numpy as np\n'), ((20196, 20209), 'numpy.mean', 'np.mean', (['umat'], {}), '(umat)\n', (20203, 20209), True, 'import numpy as np\n'), ((20211, 20235), 'numpy.float32', 'np.float32', (['(-0.124607998)'], {}), '(-0.124607998)\n', (20221, 20235), True, 'import numpy as np\n'), ((20268, 20281), 'numpy.mean', 'np.mean', (['vmat'], {}), '(vmat)\n', (20275, 20281), True, 'import numpy as np\n'), ((20283, 20306), 'numpy.float32', 'np.float32', (['(0.124607998)'], {}), '(0.124607998)\n', (20293, 20306), True, 'import numpy as np\n'), ((22763, 22781), 'numpy.where', 'np.where', (['(rain > 0)'], {}), '(rain > 0)\n', (22771, 22781), True, 'import numpy as np\n'), ((23989, 24007), 'numpy.where', 'np.where', (['(rain > 0)'], {}), '(rain > 0)\n', (23997, 24007), True, 'import numpy as np\n'), ((25542, 25556), 'numpy.mean', 'np.mean', (['ucomp'], {}), '(ucomp)\n', (25549, 25556), True, 'import numpy as np\n'), ((25601, 25615), 'numpy.mean', 'np.mean', (['vcomp'], {}), '(vcomp)\n', (25608, 25615), True, 'import numpy as np\n'), ((25908, 25922), 'numpy.mean', 'np.mean', (['ucomp'], {}), '(ucomp)\n', (25915, 25922), True, 'import numpy as np\n'), ((25968, 25982), 'numpy.mean', 'np.mean', (['vcomp'], {}), '(vcomp)\n', (25975, 25982), True, 'import numpy as np\n'), ((26282, 26313), 'numpy.arange', 'np.arange', (['(16)'], {'dtype': 'np.float32'}), '(16, dtype=np.float32)\n', (26291, 26313), True, 'import numpy as np\n'), ((29089, 29108), 'numpy.mean', 'np.mean', (['ucube.data'], {}), '(ucube.data)\n', (29096, 29108), True, 'import numpy as np\n'), ((29153, 29172), 'numpy.mean', 'np.mean', (['vcube.data'], {}), '(vcube.data)\n', (29160, 29172), True, 'import numpy as np\n'), ((29610, 29629), 'numpy.mean', 'np.mean', (['ucube.data'], {}), '(ucube.data)\n', (29617, 29629), True, 'import numpy as np\n'), ((29674, 29693), 'numpy.mean', 'np.mean', (['vcube.data'], {}), '(vcube.data)\n', (29681, 29693), True, 'import numpy as np\n'), 
((31435, 31461), 'numpy.mean', 'np.mean', (['ucube_masked.data'], {}), '(ucube_masked.data)\n', (31442, 31461), True, 'import numpy as np\n'), ((31519, 31545), 'numpy.mean', 'np.mean', (['vcube_masked.data'], {}), '(vcube_masked.data)\n', (31526, 31545), True, 'import numpy as np\n'), ((31602, 31630), 'numpy.mean', 'np.mean', (['ucube_unmasked.data'], {}), '(ucube_unmasked.data)\n', (31609, 31630), True, 'import numpy as np\n'), ((31688, 31716), 'numpy.mean', 'np.mean', (['vcube_unmasked.data'], {}), '(vcube_unmasked.data)\n', (31695, 31716), True, 'import numpy as np\n'), ((33631, 33650), 'numpy.mean', 'np.mean', (['ucube.data'], {}), '(ucube.data)\n', (33638, 33650), True, 'import numpy as np\n'), ((33700, 33719), 'numpy.mean', 'np.mean', (['vcube.data'], {}), '(vcube.data)\n', (33707, 33719), True, 'import numpy as np\n'), ((3254, 3267), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (3265, 3267), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((3737, 3750), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (3748, 3750), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((15830, 15843), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (15841, 15843), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((16034, 16047), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (16045, 16047), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((16548, 16561), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (16559, 16561), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((17378, 17391), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (17389, 17391), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((17918, 17931), 'improver.nowcasting.optical_flow.OpticalFlow', 'OpticalFlow', ([], {}), '()\n', (17929, 17931), False, 'from improver.nowcasting.optical_flow import OpticalFlow\n'), ((27073, 27100), 'datetime.datetime', 'datetime', (['(2018)', '(2)', '(20)', '(4)', '(0)'], {}), '(2018, 2, 20, 4, 0)\n', (27081, 27100), False, 'from datetime import datetime, timedelta\n'), ((27118, 27145), 'datetime.datetime', 'datetime', (['(2018)', '(2)', '(20)', '(4)', '(0)'], {}), '(2018, 2, 20, 4, 0)\n', (27126, 27145), False, 'from datetime import datetime, timedelta\n'), ((27499, 27527), 'datetime.datetime', 'datetime', (['(2018)', '(2)', '(20)', '(4)', '(15)'], {}), '(2018, 2, 20, 4, 15)\n', (27507, 27527), False, 'from datetime import datetime, timedelta\n'), ((27545, 27573), 'datetime.datetime', 'datetime', (['(2018)', '(2)', '(20)', '(4)', '(15)'], {}), '(2018, 2, 20, 4, 15)\n', (27553, 27573), False, 'from datetime import datetime, timedelta\n'), ((35554, 35567), 'numpy.arange', 'np.arange', (['(16)'], {}), '(16)\n', (35563, 35567), True, 'import numpy as np\n'), ((16341, 16354), 'numpy.arange', 'np.arange', (['(12)'], {}), '(12)\n', (16350, 16354), True, 'import numpy as np\n')]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests total magnetization integrals calculator."""
from test import QiskitNatureTestCase
from ddt import ddt, data
import numpy as np
from qiskit_nature.problems.second_quantization.electronic.integrals_calculators import (
calc_total_magnetization_ints,
)
@ddt
class TestMagnetizationIntegralsCalculator(QiskitNatureTestCase):
"""Tests total magnetization integrals calculator."""
num_modes_list = [1, 2, 3]
expected_h_1_list = [
[[-0.5 + 0.0j]],
[[0.5 + 0.0j, 0.0 + 0.0j], [0.0 + 0.0j, -0.5 + 0.0j]],
[
[0.5 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
[0.0 + 0.0j, -0.5 + 0.0j, -0.0 + 0.0j],
[0.0 + 0.0j, -0.0 + 0.0j, -0.5 + 0.0j],
],
]
expected_h_2_list = [None, None, None]
@data(*num_modes_list)
def test_calc_total_magnetization_ints(self, num_modes):
"""Tests that one-body integrals for total magnetization are calculated correctly."""
expected_h_1 = self.expected_h_1_list[num_modes - 1]
expected_h_2 = self.expected_h_2_list[num_modes - 1]
h_1, h_2 = calc_total_magnetization_ints(num_modes)
assert np.allclose(h_1, expected_h_1)
assert h_2 == expected_h_2
|
[
"qiskit_nature.problems.second_quantization.electronic.integrals_calculators.calc_total_magnetization_ints",
"numpy.allclose",
"ddt.data"
] |
[((1251, 1272), 'ddt.data', 'data', (['*num_modes_list'], {}), '(*num_modes_list)\n', (1255, 1272), False, 'from ddt import ddt, data\n'), ((1570, 1610), 'qiskit_nature.problems.second_quantization.electronic.integrals_calculators.calc_total_magnetization_ints', 'calc_total_magnetization_ints', (['num_modes'], {}), '(num_modes)\n', (1599, 1610), False, 'from qiskit_nature.problems.second_quantization.electronic.integrals_calculators import calc_total_magnetization_ints\n'), ((1626, 1656), 'numpy.allclose', 'np.allclose', (['h_1', 'expected_h_1'], {}), '(h_1, expected_h_1)\n', (1637, 1656), True, 'import numpy as np\n')]
|
import abc
import copy
import numpy as np
from .config import DATABUFFER_CONFIG
class databuffer(object):
def __init__(self, hyperparams):
config = copy.deepcopy(DATABUFFER_CONFIG)
config.update(hyperparams)
self.max_size = config['memory_size']
self.state_dims = config['n_states']
if isinstance(self.state_dims, dict):
self.state_dims = self.state_dims['n_s']
if 'n_actions' in config.keys():
self.n_actions = config['n_actions']
self.actions_dims = config['n_action_dims']
self.dicrete_action = config['dicrete_action']
if isinstance(self.state_dims, (int, np.int64)):
self.state_dims = (self.state_dims, )
self.S = np.zeros((0,) + self.state_dims, dtype = np.float32)
self.A = np.zeros([0, self.actions_dims], dtype = np.uint8 if self.dicrete_action else np.float32)
self.R = np.zeros([0, 1], dtype = np.float32)
self.S_ = np.zeros((0,) + self.state_dims, dtype = np.float32)
self.done = np.zeros([0, 1], dtype = np.uint8)
# other data in transitions. For example, goals, episode infos, etc.
self.other_data = None
if 'other_data' in config:
self.other_data = {}
for key, box in config['other_data'].items():
self.other_data[key] = np.zeros((0,) + box.shape[1:], dtype=box.dtype)
# memory counter: How many transitions are recorded in total
self.mem_c = 0
def store_transition(self, transitions):
self.S = np.concatenate((self.S, transitions['state']), axis=0)
self.A = np.concatenate((self.A, transitions['action']), axis=0)
self.R = np.concatenate((self.R, transitions['reward']), axis=0)
self.done = np.concatenate((self.done, transitions['done']), axis=0)
self.S_ = np.concatenate((self.S_, transitions['next_state']), axis=0)
if self.other_data:
for key in self.other_data.keys():
assert 'other_data' in transitions, \
"Other data types should be included in transitions except S, A, R, Done, and S_."
self.other_data[key] = np.concatenate((self.other_data[key], transitions['other_data'][key]), axis=0)
self.mem_c += transitions['state'].shape[0]
if self.mem_c >self.max_size:
self.S = self.S[-self.max_size:]
self.A = self.A[-self.max_size:]
self.R = self.R[-self.max_size:]
self.done = self.done[-self.max_size:]
self.S_ = self.S_[-self.max_size:]
if self.other_data:
for key in self.other_data.keys():
self.other_data[key] = self.other_data[key][-self.max_size:]
def sample_batch(self, batch_size = None):
if batch_size is not None:
if batch_size > self.mem_c or batch_size > self.max_size:
raise RuntimeError("Batch size is bigger than buffer size")
            # sample without replacement:
            # sample_index = np.random.choice(min(self.max_size, self.mem_c), size=batch_size)
            # sample with replacement:
sample_index = np.random.randint(0, self.mem_c, size=batch_size)
else:
sample_index = np.arange(min(self.max_size, self.mem_c))
batch = {}
batch['state'] = self.S[sample_index]
batch['action'] = self.A[sample_index]
batch['reward'] = self.R[sample_index]
batch['done'] = self.done[sample_index]
batch['next_state'] = self.S_[sample_index]
batch['other_data'] = None
if self.other_data:
batch['other_data'] = {}
for key in self.other_data.keys():
batch['other_data'][key] = self.other_data[key][sample_index]
return batch, sample_index
def reset_buffer(self):
self.S = np.zeros((0,) + self.state_dims, dtype=np.float32)
self.A = np.zeros([0, self.actions_dims], dtype = np.uint8 if self.dicrete_action else np.float32)
self.R = np.zeros([0, 1], dtype = np.float32)
self.S_ = np.zeros((0,) + self.state_dims, dtype=np.float32)
self.done = np.zeros([0, 1], dtype = np.bool)
if self.other_data:
for key in self.other_data.keys():
self.other_data[key] = np.zeros((0,) + self.other_data[key].shape[1:], dtype=self.other_data[key].dtype)
self.mem_c = 0
class databuffer_PG_gaussian(databuffer):
def __init__(self, hyperparams):
super(databuffer_PG_gaussian, self).__init__(hyperparams)
self.mu = np.zeros([0, self.actions_dims])
self.sigma = np.zeros([0, self.actions_dims])
self.logpac = np.zeros([0, 1], dtype=np.float32)
def store_transition(self, transitions):
databuffer.store_transition(self, transitions)
self.mu = np.concatenate((self.mu, transitions['mu']), axis=0)
self.sigma = np.concatenate((self.sigma, transitions['sigma']), axis=0)
self.logpac = np.concatenate((self.logpac, transitions['logpac']), axis=0)
if self.mem_c >self.max_size:
self.mu = self.mu[-self.max_size:]
self.sigma = self.sigma[-self.max_size:]
self.logpac = self.logpac[-self.max_size:]
def sample_batch(self, batch_size= None):
batch, sample_index = databuffer.sample_batch(self, batch_size)
batch['mu'] = self.mu[sample_index]
batch['sigma'] = self.sigma[sample_index]
batch['logpac'] = self.logpac[sample_index]
return batch, sample_index
def reset_buffer(self):
databuffer.reset_buffer(self)
self.mu = np.zeros([0, self.actions_dims])
self.sigma = np.zeros([0, self.actions_dims])
self.logpac = np.zeros([0, 1], dtype=np.float32)
class databuffer_PG_softmax(databuffer):
def __init__(self, hyperparams):
super(databuffer_PG_softmax, self).__init__(hyperparams)
self.distri = np.zeros([0, self.n_actions])
self.logpac = np.zeros([0, 1], dtype=np.float32)
def store_transition(self, transitions):
databuffer.store_transition(self, transitions)
self.distri = np.concatenate((self.distri, transitions['distri']), axis=0)
self.logpac = np.concatenate((self.logpac, transitions['logpac']), axis=0)
if self.mem_c >self.max_size:
self.distri = self.distri[-self.max_size:]
self.logpac = self.logpac[-self.max_size:]
def sample_batch(self, batch_size= None):
batch, sample_index = databuffer.sample_batch(self, batch_size)
batch['distri'] = self.distri[sample_index]
batch['logpac'] = self.logpac[sample_index]
return batch, sample_index
def reset_buffer(self):
databuffer.reset_buffer(self)
self.distri = np.zeros([0, self.n_actions])
self.logpac = np.zeros([0, 1], dtype=np.float32)
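# A minimal usage sketch for the base replay buffer above. The config keys and the
# transition-dict keys mirror what __init__ and store_transition read; the concrete
# sizes and random data are assumptions for illustration only. Because the module
# uses a relative import, run it as part of its package (python -m ...).
if __name__ == "__main__":
    demo_buffer = databuffer({
        'memory_size': 1000,
        'n_states': 4,            # flat state vector of length 4
        'n_actions': 2,
        'n_action_dims': 2,
        'dicrete_action': False,  # key name matches what the class reads
    })
    n = 8
    demo_transitions = {
        'state': np.random.randn(n, 4).astype(np.float32),
        'action': np.random.randn(n, 2).astype(np.float32),
        'reward': np.zeros((n, 1), dtype=np.float32),
        'done': np.zeros((n, 1), dtype=np.uint8),
        'next_state': np.random.randn(n, 4).astype(np.float32),
    }
    demo_buffer.store_transition(demo_transitions)
    batch, idx = demo_buffer.sample_batch(batch_size=4)
    print(batch['state'].shape, batch['action'].shape)  # (4, 4) (4, 2)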
|
[
"copy.deepcopy",
"numpy.random.randint",
"numpy.zeros",
"numpy.concatenate"
] |
[((161, 193), 'copy.deepcopy', 'copy.deepcopy', (['DATABUFFER_CONFIG'], {}), '(DATABUFFER_CONFIG)\n', (174, 193), False, 'import copy\n'), ((740, 790), 'numpy.zeros', 'np.zeros', (['((0,) + self.state_dims)'], {'dtype': 'np.float32'}), '((0,) + self.state_dims, dtype=np.float32)\n', (748, 790), True, 'import numpy as np\n'), ((810, 901), 'numpy.zeros', 'np.zeros', (['[0, self.actions_dims]'], {'dtype': '(np.uint8 if self.dicrete_action else np.float32)'}), '([0, self.actions_dims], dtype=np.uint8 if self.dicrete_action else\n np.float32)\n', (818, 901), True, 'import numpy as np\n'), ((917, 951), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (925, 951), True, 'import numpy as np\n'), ((972, 1022), 'numpy.zeros', 'np.zeros', (['((0,) + self.state_dims)'], {'dtype': 'np.float32'}), '((0,) + self.state_dims, dtype=np.float32)\n', (980, 1022), True, 'import numpy as np\n'), ((1045, 1077), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.uint8'}), '([0, 1], dtype=np.uint8)\n', (1053, 1077), True, 'import numpy as np\n'), ((1558, 1612), 'numpy.concatenate', 'np.concatenate', (["(self.S, transitions['state'])"], {'axis': '(0)'}), "((self.S, transitions['state']), axis=0)\n", (1572, 1612), True, 'import numpy as np\n'), ((1630, 1685), 'numpy.concatenate', 'np.concatenate', (["(self.A, transitions['action'])"], {'axis': '(0)'}), "((self.A, transitions['action']), axis=0)\n", (1644, 1685), True, 'import numpy as np\n'), ((1703, 1758), 'numpy.concatenate', 'np.concatenate', (["(self.R, transitions['reward'])"], {'axis': '(0)'}), "((self.R, transitions['reward']), axis=0)\n", (1717, 1758), True, 'import numpy as np\n'), ((1779, 1835), 'numpy.concatenate', 'np.concatenate', (["(self.done, transitions['done'])"], {'axis': '(0)'}), "((self.done, transitions['done']), axis=0)\n", (1793, 1835), True, 'import numpy as np\n'), ((1854, 1914), 'numpy.concatenate', 'np.concatenate', (["(self.S_, transitions['next_state'])"], {'axis': '(0)'}), "((self.S_, transitions['next_state']), axis=0)\n", (1868, 1914), True, 'import numpy as np\n'), ((3882, 3932), 'numpy.zeros', 'np.zeros', (['((0,) + self.state_dims)'], {'dtype': 'np.float32'}), '((0,) + self.state_dims, dtype=np.float32)\n', (3890, 3932), True, 'import numpy as np\n'), ((3950, 4041), 'numpy.zeros', 'np.zeros', (['[0, self.actions_dims]'], {'dtype': '(np.uint8 if self.dicrete_action else np.float32)'}), '([0, self.actions_dims], dtype=np.uint8 if self.dicrete_action else\n np.float32)\n', (3958, 4041), True, 'import numpy as np\n'), ((4057, 4091), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (4065, 4091), True, 'import numpy as np\n'), ((4112, 4162), 'numpy.zeros', 'np.zeros', (['((0,) + self.state_dims)'], {'dtype': 'np.float32'}), '((0,) + self.state_dims, dtype=np.float32)\n', (4120, 4162), True, 'import numpy as np\n'), ((4183, 4214), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.bool'}), '([0, 1], dtype=np.bool)\n', (4191, 4214), True, 'import numpy as np\n'), ((4600, 4632), 'numpy.zeros', 'np.zeros', (['[0, self.actions_dims]'], {}), '([0, self.actions_dims])\n', (4608, 4632), True, 'import numpy as np\n'), ((4654, 4686), 'numpy.zeros', 'np.zeros', (['[0, self.actions_dims]'], {}), '([0, self.actions_dims])\n', (4662, 4686), True, 'import numpy as np\n'), ((4709, 4743), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (4717, 4743), True, 'import numpy as np\n'), ((4863, 4915), 
'numpy.concatenate', 'np.concatenate', (["(self.mu, transitions['mu'])"], {'axis': '(0)'}), "((self.mu, transitions['mu']), axis=0)\n", (4877, 4915), True, 'import numpy as np\n'), ((4937, 4995), 'numpy.concatenate', 'np.concatenate', (["(self.sigma, transitions['sigma'])"], {'axis': '(0)'}), "((self.sigma, transitions['sigma']), axis=0)\n", (4951, 4995), True, 'import numpy as np\n'), ((5018, 5078), 'numpy.concatenate', 'np.concatenate', (["(self.logpac, transitions['logpac'])"], {'axis': '(0)'}), "((self.logpac, transitions['logpac']), axis=0)\n", (5032, 5078), True, 'import numpy as np\n'), ((5657, 5689), 'numpy.zeros', 'np.zeros', (['[0, self.actions_dims]'], {}), '([0, self.actions_dims])\n', (5665, 5689), True, 'import numpy as np\n'), ((5711, 5743), 'numpy.zeros', 'np.zeros', (['[0, self.actions_dims]'], {}), '([0, self.actions_dims])\n', (5719, 5743), True, 'import numpy as np\n'), ((5766, 5800), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (5774, 5800), True, 'import numpy as np\n'), ((5967, 5996), 'numpy.zeros', 'np.zeros', (['[0, self.n_actions]'], {}), '([0, self.n_actions])\n', (5975, 5996), True, 'import numpy as np\n'), ((6019, 6053), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (6027, 6053), True, 'import numpy as np\n'), ((6177, 6237), 'numpy.concatenate', 'np.concatenate', (["(self.distri, transitions['distri'])"], {'axis': '(0)'}), "((self.distri, transitions['distri']), axis=0)\n", (6191, 6237), True, 'import numpy as np\n'), ((6260, 6320), 'numpy.concatenate', 'np.concatenate', (["(self.logpac, transitions['logpac'])"], {'axis': '(0)'}), "((self.logpac, transitions['logpac']), axis=0)\n", (6274, 6320), True, 'import numpy as np\n'), ((6816, 6845), 'numpy.zeros', 'np.zeros', (['[0, self.n_actions]'], {}), '([0, self.n_actions])\n', (6824, 6845), True, 'import numpy as np\n'), ((6868, 6902), 'numpy.zeros', 'np.zeros', (['[0, 1]'], {'dtype': 'np.float32'}), '([0, 1], dtype=np.float32)\n', (6876, 6902), True, 'import numpy as np\n'), ((3184, 3233), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.mem_c'], {'size': 'batch_size'}), '(0, self.mem_c, size=batch_size)\n', (3201, 3233), True, 'import numpy as np\n'), ((1354, 1401), 'numpy.zeros', 'np.zeros', (['((0,) + box.shape[1:])'], {'dtype': 'box.dtype'}), '((0,) + box.shape[1:], dtype=box.dtype)\n', (1362, 1401), True, 'import numpy as np\n'), ((2186, 2264), 'numpy.concatenate', 'np.concatenate', (["(self.other_data[key], transitions['other_data'][key])"], {'axis': '(0)'}), "((self.other_data[key], transitions['other_data'][key]), axis=0)\n", (2200, 2264), True, 'import numpy as np\n'), ((4331, 4417), 'numpy.zeros', 'np.zeros', (['((0,) + self.other_data[key].shape[1:])'], {'dtype': 'self.other_data[key].dtype'}), '((0,) + self.other_data[key].shape[1:], dtype=self.other_data[key].\n dtype)\n', (4339, 4417), True, 'import numpy as np\n')]
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Neural Network."""
import unittest
from test import QiskitMachineLearningTestCase
import numpy as np
from ddt import ddt, data
from qiskit_machine_learning.neural_networks import NeuralNetwork
class _NeuralNetwork(NeuralNetwork):
"""Dummy implementation to test the abstract neural network class."""
def _forward(self, input_data, weights):
"""Expects as input either None, or a 2-dim array and returns."""
# handle None input
if self.num_inputs == 0 and input_data is None:
return np.zeros(self.output_shape)
return np.zeros(self.output_shape)
def _backward(self, input_data, weights):
# return None if there are no weights
input_grad = None
if self.num_inputs > 0:
input_grad = np.zeros((*self.output_shape, self.num_inputs))
weight_grad = None
if self.num_weights > 0:
weight_grad = np.zeros((*self.output_shape, self.num_weights))
return input_grad, weight_grad
@ddt
class TestNeuralNetwork(QiskitMachineLearningTestCase):
"""Neural Network Tests."""
@data(
# no input
((0, 0, 1), None),
((0, 1, 1), None),
((0, 1, 2), None),
((0, 1, (2, 2)), None),
# 1d input
((1, 0, 1), 0),
((1, 1, 1), 0),
((1, 1, 2), 0),
((1, 1, (2, 2)), 0),
# multi-dimensional input and weights
((2, 2, (2, 2)), [0, 0])
)
def test_forward_shape(self, params):
"""Test forward shape."""
config, input_data = params
network = _NeuralNetwork(*config)
shape = network.forward(input_data, np.zeros(network.num_weights)).shape
self.assertEqual(shape, network.output_shape)
@data(
# no input
((0, 0, 1), None),
((0, 1, 1), None),
((0, 1, 2), None),
((0, 1, (2, 2)), None),
# 1d input
((1, 0, 1), 0),
((1, 1, 1), 0),
((1, 1, 2), 0),
((1, 1, (2, 2)), 0),
# multi-dimensional input and weights
((2, 2, (2, 2)), [0, 0])
)
def test_backward_shape(self, params):
""" Test backward shape """
config, input_data = params
network = _NeuralNetwork(*config)
input_grad, weights_grad = network.backward(input_data, np.zeros(network.num_weights))
if network.num_inputs > 0:
self.assertEqual(input_grad.shape, (*network.output_shape, network.num_inputs))
else:
self.assertEqual(input_grad, None)
if network.num_weights > 0:
self.assertEqual(weights_grad.shape, (*network.output_shape, network.num_weights))
else:
self.assertEqual(weights_grad, None)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"ddt.data",
"numpy.zeros"
] |
[((1599, 1789), 'ddt.data', 'data', (['((0, 0, 1), None)', '((0, 1, 1), None)', '((0, 1, 2), None)', '((0, 1, (2, 2)), None)', '((1, 0, 1), 0)', '((1, 1, 1), 0)', '((1, 1, 2), 0)', '((1, 1, (2, 2)), 0)', '((2, 2, (2, 2)), [0, 0])'], {}), '(((0, 0, 1), None), ((0, 1, 1), None), ((0, 1, 2), None), ((0, 1, (2, 2\n )), None), ((1, 0, 1), 0), ((1, 1, 1), 0), ((1, 1, 2), 0), ((1, 1, (2, \n 2)), 0), ((2, 2, (2, 2)), [0, 0]))\n', (1603, 1789), False, 'from ddt import ddt, data\n'), ((2241, 2431), 'ddt.data', 'data', (['((0, 0, 1), None)', '((0, 1, 1), None)', '((0, 1, 2), None)', '((0, 1, (2, 2)), None)', '((1, 0, 1), 0)', '((1, 1, 1), 0)', '((1, 1, 2), 0)', '((1, 1, (2, 2)), 0)', '((2, 2, (2, 2)), [0, 0])'], {}), '(((0, 0, 1), None), ((0, 1, 1), None), ((0, 1, 2), None), ((0, 1, (2, 2\n )), None), ((1, 0, 1), 0), ((1, 1, 1), 0), ((1, 1, 2), 0), ((1, 1, (2, \n 2)), 0), ((2, 2, (2, 2)), [0, 0]))\n', (2245, 2431), False, 'from ddt import ddt, data\n'), ((3257, 3272), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3270, 3272), False, 'import unittest\n'), ((1070, 1097), 'numpy.zeros', 'np.zeros', (['self.output_shape'], {}), '(self.output_shape)\n', (1078, 1097), True, 'import numpy as np\n'), ((1026, 1053), 'numpy.zeros', 'np.zeros', (['self.output_shape'], {}), '(self.output_shape)\n', (1034, 1053), True, 'import numpy as np\n'), ((1274, 1321), 'numpy.zeros', 'np.zeros', (['(*self.output_shape, self.num_inputs)'], {}), '((*self.output_shape, self.num_inputs))\n', (1282, 1321), True, 'import numpy as np\n'), ((1409, 1457), 'numpy.zeros', 'np.zeros', (['(*self.output_shape, self.num_weights)'], {}), '((*self.output_shape, self.num_weights))\n', (1417, 1457), True, 'import numpy as np\n'), ((2809, 2838), 'numpy.zeros', 'np.zeros', (['network.num_weights'], {}), '(network.num_weights)\n', (2817, 2838), True, 'import numpy as np\n'), ((2144, 2173), 'numpy.zeros', 'np.zeros', (['network.num_weights'], {}), '(network.num_weights)\n', (2152, 2173), True, 'import numpy as np\n')]
|
import numpy as np
import torch
import gym
import argparse
import os
import copy
import utils
import TD3
import Q_TD3
import pandas as pd
import json,os
import time
#device = torch.device("cuda:4" if torch.cuda.is_available() else "cpu")
def eval_policy(policy, env_name,eval_episodes=10):
eval_env = gym.make(env_name)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
action = policy.select_action(np.array(state))
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
print("---------------------------------------")
print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
print("---------------------------------------")
return avg_reward
def eval_actionnoise_policy(policy, env_name,eval_episodes=10,policy_noise=0.1,noise_clip = 0.5,max_action = 1):
eval_env = gym.make(env_name)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
action = policy.select_action(np.array(state))
action = torch.Tensor(action)
noise = (torch.randn_like(action) * policy_noise).clamp(-noise_clip, noise_clip)
action = np.array((action + noise).clamp(-max_action, max_action)) #.cpu().detach().numpy()
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
return avg_reward
def eval_statenoise_policy(policy, env_name,eval_episodes=10,state_noise=0.1,noise_clip = 0.5):
eval_env = gym.make(env_name)
avg_reward = 0.
for _ in range(eval_episodes):
state, done = eval_env.reset(), False
while not done:
state = torch.Tensor(state)
noise = (torch.randn_like(state) * state_noise).clamp(-noise_clip, noise_clip)
state = state + noise
action = policy.select_action(np.array(state))
state, reward, done, _ = eval_env.step(action)
avg_reward += reward
avg_reward /= eval_episodes
return avg_reward
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--policy", default="TD3") # Policy name
parser.add_argument("--env", default="HalfCheetah-v3") # OpenAI gym environment name
parser.add_argument("--seed", default=0, type=int) # Sets Gym, PyTorch and Numpy seeds
parser.add_argument("--start_timesteps", default=1e4, type=int) # Time steps initial random policy is used
parser.add_argument("--eval_freq", default=5e3, type=int) # How often (time steps) we evaluate
parser.add_argument("--max_timesteps", default=1e6, type=int) # Max time steps to run environment
parser.add_argument("--expl_noise", default=0.1) # Std of Gaussian exploration noise
parser.add_argument("--batch_size", default=100, type=int) # Batch size for both actor and critic
parser.add_argument("--discount", default=0.99) # Discount factor
parser.add_argument("--tau", default=0.005) # Target network update rate
parser.add_argument("--policy_noise", default=0.2,type=float) # Noise added to target policy during critic update
parser.add_argument("--noise_clip", default=0.5,type=float) # Range to clip target policy noise
parser.add_argument("--policy_freq", default=2, type=int) # Frequency of delayed policy updates
parser.add_argument("--save_model", default="False") # Save model and optimizer parameters
parser.add_argument("--load_model", default="") # Model load file name, "" doesn't load, "default" uses file_name
parser.add_argument("--training_mode", default="Online") #training_mode Offline or Online
parser.add_argument("--cuda_device" , default= 1) # Choosing the CUDA device to run it on
parser.add_argument("--comment" , default= "none") # Comment changes file name for hyper paramter search
parser.add_argument("--noisy_testing" , default= "False") # Add noise to testing
parser.add_argument("--hidden_dim" , default= 64, type=int,help="hidden dim for Q-MLP")
parser.add_argument("--nf_fac" , default= 1.0,type=float,help="hidden dim reduction factor for quadratic neruon in for Q-MLP")
parser.add_argument("--init_zeros" , default= "False",help=" quadratic neuron weight initializersfor Q-MLP")
parser.add_argument("--pause_hour" , default= 0,type=int,help="pasue run of script for pause_hour hours.")
# choosing the device to run it on
args = parser.parse_args()
if args.pause_hour > 0: # Pause until a certain time
time.sleep(3600.0*args.pause_hour)
init_zeros= False
if args.init_zeros == "True":
init_zeros= True
policy_name = args.policy
if args.comment != "none":
policy_name = args.policy + args.comment
file_name = f"{policy_name}_{args.env}_{args.seed}_{args.training_mode}"
print("---------------------------------------")
print(f"Policy: {args.policy}, Env: {args.env}, Seed: {args.seed},Training_mode: {args.training_mode}")
print("---------------------------------------")
if args.save_model == "True" and not os.path.exists("./models"):
os.makedirs("./models")
#Set the device for the job
torch.cuda.set_device(int(args.cuda_device))
device_name = str("cuda:" + str(args.cuda_device))
print("The current device is: ", device_name )
device = torch.device( device_name if torch.cuda.is_available() else "cpu")
env = gym.make(args.env)
# Set seeds
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.observation_space.shape[0]
state_max = env.observation_space.shape
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
kwargs = {
"state_dim": state_dim,
"action_dim": action_dim,
"max_action": max_action,
"discount": args.discount,
"tau": args.tau,
}
# Initialize policy
#print(args.policy == "TD3")
#print(args.policy)
if args.policy == "TD3":
# Target policy smoothing is scaled wrt the action scale
kwargs["env"] = env
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
kwargs["device"] = device
policy = TD3.TD3(**kwargs)
variant = dict(
algorithm= policy_name,
env=args.env,
)
elif args.policy == "Q_TD3":
# Target policy smoothing is scaled wrt the action scale
kwargs["env"] = env
kwargs["policy_noise"] = args.policy_noise * max_action
kwargs["noise_clip"] = args.noise_clip * max_action
kwargs["policy_freq"] = args.policy_freq
kwargs["device"] = device
kwargs["hidden_dim"] = args.hidden_dim
kwargs["nf_fac"] = args.nf_fac
kwargs["init_zeros"] = init_zeros
policy = Q_TD3.TD3(**kwargs)
variant = dict(
algorithm="Q_TD3",
env=args.env,
)
else:
raise Exception("invaled policy!!!")
if not os.path.exists(f"./data/{args.env}/{policy_name}/seed{args.seed}"):
os.makedirs(f'./data/{args.env}/{policy_name}/seed{args.seed}')
with open(f'./data/{args.env}/{policy_name}/seed{int(args.seed)}/variant.json', 'w') as outfile:
json.dump(variant,outfile)
noise_ls = [0.05,0.1,0.15,0.2,0.25]
# if args.noisy_testing == "True":
# for count in range(0,len(noise_ls)):
# os.makedirs(f'./data/{args.env}/{policy_name}/seed{args.seed}/action_noise{count}')
# os.makedirs(f'./data/{args.env}/{policy_name}/seed{args.seed}/state_noise{count}')
if args.load_model != "":
policy_file = file_name if args.load_model == "default" else args.load_model
policy.load(f"./models/{policy_file}")
replay_buffer = utils.ReplayBuffer(state_dim, action_dim)
# Evaluate untrained policy
evaluations = [eval_policy(policy, args.env)]
evaluations_statenoise = []
evaluations_actionnoise = []
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num = 0
ep_reward_list = []
for t in range(int(args.max_timesteps)):
episode_timesteps += 1
# Select action randomly or according to policy
if t < args.start_timesteps:
action = env.action_space.sample()
else:
action = (
policy.select_action(np.array(state))
+ np.random.normal(0, max_action * args.expl_noise, size=action_dim)
).clip(-max_action, max_action)
# Perform action
next_state, reward, done, _ = env.step(action)
done_bool = float(done) if episode_timesteps < env._max_episode_steps else 0
# Store data in replay buffer
replay_buffer.add(state, action, next_state, reward, done_bool)
state = next_state
# Store observation and reward bounds
policy.obs_upper_bound = np.amax(state) if policy.obs_upper_bound < np.amax(state) else policy.obs_upper_bound
policy.obs_lower_bound = np.amin(state) if policy.obs_lower_bound > np.amin(state) else policy.obs_lower_bound
policy.reward_lower_bound = (reward) if policy.reward_lower_bound > reward else policy.reward_lower_bound
policy.reward_upper_bound = (reward) if policy.reward_upper_bound < reward else policy.reward_upper_bound
episode_reward += reward
# Train agent after collecting sufficient data
if args.training_mode == 'Online':
if t >= args.start_timesteps:
policy.train(replay_buffer, args.batch_size) #,train_steps = 1)
if done:
ep_reward_list.append(episode_reward)
print(f"Total T: {t+1} Episode Num: {episode_num+1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}")
if args.training_mode == 'Offline':
if t >= args.start_timesteps:
policy.train(replay_buffer, args.batch_size,train_steps = episode_timesteps)
# Reset environment
state, done = env.reset(), False
episode_reward = 0
episode_timesteps = 0
episode_num += 1
# Evaluate episode
if (t + 1) % args.eval_freq == 0:
evaluations.append(eval_policy(policy, args.env))
if args.save_model == "True":
policy.save(f"./models/{file_name}")
data = np.array(evaluations)
df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
df['Timesteps'] = df['index'] * args.eval_freq
df['env'] = args.env
df['algorithm_name'] = policy_name#args.policy
df.to_csv(f'./data/{args.env}/{policy_name}/seed{args.seed}/progress.csv', index = False)
if args.noisy_testing == "True":
#count = -1
for noise in noise_ls:
#count +=1
evaluations_actionnoise.append(eval_actionnoise_policy(policy, args.env,policy_noise=noise,max_action = max_action))
data = np.array(evaluations_actionnoise)
df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
df['Timesteps'] = df['index'] * args.eval_freq
df['env'] = args.env
df['algorithm_name'] = policy_name#args.policy
df.to_csv(f'./data/{args.env}/{policy_name}/seed{args.seed}/action_noise_progress.csv', index = False)
evaluations_statenoise.append(eval_statenoise_policy(policy, args.env,state_noise=noise/5))
data = np.array(evaluations_statenoise)
df = pd.DataFrame(data=data,columns=["Average Return"]).reset_index()
df['Timesteps'] = df['index'] * args.eval_freq
df['env'] = args.env
df['algorithm_name'] = policy_name#args.policy
df.to_csv(f'./data/{args.env}/{policy_name}/seed{args.seed}/state_noise_progress.csv', index = False)
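# Illustrative command lines; the script filename below is a placeholder, while the
# flags map directly onto the argparse options defined above.
#   python run_td3.py --policy TD3 --env HalfCheetah-v3 --seed 0
#   python run_td3.py --policy Q_TD3 --env HalfCheetah-v3 --seed 0 --hidden_dim 128 --init_zeros True
# Evaluation curves are written to ./data/<env>/<policy_name>/seed<seed>/progress.csv.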
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.amin",
"numpy.random.normal",
"utils.ReplayBuffer",
"pandas.DataFrame",
"os.path.exists",
"TD3.TD3",
"torch.Tensor",
"json.dump",
"torch.randn_like",
"torch.manual_seed",
"time.sleep",
"torch.cuda.is_available",
"gym.make",
"os.makedirs",
"numpy.amax",
"numpy.array",
"Q_TD3.TD3"
] |
[((313, 331), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (321, 331), False, 'import gym\n'), ((908, 926), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (916, 926), False, 'import gym\n'), ((1530, 1548), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (1538, 1548), False, 'import gym\n'), ((2019, 2044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2042, 2044), False, 'import argparse\n'), ((5464, 5482), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (5472, 5482), False, 'import gym\n'), ((5518, 5546), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (5535, 5546), False, 'import torch\n'), ((5548, 5573), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (5562, 5573), True, 'import numpy as np\n'), ((7629, 7670), 'utils.ReplayBuffer', 'utils.ReplayBuffer', (['state_dim', 'action_dim'], {}), '(state_dim, action_dim)\n', (7647, 7670), False, 'import utils\n'), ((4614, 4650), 'time.sleep', 'time.sleep', (['(3600.0 * args.pause_hour)'], {}), '(3600.0 * args.pause_hour)\n', (4624, 4650), False, 'import time\n'), ((5167, 5190), 'os.makedirs', 'os.makedirs', (['"""./models"""'], {}), "('./models')\n", (5178, 5190), False, 'import json, os\n'), ((6265, 6282), 'TD3.TD3', 'TD3.TD3', ([], {}), '(**kwargs)\n', (6272, 6282), False, 'import TD3\n'), ((6914, 6980), 'os.path.exists', 'os.path.exists', (['f"""./data/{args.env}/{policy_name}/seed{args.seed}"""'], {}), "(f'./data/{args.env}/{policy_name}/seed{args.seed}')\n", (6928, 6980), False, 'import json, os\n'), ((6984, 7047), 'os.makedirs', 'os.makedirs', (['f"""./data/{args.env}/{policy_name}/seed{args.seed}"""'], {}), "(f'./data/{args.env}/{policy_name}/seed{args.seed}')\n", (6995, 7047), False, 'import json, os\n'), ((7148, 7175), 'json.dump', 'json.dump', (['variant', 'outfile'], {}), '(variant, outfile)\n', (7157, 7175), False, 'import json, os\n'), ((1096, 1116), 'torch.Tensor', 'torch.Tensor', (['action'], {}), '(action)\n', (1108, 1116), False, 'import torch\n'), ((1667, 1686), 'torch.Tensor', 'torch.Tensor', (['state'], {}), '(state)\n', (1679, 1686), False, 'import torch\n'), ((5137, 5163), 'os.path.exists', 'os.path.exists', (['"""./models"""'], {}), "('./models')\n", (5151, 5163), False, 'import json, os\n'), ((5413, 5438), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5436, 5438), False, 'import torch\n'), ((6768, 6787), 'Q_TD3.TD3', 'Q_TD3.TD3', ([], {}), '(**kwargs)\n', (6777, 6787), False, 'import Q_TD3\n'), ((8619, 8633), 'numpy.amax', 'np.amax', (['state'], {}), '(state)\n', (8626, 8633), True, 'import numpy as np\n'), ((8732, 8746), 'numpy.amin', 'np.amin', (['state'], {}), '(state)\n', (8739, 8746), True, 'import numpy as np\n'), ((9900, 9921), 'numpy.array', 'np.array', (['evaluations'], {}), '(evaluations)\n', (9908, 9921), True, 'import numpy as np\n'), ((472, 487), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (480, 487), True, 'import numpy as np\n'), ((1067, 1082), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1075, 1082), True, 'import numpy as np\n'), ((1839, 1854), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (1847, 1854), True, 'import numpy as np\n'), ((8662, 8676), 'numpy.amax', 'np.amax', (['state'], {}), '(state)\n', (8669, 8676), True, 'import numpy as np\n'), ((8775, 8789), 'numpy.amin', 'np.amin', (['state'], {}), '(state)\n', (8782, 8789), True, 'import numpy as np\n'), ((9930, 9981), 'pandas.DataFrame', 
'pd.DataFrame', ([], {'data': 'data', 'columns': "['Average Return']"}), "(data=data, columns=['Average Return'])\n", (9942, 9981), True, 'import pandas as pd\n'), ((10463, 10496), 'numpy.array', 'np.array', (['evaluations_actionnoise'], {}), '(evaluations_actionnoise)\n', (10471, 10496), True, 'import numpy as np\n'), ((10942, 10974), 'numpy.array', 'np.array', (['evaluations_statenoise'], {}), '(evaluations_statenoise)\n', (10950, 10974), True, 'import numpy as np\n'), ((1129, 1153), 'torch.randn_like', 'torch.randn_like', (['action'], {}), '(action)\n', (1145, 1153), False, 'import torch\n'), ((1699, 1722), 'torch.randn_like', 'torch.randn_like', (['state'], {}), '(state)\n', (1715, 1722), False, 'import torch\n'), ((8183, 8249), 'numpy.random.normal', 'np.random.normal', (['(0)', '(max_action * args.expl_noise)'], {'size': 'action_dim'}), '(0, max_action * args.expl_noise, size=action_dim)\n', (8199, 8249), True, 'import numpy as np\n'), ((8160, 8175), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (8168, 8175), True, 'import numpy as np\n'), ((10507, 10558), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['Average Return']"}), "(data=data, columns=['Average Return'])\n", (10519, 10558), True, 'import pandas as pd\n'), ((10985, 11036), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data', 'columns': "['Average Return']"}), "(data=data, columns=['Average Return'])\n", (10997, 11036), True, 'import pandas as pd\n')]
|
from typing import Tuple, List
import numpy as np
from .types import GridShape, ReceptiveFieldRect
def estimate_rf_from_gradient(receptive_field_grad: np.ndarray) -> ReceptiveFieldRect:
"""
    Given an input gradient tensor of shape [N, W, H, C], it returns the
    estimated size of the gradient `blob` in the W-H directions, i.e. this
    function computes the extent of the gradient along the W and H axes for a single feature map.
:param receptive_field_grad: a numpy tensor with gradient values
obtained for certain feature map
:return: a corresponding ReceptiveFieldRect
"""
receptive_field_grad = np.array(receptive_field_grad).mean(0).mean(-1)
binary_map: np.ndarray = (receptive_field_grad[:, :] > 0)
x_cs: np.ndarray = binary_map.sum(-1) >= 1
y_cs: np.ndarray = binary_map.sum(0) >= 1
x = np.arange(len(x_cs))
y = np.arange(len(y_cs))
width = x_cs.sum()
height = y_cs.sum()
x = np.sum(x * x_cs) / width
y = np.sum(y * y_cs) / height
return ReceptiveFieldRect(x, y, width, height)
def estimate_rf_from_gradients(
receptive_field_grads: List[np.ndarray]
) -> List[ReceptiveFieldRect]:
"""
    Given a list of input gradient tensors, each of shape [N, W, H, C], it returns the
    estimated size of the gradient `blob` in the W-H directions, i.e. this
    function computes the extent of the gradient along the W and H axes for each feature map.
:param receptive_field_grads: a list of numpy tensor with gradient values
obtained for different feature maps
:return: a list of corresponding ReceptiveFieldRect
"""
return [
estimate_rf_from_gradient(receptive_field_grad)
for receptive_field_grad in receptive_field_grads
]
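# A small numerical sketch of estimate_rf_from_gradient, assuming the gradient "blob"
# is a rectangle of positive values; the shapes and values are illustrative only.
# (Run it via the enclosing package, since this module uses a relative import.)
if __name__ == "__main__":
    grad = np.zeros((1, 10, 10, 1))  # [N, W, H, C]
    grad[0, 2:5, 3:7, 0] = 1.0        # a 3 x 4 patch of non-zero gradient
    rect = estimate_rf_from_gradient(grad)
    # Expected: ReceptiveFieldRect(3.0, 4.5, 3, 4) - the centre and extent of the patch.
    print(rect)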
|
[
"numpy.array",
"numpy.sum"
] |
[((870, 886), 'numpy.sum', 'np.sum', (['(x * x_cs)'], {}), '(x * x_cs)\n', (876, 886), True, 'import numpy as np\n'), ((900, 916), 'numpy.sum', 'np.sum', (['(y * y_cs)'], {}), '(y * y_cs)\n', (906, 916), True, 'import numpy as np\n'), ((574, 604), 'numpy.array', 'np.array', (['receptive_field_grad'], {}), '(receptive_field_grad)\n', (582, 604), True, 'import numpy as np\n')]
|
"""
Graph related functions.
"""
import itertools
import json
from networkx.readwrite import json_graph
import networkx as nx
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from scipy.special import comb
import trilearn.auxiliary_functions
def from_json_file(filename):
"""From json graph to graph.
Args:
filename (string): Filename of json graph.
Returns:
NetworksX graph: NetworkX version of the json graph.
"""
with open(filename) as data_file:
json_G = json.load(data_file)
return json_graph.node_link_graph(json_G)
def replace_node(graph, node, new_node):
"""Replaces node by new_node in graph.
Args:
graph (NetworkX graph): A graph.
node (hashable object): A node.
new_node (hashable object): Another node.
"""
graph.add_node(new_node)
graph.add_edges_from([(new_node, n) for n in graph.neighbors(node)])
graph.remove_node(node)
def plot(graph, filename, layout="dot"):
""" Plots a networkx graph and saves it to filename.
Args:
graph (NetworkX graph): A graph.
        filename (string): The filename.
        layout (string): Graphviz layout program used to position the nodes (default "dot").
"""
agraph = nx.nx_agraph.to_agraph(graph)
agraph.layout(layout)
agraph.draw(filename)
def graph_to_tuple(graph):
""" Takes a NetworkX graph and returns a tuplized adjacency matrix.
Args:
graph (NetworkX graph): A graph
Returns:
tuple: A flattened adjacency matrix in tuple format.
Example:
>>> g.nodes
NodeView((0, 1, 2, 3, 4))
>>> g.edges
EdgeView([(0, 1), (0, 2), (1, 2), (2, 3), (3, 4)])
>>> glib.graph_to_tuple(g)
(0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0)
"""
p = graph.order()
mat = np.array(nx.to_numpy_matrix(graph), dtype=int).reshape(p*p)
return tuple(mat)
def tuple_to_graph(vecmat):
""" Takes a tuple of the rows in an adjacency matrix
and returns a nx.graph. This is a kind of serialization of a graph.
Args:
vecmat (tuple): tuple of the rows in an adjacency matrix.
Returns:
NetworkX graph
"""
p = int(np.sqrt(len(vecmat)))
mat = np.array(vecmat).reshape(p, p)
mat += mat.T
mat = np.matrix(mat)
return nx.from_numpy_matrix(mat)
def hash_graph(graph):
""" A hash value of the tupelized version of graph.
Args:
graph (NetworkX graph): A graph
Returns:
int: A hash value of a graph.
Example:
>>> g = dlib.sample(5)
>>> g.nodes
NodeView((0, 1, 2, 3, 4))
>>> g.edges
EdgeView([(0, 1), (0, 3), (1, 2), (1, 3), (2, 3)])
>>> glib.hash_graph(g)
249771633555694270
"""
return hash(str(graph_to_tuple(graph)))
def true_distribution(seqdist, filename):
"""Calculating true distribution for a graph with 6 nodes.
Args:
seqdist (SequentialDistribution): A (Sequential) distribution for a decomposable graph.
filename (string): Filename to save marginal edge distribution.
Returns:
dict: The graph distribution evaluated for each graph.
"""
p = seqdist.p
no_chordal = 0
true_heatmap = np.matrix(np.zeros(p*p).reshape(p, p))
max_ll = -100000
graph_ll = {}
graph_post = {}
for val in itertools.product(*([[0, 1]] * comb(p, 2, exact=True))):  # exact=True returns an int, as list repetition requires
vec_mat = [0]
vec_mat += list(val[0:5])
vec_mat += [0]*2
vec_mat += list(val[5:9])
vec_mat += [0]*3
vec_mat += list(val[9:12])
vec_mat += [0]*4
vec_mat += list(val[12:14])
vec_mat += [0]*5
vec_mat += [val[14]]
vec_mat += [0]*6
mat = np.array(vec_mat).reshape(p, p)
mat += mat.T
mat = np.matrix(mat)
graph1 = nx.from_numpy_matrix(mat)
if nx.is_chordal(graph1):
no_chordal += 1
logl = seqdist.log_likelihood(graph1)
if logl > max_ll:
max_ll = logl
graph_ll[tuple(vec_mat)] = logl
# Rescaled normalizing constant
norm_const_rescaled = sum([np.exp(rp-max_ll)
for g, rp in graph_ll.iteritems()])
for vec_mat, ll in graph_ll.iteritems():
mat = np.array(vec_mat).reshape(p, p)
mat += mat.T
mat = np.matrix(mat)
graph1 = nx.from_numpy_matrix(mat)
if nx.is_chordal(graph1):
graph_post[vec_mat] = np.exp(ll-max_ll) / norm_const_rescaled
true_heatmap += mat * graph_post[vec_mat]
# Mask the upper triangle so only the lower-triangular edge probabilities are drawn.
mask = np.zeros_like(true_heatmap)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
sns.heatmap(true_heatmap, mask=mask, annot=False,
cmap="Blues",
xticklabels=range(1, p+1),
yticklabels=range(1, p+1),
vmin=0.0, vmax=1.0, square=True,
cbar=True)
plt.yticks(rotation=0)
plt.savefig(filename + "_edge_heatmap_cbar.eps",
format="eps", bbox_inches='tight', dpi=100)
plt.clf()
trilearn.auxiliary_functions.plot_matrix(np.array(true_heatmap), filename, "png",
title="Czech Autoworkers posterior heatmap, lambda=" +
str(seqdist.cell_alpha))
return graph_post
def plot_adjmat(graph, cbar=False):
""" Plots the adjecency matrix of graph.
Args:
graph (NetworkX graph): A graph.
"""
heatmap = nx.to_numpy_matrix(graph)
mask = np.zeros_like(heatmap)
mask[np.triu_indices_from(mask)] = True
with sns.axes_style("white"):
sns.heatmap(heatmap, mask=mask, annot=False,
cmap="Blues",
vmin=0.0, vmax=1.0, square=True,
cbar=cbar, xticklabels=5, yticklabels=5)
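# Illustrative usage sketch (the graph and parameters are made up; a working
# matplotlib backend is needed to display the figure):
#
#   from matplotlib import pyplot as plt
#   g = nx.gnp_random_graph(10, 0.3, seed=1)
#   plot_adjmat(g, cbar=True)
#   plt.show()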
|
[
"networkx.readwrite.json_graph.node_link_graph",
"numpy.matrix",
"numpy.zeros_like",
"json.load",
"networkx.from_numpy_matrix",
"seaborn.axes_style",
"seaborn.heatmap",
"scipy.special.comb",
"networkx.nx_agraph.to_agraph",
"numpy.zeros",
"networkx.is_chordal",
"networkx.to_numpy_matrix",
"numpy.array",
"numpy.exp",
"numpy.triu_indices_from"
] |
[((535, 569), 'networkx.readwrite.json_graph.node_link_graph', 'json_graph.node_link_graph', (['json_G'], {}), '(json_G)\n', (561, 569), False, 'from networkx.readwrite import json_graph\n'), ((1151, 1180), 'networkx.nx_agraph.to_agraph', 'nx.nx_agraph.to_agraph', (['graph'], {}), '(graph)\n', (1173, 1180), True, 'import networkx as nx\n'), ((2237, 2251), 'numpy.matrix', 'np.matrix', (['mat'], {}), '(mat)\n', (2246, 2251), True, 'import numpy as np\n'), ((2263, 2288), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['mat'], {}), '(mat)\n', (2283, 2288), True, 'import networkx as nx\n'), ((5447, 5472), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['graph'], {}), '(graph)\n', (5465, 5472), True, 'import networkx as nx\n'), ((5484, 5506), 'numpy.zeros_like', 'np.zeros_like', (['heatmap'], {}), '(heatmap)\n', (5497, 5506), True, 'import numpy as np\n'), ((502, 522), 'json.load', 'json.load', (['data_file'], {}), '(data_file)\n', (511, 522), False, 'import json\n'), ((3744, 3758), 'numpy.matrix', 'np.matrix', (['mat'], {}), '(mat)\n', (3753, 3758), True, 'import numpy as np\n'), ((3776, 3801), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['mat'], {}), '(mat)\n', (3796, 3801), True, 'import networkx as nx\n'), ((3814, 3835), 'networkx.is_chordal', 'nx.is_chordal', (['graph1'], {}), '(graph1)\n', (3827, 3835), True, 'import networkx as nx\n'), ((4298, 4312), 'numpy.matrix', 'np.matrix', (['mat'], {}), '(mat)\n', (4307, 4312), True, 'import numpy as np\n'), ((4330, 4355), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['mat'], {}), '(mat)\n', (4350, 4355), True, 'import networkx as nx\n'), ((4367, 4388), 'networkx.is_chordal', 'nx.is_chordal', (['graph1'], {}), '(graph1)\n', (4380, 4388), True, 'import networkx as nx\n'), ((5083, 5105), 'numpy.array', 'np.array', (['true_heatmap'], {}), '(true_heatmap)\n', (5091, 5105), True, 'import numpy as np\n'), ((5516, 5542), 'numpy.triu_indices_from', 'np.triu_indices_from', (['mask'], {}), '(mask)\n', (5536, 5542), True, 'import numpy as np\n'), ((5560, 5583), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (5574, 5583), True, 'import seaborn as sns\n'), ((5593, 5730), 'seaborn.heatmap', 'sns.heatmap', (['heatmap'], {'mask': 'mask', 'annot': '(False)', 'cmap': '"""Blues"""', 'vmin': '(0.0)', 'vmax': '(1.0)', 'square': '(True)', 'cbar': 'cbar', 'xticklabels': '(5)', 'yticklabels': '(5)'}), "(heatmap, mask=mask, annot=False, cmap='Blues', vmin=0.0, vmax=\n 1.0, square=True, cbar=cbar, xticklabels=5, yticklabels=5)\n", (5604, 5730), True, 'import seaborn as sns\n'), ((2179, 2195), 'numpy.array', 'np.array', (['vecmat'], {}), '(vecmat)\n', (2187, 2195), True, 'import numpy as np\n'), ((4086, 4105), 'numpy.exp', 'np.exp', (['(rp - max_ll)'], {}), '(rp - max_ll)\n', (4092, 4105), True, 'import numpy as np\n'), ((4532, 4555), 'seaborn.axes_style', 'sns.axes_style', (['"""white"""'], {}), "('white')\n", (4546, 4555), True, 'import seaborn as sns\n'), ((1780, 1805), 'networkx.to_numpy_matrix', 'nx.to_numpy_matrix', (['graph'], {}), '(graph)\n', (1798, 1805), True, 'import networkx as nx\n'), ((3200, 3215), 'numpy.zeros', 'np.zeros', (['(p * p)'], {}), '(p * p)\n', (3208, 3215), True, 'import numpy as np\n'), ((3334, 3344), 'scipy.special.comb', 'comb', (['p', '(2)'], {}), '(p, 2)\n', (3338, 3344), False, 'from scipy.special import comb\n'), ((3677, 3694), 'numpy.array', 'np.array', (['vec_mat'], {}), '(vec_mat)\n', (3685, 3694), True, 'import numpy as np\n'), ((4231, 4248), 'numpy.array', 'np.array', 
(['vec_mat'], {}), '(vec_mat)\n', (4239, 4248), True, 'import numpy as np\n'), ((4424, 4443), 'numpy.exp', 'np.exp', (['(ll - max_ll)'], {}), '(ll - max_ll)\n', (4430, 4443), True, 'import numpy as np\n')]
|
import sys
if sys.version_info < (3,):
range = xrange
import numpy as np
import pandas as pd
import scipy.linalg as la
import scipy.sparse as sp
import scipy.stats as ss
from scipy.stats import multivariate_normal
from .. import arma
from .. import output as op
from .. import tests as tst
from .. import tsm as tsm
from .. import data_check as dc
from .kernels import *
class GPNARX(tsm.TSM):
""" Inherits time series methods from TSM class.
**** GAUSSIAN PROCESS NONLINEAR AUTOREGRESSIVE (GP-NARX) MODELS ****
Parameters
----------
data : pd.DataFrame or np.array
Field to specify the time series data that will be used.
ar : int
Field to specify how many AR terms the model will have.
kernel : kernel object
For example, SquaredExponential() or OrnsteinUhlenbeck()
integ : int (default : 0)
Specifies how many time to difference the time series.
target : str (pd.DataFrame) or int (np.array)
Specifies which column name or array index to use. By default, first
column/array will be selected as the dependent variable.
"""
def __init__(self, data, ar, kernel, integ=0, target=None):
# Initialize TSM object
super(GPNARX,self).__init__('GPNARX')
# Latent variables
self.ar = ar
if ar < 1:
raise ValueError('Cannot have less than 1 AR term!')
self.integ = integ
self.max_lag = self.ar
self.model_name = 'GPNARX(' + str(self.ar) + ')'
self._z_hide = 0 # Whether to cutoff variance latent variables from results
self.supported_methods = ["MLE","PML","Laplace","M-H","BBVI"]
self.default_method = "MLE"
self.multivariate_model = False
# Format the data
self.data, self.data_name, self.is_pandas, self.index = dc.data_check(data,target)
self.data_original = self.data.copy()
# Difference data
for order in range(self.integ):
self.data = np.diff(self.data)
self.data_name = "Differenced " + self.data_name
self.index = self.index[self.integ:len(self.index)]
# Apply normalization
self.data_full = self.data.copy()
self.data = np.array(self.data_full[self.max_lag:self.data_full.shape[0]]) # adjust for lags
self._norm_mean = np.mean(self.data)
self._norm_std = np.std(self.data)
self.data = (self.data - self._norm_mean) / self._norm_std
self.data_full = (self.data_full - self._norm_mean) / self._norm_std
self.kernel = kernel
self.kernel.X = self.X().T
# Define latent variables
self._create_latent_variables()
self.neg_loglik = self.full_neg_loglik
def _alpha(self, L):
""" Covariance-derived term to construct expectations. See Rasmussen & Williams.
Parameters
----------
L : np.ndarray
Cholesky triangular
Returns
----------
np.ndarray (alpha)
"""
return la.cho_solve((L.T, True), la.cho_solve((L, True), np.transpose(self.data)))
def _construct_predict(self, beta, h):
""" Creates h-step ahead forecasts for the Gaussian process
Parameters
----------
beta : np.array
Contains untransformed starting values for the latent variables
h: int
How many steps ahead to forecast
Returns
----------
- predictions
- variance of predictions
"""
# Refactor this entire code in future
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
Xstart = self.X().copy()
Xstart = [i for i in Xstart]
predictions = np.zeros(h)
variances = np.zeros(h)
for step in range(0,h):
Xstar = []
for lag in range(0,self.max_lag):
if lag == 0:
if step == 0:
Xstar.append([self.data[-1]])
Xstart[0] = np.append(Xstart[0],self.data[-1])
else:
Xstar.append([predictions[step-1]])
Xstart[0] = np.append(Xstart[0],predictions[step-1])
else:
Xstar.append([Xstart[lag-1][-2]])
Xstart[lag] = np.append(Xstart[lag],Xstart[lag-1][-2])
Kstar = self.kernel.Kstar(parm, np.transpose(np.array(Xstar)))
L = self._L(parm)
alpha = self._alpha(L)
predictions[step] = np.dot(np.transpose(Kstar), alpha)
v = la.cho_solve((L, True), Kstar)
variances[step] = self.kernel.Kstarstar(parm, np.transpose(np.array(Xstar))) - np.dot(v.T, v)
return predictions, variances, predictions - 1.98*np.power(variances,0.5), predictions + 1.98*np.power(variances,0.5)
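# Background note (reading aid only, not part of the original implementation):
# the textbook GP predictive equations (Rasmussen & Williams, Algorithm 2.1)
# are, with L = chol(K + sigma_n^2 I) and alpha = L^T \ (L \ y),
#   mean:     m* = k(X, x*)^T alpha
#   variance: v  = L \ k(X, x*),   var* = k(x*, x*) - v^T v
# where "\" denotes a triangular solve.  The +/- 1.98*sqrt(var) terms returned
# above are approximate 95% prediction intervals.  Note that the helpers here
# call scipy's cho_solve rather than explicit triangular solves, so they should
# be read against the reference with that in mind.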
def _create_latent_variables(self):
""" Creates model latent variables
Returns
----------
None (changes model attributes)
"""
# Create latent variables
for no, i in enumerate(self.kernel.build_latent_variables()):
self.latent_variables.add_z(i[0],i[1],i[2])
self.latent_variables.z_list[no].start = i[3]
self.z_no = len(self.kernel.build_latent_variables())
# Use an ARIMA model to find starting point for the initial noise latent variable
arma_start = arma.ARIMA(self.data, ar=self.ar, ma=0, integ=self.integ)
x = arma_start.fit()
arma_starting_values = arma_start.latent_variables.get_z_values()
self.latent_variables.z_list[0].start = np.log(np.exp(np.power(arma_starting_values[-1],2)))
def _L(self, parm):
""" Creates cholesky decomposition of covariance matrix
Parameters
----------
parm : np.array
Contains transformed latent variables
Returns
----------
The cholesky decomposition (L) of K
"""
return np.linalg.cholesky(self.kernel.K(parm) + np.identity(self.X().shape[1])*parm[0])
def X(self):
""" Creates design matrix of variables to use in GP regression
Returns
----------
The design matrix
"""
if self.ar == 1:
return np.array([self.data_full[(self.max_lag-1):-1]])
else:
for i in range(0,self.ar):
datapoint = self.data_full[(self.max_lag-i-1):-i-1]
if i == 0:
X = datapoint
else:
X = np.vstack((X,datapoint))
return X
def expected_values(self, beta):
""" Expected values of the function given the covariance matrix and hyperparameters
Parameters
----------
beta : np.ndarray
Contains untransformed values for latent variables
Returns
----------
The expected values of the function
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
alpha = self._alpha(L)
return np.dot(np.transpose(self.kernel.K(parm)), alpha)
def variance_values(self, beta):
""" Covariance matrix for the estimated function
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
Covariance matrix for the estimated function
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
v = la.cho_solve((L, True), self.kernel.K(parm))
return self.kernel.K(parm) - np.dot(v.T, v)
def full_neg_loglik(self, beta):
""" Creates the negative log marginal likelihood of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
The negative log marginal logliklihood of the model
"""
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
L = self._L(parm)
return -(-0.5*(np.dot(np.transpose(self.data),self._alpha(L))) - np.log(np.diag(L)).sum() - (self.data.shape[0]/2.0)*np.log(2.0*np.pi))
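# Background note (reading aid only): the expression above is the negative log
# marginal likelihood of a GP with Gaussian observation noise,
#   -log p(y|X) = 0.5 * y^T alpha + sum_i log L_ii + (n/2) * log(2*pi),
# where alpha = (K + sigma_n^2 I)^{-1} y and L is the Cholesky factor of
# K + sigma_n^2 I, so that sum_i log L_ii = 0.5 * log det(K + sigma_n^2 I).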
def plot_fit(self, intervals=True, **kwargs):
""" Plots the fit of the Gaussian process model to the data
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
intervals : Boolean
Whether to plot uncertainty intervals or not
Returns
----------
None (plots the fit of the function)
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
date_index = self.index[self.max_lag:]
expectation = self.expected_values(self.latent_variables.get_z_values())
variance = self.variance_values(self.latent_variables.get_z_values())
upper = expectation + 1.98*np.power(np.diag(variance),0.5)
lower = expectation - 1.98*np.power(np.diag(variance),0.5)
plt.figure(figsize=figsize)
plt.subplot(2, 2, 1)
plt.title(self.data_name + " Raw")
plt.plot(date_index,self.data*self._norm_std + self._norm_mean,'k')
plt.subplot(2, 2, 2)
plt.title(self.data_name + " Raw and Expected")
plt.plot(date_index,self.data*self._norm_std + self._norm_mean,'k',alpha=0.2)
plt.plot(date_index,self.expected_values(self.latent_variables.get_z_values())*self._norm_std + self._norm_mean,'b')
plt.subplot(2, 2, 3)
plt.title(self.data_name + " Raw and Expected (with intervals)")
if intervals == True:
plt.fill_between(date_index, lower*self._norm_std + self._norm_mean, upper*self._norm_std + self._norm_mean, alpha=0.2)
plt.plot(date_index,self.data*self._norm_std + self._norm_mean,'k',alpha=0.2)
plt.plot(date_index,self.expected_values(self.latent_variables.get_z_values())*self._norm_std + self._norm_mean,'b')
plt.subplot(2, 2, 4)
plt.title("Expected " + self.data_name + " (with intervals)")
if intervals == True:
plt.fill_between(date_index, lower*self._norm_std + self._norm_mean, upper*self._norm_std + self._norm_mean, alpha=0.2)
plt.plot(date_index,self.expected_values(self.latent_variables.get_z_values())*self._norm_std + self._norm_mean,'b')
plt.show()
def plot_predict(self, h=5, past_values=20, intervals=True,**kwargs):
""" Plots forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
past_values : int (default : 20)
How many past observations to show on the forecast graph?
intervals : Boolean
Would you like to show 95% prediction intervals for the forecast?
Returns
----------
- Plot of the forecast
- Error bars, forecasted_values, plot_values, plot_index
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, variance, lower, upper = self._construct_predict(self.latent_variables.get_z_values(),h)
full_predictions = np.append(self.data,predictions)
full_lower = np.append(self.data,lower)
full_upper = np.append(self.data,upper)
date_index = self.shift_dates(h)
# Plot values (how far to look back)
plot_values = full_predictions[-h-past_values:]*self._norm_std + self._norm_mean
plot_index = date_index[-h-past_values:]
# Lower and upper intervals
lower = np.append(full_predictions[-h-1],lower)
upper = np.append(full_predictions[-h-1],upper)
plt.figure(figsize=figsize)
if intervals == True:
plt.fill_between(date_index[-h-1:],
lower*self._norm_std + self._norm_mean,
upper*self._norm_std + self._norm_mean,
alpha=0.2)
plt.plot(plot_index,plot_values)
plt.title("Forecast for " + self.data_name)
plt.xlabel("Time")
plt.ylabel(self.data_name)
plt.show()
def predict_is(self, h=5, fit_once=True):
""" Makes dynamic in-sample predictions with the estimated model
Parameters
----------
h : int (default : 5)
How many steps would you like to forecast?
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
Returns
----------
- pd.DataFrame with predicted values
"""
predictions = []
for t in range(0,h):
x = GPNARX(ar=self.ar,kernel=self.kernel,integ=self.integ,
data=self.data_original[:-h+t])
if fit_once is False:
x.fit(printer=False)
if t == 0:
if fit_once is True:
x.fit(printer=False)
saved_lvs = x.latent_variables
predictions = x.predict(1)
else:
if fit_once is True:
x.latent_variables = saved_lvs
predictions = pd.concat([predictions,x.predict(1)])
predictions.rename(columns={0:self.data_name}, inplace=True)
predictions.index = self.index[-h:]
return predictions
def plot_predict_is(self, h=5, fit_once=True, **kwargs):
""" Plots forecasts with the estimated model against data
(Simulated prediction with data)
Parameters
----------
h : int (default : 5)
How many steps to forecast
fit_once : boolean
(default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint
Returns
----------
- Plot of the forecast against data
"""
import matplotlib.pyplot as plt
import seaborn as sns
figsize = kwargs.get('figsize',(10,7))
plt.figure(figsize=figsize)
date_index = self.index[-h:]
predictions = self.predict_is(h, fit_once=fit_once)
data = self.data[-h:]
plt.plot(date_index,data*self._norm_std + self._norm_mean,label='Data')
plt.plot(date_index,predictions,label='Predictions',c='black')
plt.title(self.data_name)
plt.legend(loc=2)
plt.show()
def predict(self, h=5):
""" Makes forecast with the estimated model
Parameters
----------
h : int (default : 5)
How many steps ahead would you like to forecast?
Returns
----------
- pd.DataFrame with predicted values
"""
if self.latent_variables.estimated is False:
raise Exception("No latent variables estimated!")
else:
predictions, _, _, _ = self._construct_predict(self.latent_variables.get_z_values(),h)
predictions = predictions*self._norm_std + self._norm_mean
date_index = self.shift_dates(h)
result = pd.DataFrame(predictions)
result.rename(columns={0:self.data_name}, inplace=True)
result.index = date_index[-h:]
return result
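# Illustrative usage sketch (assumes the package is importable as pyflux and
# that a SquaredExponential kernel is available, as in the class docstring;
# the data and settings below are made up):
#
#   import numpy as np
#   import pyflux as pf
#   data = np.random.normal(size=200)
#   model = pf.GPNARX(data=data, ar=4, kernel=pf.SquaredExponential())
#   model.fit()                 # MLE by default
#   print(model.predict(h=5))   # 5-step-ahead forecast as a DataFrame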
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.diag",
"matplotlib.pyplot.fill_between",
"pandas.DataFrame",
"numpy.std",
"numpy.power",
"scipy.linalg.cho_solve",
"numpy.transpose",
"numpy.append",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.vstack",
"matplotlib.pyplot.subplot",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.zeros",
"numpy.diff",
"numpy.array",
"matplotlib.pyplot.xlabel"
] |
[((2273, 2335), 'numpy.array', 'np.array', (['self.data_full[self.max_lag:self.data_full.shape[0]]'], {}), '(self.data_full[self.max_lag:self.data_full.shape[0]])\n', (2281, 2335), True, 'import numpy as np\n'), ((2380, 2398), 'numpy.mean', 'np.mean', (['self.data'], {}), '(self.data)\n', (2387, 2398), True, 'import numpy as np\n'), ((2424, 2441), 'numpy.std', 'np.std', (['self.data'], {}), '(self.data)\n', (2430, 2441), True, 'import numpy as np\n'), ((3882, 3893), 'numpy.zeros', 'np.zeros', (['h'], {}), '(h)\n', (3890, 3893), True, 'import numpy as np\n'), ((3914, 3925), 'numpy.zeros', 'np.zeros', (['h'], {}), '(h)\n', (3922, 3925), True, 'import numpy as np\n'), ((9581, 9608), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (9591, 9608), True, 'import matplotlib.pyplot as plt\n'), ((9619, 9639), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(1)'], {}), '(2, 2, 1)\n', (9630, 9639), True, 'import matplotlib.pyplot as plt\n'), ((9648, 9682), 'matplotlib.pyplot.title', 'plt.title', (["(self.data_name + ' Raw')"], {}), "(self.data_name + ' Raw')\n", (9657, 9682), True, 'import matplotlib.pyplot as plt\n'), ((9693, 9764), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', '(self.data * self._norm_std + self._norm_mean)', '"""k"""'], {}), "(date_index, self.data * self._norm_std + self._norm_mean, 'k')\n", (9701, 9764), True, 'import matplotlib.pyplot as plt\n'), ((9770, 9790), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(2)'], {}), '(2, 2, 2)\n', (9781, 9790), True, 'import matplotlib.pyplot as plt\n'), ((9799, 9846), 'matplotlib.pyplot.title', 'plt.title', (["(self.data_name + ' Raw and Expected')"], {}), "(self.data_name + ' Raw and Expected')\n", (9808, 9846), True, 'import matplotlib.pyplot as plt\n'), ((9856, 9942), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', '(self.data * self._norm_std + self._norm_mean)', '"""k"""'], {'alpha': '(0.2)'}), "(date_index, self.data * self._norm_std + self._norm_mean, 'k',\n alpha=0.2)\n", (9864, 9942), True, 'import matplotlib.pyplot as plt\n'), ((10068, 10088), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(3)'], {}), '(2, 2, 3)\n', (10079, 10088), True, 'import matplotlib.pyplot as plt\n'), ((10097, 10161), 'matplotlib.pyplot.title', 'plt.title', (["(self.data_name + ' Raw and Expected (with intervals)')"], {}), "(self.data_name + ' Raw and Expected (with intervals)')\n", (10106, 10161), True, 'import matplotlib.pyplot as plt\n'), ((10360, 10446), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', '(self.data * self._norm_std + self._norm_mean)', '"""k"""'], {'alpha': '(0.2)'}), "(date_index, self.data * self._norm_std + self._norm_mean, 'k',\n alpha=0.2)\n", (10368, 10446), True, 'import matplotlib.pyplot as plt\n'), ((10572, 10592), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(2)', '(4)'], {}), '(2, 2, 4)\n', (10583, 10592), True, 'import matplotlib.pyplot as plt\n'), ((10602, 10663), 'matplotlib.pyplot.title', 'plt.title', (["('Expected ' + self.data_name + ' (with intervals)')"], {}), "('Expected ' + self.data_name + ' (with intervals)')\n", (10611, 10663), True, 'import matplotlib.pyplot as plt\n'), ((10987, 10997), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10995, 10997), True, 'import matplotlib.pyplot as plt\n'), ((14995, 15022), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (15005, 15022), True, 'import matplotlib.pyplot as plt\n'), ((15159, 15234), 
'matplotlib.pyplot.plot', 'plt.plot', (['date_index', '(data * self._norm_std + self._norm_mean)'], {'label': '"""Data"""'}), "(date_index, data * self._norm_std + self._norm_mean, label='Data')\n", (15167, 15234), True, 'import matplotlib.pyplot as plt\n'), ((15239, 15304), 'matplotlib.pyplot.plot', 'plt.plot', (['date_index', 'predictions'], {'label': '"""Predictions"""', 'c': '"""black"""'}), "(date_index, predictions, label='Predictions', c='black')\n", (15247, 15304), True, 'import matplotlib.pyplot as plt\n'), ((15310, 15335), 'matplotlib.pyplot.title', 'plt.title', (['self.data_name'], {}), '(self.data_name)\n', (15319, 15335), True, 'import matplotlib.pyplot as plt\n'), ((15344, 15361), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(2)'}), '(loc=2)\n', (15354, 15361), True, 'import matplotlib.pyplot as plt\n'), ((15373, 15383), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (15381, 15383), True, 'import matplotlib.pyplot as plt\n'), ((2033, 2051), 'numpy.diff', 'np.diff', (['self.data'], {}), '(self.data)\n', (2040, 2051), True, 'import numpy as np\n'), ((4760, 4790), 'scipy.linalg.cho_solve', 'la.cho_solve', (['(L, True)', 'Kstar'], {}), '((L, True), Kstar)\n', (4772, 4790), True, 'import scipy.linalg as la\n'), ((6464, 6511), 'numpy.array', 'np.array', (['[self.data_full[self.max_lag - 1:-1]]'], {}), '([self.data_full[self.max_lag - 1:-1]])\n', (6472, 6511), True, 'import numpy as np\n'), ((7985, 7999), 'numpy.dot', 'np.dot', (['v.T', 'v'], {}), '(v.T, v)\n', (7991, 7999), True, 'import numpy as np\n'), ((10209, 10337), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['date_index', '(lower * self._norm_std + self._norm_mean)', '(upper * self._norm_std + self._norm_mean)'], {'alpha': '(0.2)'}), '(date_index, lower * self._norm_std + self._norm_mean, \n upper * self._norm_std + self._norm_mean, alpha=0.2)\n', (10225, 10337), True, 'import matplotlib.pyplot as plt\n'), ((10710, 10838), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['date_index', '(lower * self._norm_std + self._norm_mean)', '(upper * self._norm_std + self._norm_mean)'], {'alpha': '(0.2)'}), '(date_index, lower * self._norm_std + self._norm_mean, \n upper * self._norm_std + self._norm_mean, alpha=0.2)\n', (10726, 10838), True, 'import matplotlib.pyplot as plt\n'), ((12045, 12078), 'numpy.append', 'np.append', (['self.data', 'predictions'], {}), '(self.data, predictions)\n', (12054, 12078), True, 'import numpy as np\n'), ((12103, 12130), 'numpy.append', 'np.append', (['self.data', 'lower'], {}), '(self.data, lower)\n', (12112, 12130), True, 'import numpy as np\n'), ((12155, 12182), 'numpy.append', 'np.append', (['self.data', 'upper'], {}), '(self.data, upper)\n', (12164, 12182), True, 'import numpy as np\n'), ((12484, 12526), 'numpy.append', 'np.append', (['full_predictions[-h - 1]', 'lower'], {}), '(full_predictions[-h - 1], lower)\n', (12493, 12526), True, 'import numpy as np\n'), ((12544, 12586), 'numpy.append', 'np.append', (['full_predictions[-h - 1]', 'upper'], {}), '(full_predictions[-h - 1], upper)\n', (12553, 12586), True, 'import numpy as np\n'), ((12597, 12624), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (12607, 12624), True, 'import matplotlib.pyplot as plt\n'), ((12899, 12932), 'matplotlib.pyplot.plot', 'plt.plot', (['plot_index', 'plot_values'], {}), '(plot_index, plot_values)\n', (12907, 12932), True, 'import matplotlib.pyplot as plt\n'), ((12944, 12987), 'matplotlib.pyplot.title', 'plt.title', (["('Forecast for ' + 
self.data_name)"], {}), "('Forecast for ' + self.data_name)\n", (12953, 12987), True, 'import matplotlib.pyplot as plt\n'), ((13000, 13018), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), "('Time')\n", (13010, 13018), True, 'import matplotlib.pyplot as plt\n'), ((13031, 13057), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['self.data_name'], {}), '(self.data_name)\n', (13041, 13057), True, 'import matplotlib.pyplot as plt\n'), ((13070, 13080), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13078, 13080), True, 'import matplotlib.pyplot as plt\n'), ((16091, 16116), 'pandas.DataFrame', 'pd.DataFrame', (['predictions'], {}), '(predictions)\n', (16103, 16116), True, 'import pandas as pd\n'), ((3146, 3169), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (3158, 3169), True, 'import numpy as np\n'), ((4716, 4735), 'numpy.transpose', 'np.transpose', (['Kstar'], {}), '(Kstar)\n', (4728, 4735), True, 'import numpy as np\n'), ((4882, 4896), 'numpy.dot', 'np.dot', (['v.T', 'v'], {}), '(v.T, v)\n', (4888, 4896), True, 'import numpy as np\n'), ((5813, 5850), 'numpy.power', 'np.power', (['arma_starting_values[-1]', '(2)'], {}), '(arma_starting_values[-1], 2)\n', (5821, 5850), True, 'import numpy as np\n'), ((12675, 12812), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['date_index[-h - 1:]', '(lower * self._norm_std + self._norm_mean)', '(upper * self._norm_std + self._norm_mean)'], {'alpha': '(0.2)'}), '(date_index[-h - 1:], lower * self._norm_std + self.\n _norm_mean, upper * self._norm_std + self._norm_mean, alpha=0.2)\n', (12691, 12812), True, 'import matplotlib.pyplot as plt\n'), ((4490, 4533), 'numpy.append', 'np.append', (['Xstart[lag]', 'Xstart[lag - 1][-2]'], {}), '(Xstart[lag], Xstart[lag - 1][-2])\n', (4499, 4533), True, 'import numpy as np\n'), ((4589, 4604), 'numpy.array', 'np.array', (['Xstar'], {}), '(Xstar)\n', (4597, 4604), True, 'import numpy as np\n'), ((4956, 4980), 'numpy.power', 'np.power', (['variances', '(0.5)'], {}), '(variances, 0.5)\n', (4964, 4980), True, 'import numpy as np\n'), ((5000, 5024), 'numpy.power', 'np.power', (['variances', '(0.5)'], {}), '(variances, 0.5)\n', (5008, 5024), True, 'import numpy as np\n'), ((6749, 6774), 'numpy.vstack', 'np.vstack', (['(X, datapoint)'], {}), '((X, datapoint))\n', (6758, 6774), True, 'import numpy as np\n'), ((8645, 8664), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (8651, 8664), True, 'import numpy as np\n'), ((9482, 9499), 'numpy.diag', 'np.diag', (['variance'], {}), '(variance)\n', (9489, 9499), True, 'import numpy as np\n'), ((9549, 9566), 'numpy.diag', 'np.diag', (['variance'], {}), '(variance)\n', (9556, 9566), True, 'import numpy as np\n'), ((4182, 4217), 'numpy.append', 'np.append', (['Xstart[0]', 'self.data[-1]'], {}), '(Xstart[0], self.data[-1])\n', (4191, 4217), True, 'import numpy as np\n'), ((4339, 4382), 'numpy.append', 'np.append', (['Xstart[0]', 'predictions[step - 1]'], {}), '(Xstart[0], predictions[step - 1])\n', (4348, 4382), True, 'import numpy as np\n'), ((4862, 4877), 'numpy.array', 'np.array', (['Xstar'], {}), '(Xstar)\n', (4870, 4877), True, 'import numpy as np\n'), ((8550, 8573), 'numpy.transpose', 'np.transpose', (['self.data'], {}), '(self.data)\n', (8562, 8573), True, 'import numpy as np\n'), ((8600, 8610), 'numpy.diag', 'np.diag', (['L'], {}), '(L)\n', (8607, 8610), True, 'import numpy as np\n')]
|
"""
Composite Laminate Module (:mod:`compmech.composite.laminate`)
==============================================================
.. currentmodule:: compmech.composite.laminate
"""
from __future__ import division, absolute_import
import numpy as np
from .lamina import Lamina
from .matlamina import read_laminaprop
from compmech.constants import DOUBLE
from compmech.logger import *
from numpy.linalg.linalg import inv
def read_stack(stack, plyt=None, laminaprop=None, plyts=[], laminaprops=[],
offset=0., lam3D=False):
"""Read a laminate stacking sequence data.
An ``Laminate`` object is returned based on the inputs given.
Parameters
----------
stack : list
Angles of the stacking sequence in degrees.
plyt : float, optional
When all plies have the same thickness, ``plyt`` can be supplied.
laminaprop : tuple, optional
When all plies have the same material properties, ``laminaprop``
can be supplied.
plyts : list, optional
A list of floats with the thickness of each ply.
laminaprops : list, optional
A list of tuples with a laminaprop for each ply.
offset : float, optional
Offset along the normal axis about the mid-surface, which influences
the laminate properties.
lam3D : bool
Use 3D model by Chou 1971, requires 3D material properties
Notes
-----
``plyt`` or ``plyts`` must be supplied
``laminaprop`` or ``laminaprops`` must be supplied
For orthotropic plies, the ``laminaprop`` should be::
laminaprop = (E11, E22, nu12, G12, G13, G23)
For isotropic plies, the ``laminaprop`` should be::
laminaprop = (E, E, nu)
For lam3D, the ``laminaprop`` should be::
laminaprop = (e1, e2, nu12, g12, g13, g23, e3, nu13, nu23, a1, a2, a3)
"""
lam = Laminate()
lam.offset = offset
lam.stack = stack
lam.lam3D = lam3D
if not plyts:
if not plyt:
error('plyt or plyts must be supplied')
raise ValueError
else:
plyts = [plyt for i in stack]
if not laminaprops:
if not laminaprop:
error('laminaprop or laminaprops must be supplied')
raise ValueError
else:
laminaprops = [laminaprop for i in stack]
lam.plies = []
for plyt, laminaprop, theta in zip(plyts, laminaprops, stack):
laminaprop = laminaprop
ply = Lamina()
ply.theta = float(theta)
ply.t = plyt
ply.matobj = read_laminaprop(laminaprop)
lam.plies.append(ply)
lam.rebuild()
lam.calc_constitutive_matrix()
return lam
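# Illustrative usage sketch (the ply angles, thickness and material constants
# below are made-up example numbers):
#
#   laminaprop = (142e9, 9.8e9, 0.3, 6.0e9, 6.0e9, 6.0e9)  # (E11, E22, nu12, G12, G13, G23)
#   lam = read_stack([0, 45, -45, 90], plyt=1.25e-4, laminaprop=laminaprop)
#   print(lam.ABD)    # 6x6 constitutive matrix
#   print(lam.ABDE)   # 8x8 matrix including transverse shear terms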
def read_lamination_parameters(thickness, laminaprop,
xiA1, xiA2, xiA3, xiA4,
xiB1, xiB2, xiB3, xiB4,
xiD1, xiD2, xiD3, xiD4,
xiE1, xiE2, xiE3, xiE4):
r"""Calculates a laminate based on the lamination parameters.
The lamination parameters:
`\xi_{A1} \cdots \xi_{A4}`, `\xi_{B1} \cdots \xi_{B4}`,
`\xi_{D1} \cdots \xi_{D4}`, `\xi_{E1} \cdots \xi_{E4}`
are used to calculate the laminate constitutive matrix.
Parameters
----------
thickness : float
The total thickness of the laminate
laminaprop : tuple
The laminaprop tuple used to define the laminate material.
xiA1 to xiE4 : float
The 16 lamination parameters used to define the laminate.
Returns
-------
lam : Laminate
laminate with the ABD and ABDE matrices already calculated
"""
lam = Laminate()
lam.t = thickness
lam.matobj = read_laminaprop(laminaprop)
lam.xiA = np.array([1, xiA1, xiA2, xiA3, xiA4], dtype=DOUBLE)
lam.xiB = np.array([0, xiB1, xiB2, xiB3, xiB4], dtype=DOUBLE)
lam.xiD = np.array([1, xiD1, xiD2, xiD3, xiD4], dtype=DOUBLE)
lam.xiE = np.array([1, xiE1, xiE2, xiE3, xiE4], dtype=DOUBLE)
lam.calc_ABDE_from_lamination_parameters()
return lam
class Laminate(object):
r"""
========= ===========================================================
attribute description
========= ===========================================================
plies list of plies
t total thickness of the laminate
offset offset at the normal direction
e1 equivalent laminate modulus in 1 direction
e2 equivalent laminate modulus in 2 direction
g12 equivalent laminate shear modulus in 12 direction
nu12 equivalent laminate Poisson ratio in 12 direction
nu21 equivalent laminate Poisson ratio in 21 direction
xiA laminate parameters for extensional matrix A
xiB laminate parameters for extension-bending matrix B
xiD laminate parameters for bending matrix D
A laminate extension matrix
B laminate extension-bending matrix
D laminate bending matrix
E         laminate transverse shear matrix
ABD laminate ABD matrix
ABDE laminate ABD matrix with transverse shear terms
========= ===========================================================
"""
def __init__(self):
self.plies = []
self.matobj = None
self.t = None
self.offset = 0.
self.e1 = None
self.e2 = None
self.e3 = None
self.nu12 = None
self.g12 = None
self.g13 = None
self.g23 = None
self.a1 = None
self.a2 = None
self.xiA = None
self.xiB = None
self.xiD = None
self.A = None
self.B = None
self.D = None
self.E = None
self.ABD = None
self.ABDE = None
self.lam3D = False
def rebuild(self):
lam_thick = 0
for ply in self.plies:
ply.rebuild()
lam_thick += ply.t
self.t = lam_thick
def calc_equivalent_modulus(self):
"""Calculates the equivalent laminate properties.
The following attributes are calculated:
e1, e2, g12, nu12, nu21
"""
if not self.lam3D:
AI = np.matrix(self.ABD, dtype=DOUBLE).I
a11, a12, a22, a33 = AI[0, 0], AI[0, 1], AI[1, 1], AI[2, 2]
self.e1 = 1. / (self.t * a11)
self.e2 = 1. / (self.t * a22)
self.g12 = 1. / (self.t * a33)
self.nu12 = - a12 / a11
self.nu21 = - a12 / a22
# Eq. 5.110 Ganesh/Rana Lecture19 Hygrothermal laminate theory
# or Eq. 4.72 into Eg.4.64 with delta_T=1 (Kaw 2006)
a = np.squeeze(np.array(np.dot(AI, self.QLAL)))
self.a1 = a[0]
self.a2 = a[1]
self.a12 = a[2]
else:
H = inv(self.C_general) # Bogetti 1995 Eq. 29
self.e1 = 1. / H[0, 0] # Bogetti 1995 Eq. 30
self.e2 = 1. / H[1, 1] # Bogetti 1995 Eq. 31
self.e3 = 1. / H[2, 2] # Bogetti 1995 Eq. 32
self.g23 = 1. / H[3, 3] # Bogetti 1995 Eq. 33
self.g13 = 1. / H[4, 4] # Bogetti 1995 Eq. 34
self.g12 = 1. / H[5, 5] # Bogetti 1995 Eq. 35
self.nu23 = - H[1, 2] / H[1, 1] # Bogetti 1995 Eq. 36
self.nu13 = - H[0, 2] / H[0, 0] # Bogetti 1995 Eq. 37
self.nu12 = - H[0, 1] / H[0, 0] # Bogetti 1995 Eq. 38
self.nu32 = - H[1, 2] / H[2, 2] # Bogetti 1995 Eq. 39
self.nu31 = - H[0, 2] / H[2, 2] # Bogetti 1995 Eq. 40
self.nu21 = - H[0, 1] / H[1, 1] # Bogetti 1995 Eq. 41
N = self.N
self.a1 = np.dot(H[0, :], N) # Bogetti Eq. 44
self.a2 = np.dot(H[1, :], N) # Bogetti Eq. 45
self.a3 = np.dot(H[2, :], N) # Bogetti Eq. 46
self.a23 = np.dot(H[3, :], N) # Bogetti Eq. 47
self.a13 = np.dot(H[4, :], N) # Bogetti Eq. 48
self.a12 = np.dot(H[5, :], N) # Bogetti Eq. 49
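# Background note (reading aid for the classical, lam3D=False branch above):
# with the laminate compliance a = ABD^{-1}, applying only a membrane force N_x
# gives eps_x = a11*N_x, so the equivalent modulus is
#   E1 = (N_x/t) / eps_x = 1/(t*a11),
# and likewise E2 = 1/(t*a22), G12 = 1/(t*a33), nu12 = -a12/a11, nu21 = -a12/a22,
# which matches the expressions used in calc_equivalent_modulus.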
def calc_lamination_parameters(self):
"""Calculate the lamination parameters.
The following attributes are calculated:
xiA, xiB, xiD, xiE
"""
xiA1, xiA2, xiA3, xiA4 = 0, 0, 0, 0
xiB1, xiB2, xiB3, xiB4 = 0, 0, 0, 0
xiD1, xiD2, xiD3, xiD4 = 0, 0, 0, 0
xiE1, xiE2, xiE3, xiE4 = 0, 0, 0, 0
lam_thick = sum([ply.t for ply in self.plies])
self.t = lam_thick
h0 = -lam_thick / 2. + self.offset
for ply in self.plies:
hk_1 = h0
h0 += ply.t
hk = h0
Afac = ply.t / lam_thick
Bfac = (2. / lam_thick**2) * (hk**2 - hk_1**2)
Dfac = (4. / lam_thick**3) * (hk**3 - hk_1**3)
Efac = (1. / lam_thick) * (hk - hk_1) # * (5./6) * (5./6)
cos2t = ply.cos2t
cos4t = ply.cos4t
sin2t = ply.sin2t
sin4t = ply.sin4t
xiA1 += Afac * cos2t
xiA2 += Afac * sin2t
xiA3 += Afac * cos4t
xiA4 += Afac * sin4t
xiB1 += Bfac * cos2t
xiB2 += Bfac * sin2t
xiB3 += Bfac * cos4t
xiB4 += Bfac * sin4t
xiD1 += Dfac * cos2t
xiD2 += Dfac * sin2t
xiD3 += Dfac * cos4t
xiD4 += Dfac * sin4t
xiE1 += Efac * cos2t
xiE2 += Efac * sin2t
xiE3 += Efac * cos4t
xiE4 += Efac * sin4t
self.xiA = np.array([1, xiA1, xiA2, xiA3, xiA4], dtype=DOUBLE)
self.xiB = np.array([0, xiB1, xiB2, xiB3, xiB4], dtype=DOUBLE)
self.xiD = np.array([1, xiD1, xiD2, xiD3, xiD4], dtype=DOUBLE)
self.xiE = np.array([1, xiE1, xiE2, xiE3, xiE4], dtype=DOUBLE)
def calc_ABDE_from_lamination_parameters(self):
"""Use the ABDE matrix based on lamination parameters.
Given the lamination parameters ``xiA``, ``xiB``, ``xiC`` and ``xiD``,
the ABD matrix is calculated.
"""
# dummies used to unpack vector results
du1, du2, du3, du4, du5, du6 = 0, 0, 0, 0, 0, 0
# A matrix terms
A11, A22, A12, du1, du2, du3, A66, A16, A26 =\
(self.t) * np.dot(self.matobj.u, self.xiA)
# B matrix terms
B11, B22, B12, du1, du2, du3, B66, B16, B26 =\
(self.t**2 / 4.) * np.dot(self.matobj.u, self.xiB)
# D matrix terms
D11, D22, D12, du1, du2, du3, D66, D16, D26 =\
(self.t**3 / 12.) * np.dot(self.matobj.u, self.xiD)
# E matrix terms
du1, du2, du3, E44, E55, E45, du4, du5, du6 =\
(self.t) * np.dot(self.matobj.u, self.xiE)
self.A = np.array([[A11, A12, A16],
[A12, A22, A26],
[A16, A26, A66]], dtype=DOUBLE)
self.B = np.array([[B11, B12, B16],
[B12, B22, B26],
[B16, B26, B66]], dtype=DOUBLE)
self.D = np.array([[D11, D12, D16],
[D12, D22, D26],
[D16, D26, D66]], dtype=DOUBLE)
# printing E according to Reddy's definition for E44, E45 and E55
self.E = np.array([[E55, E45],
[E45, E44]], dtype=DOUBLE)
self.ABD = np.array([[A11, A12, A16, B11, B12, B16],
[A12, A22, A26, B12, B22, B26],
[A16, A26, A66, B16, B26, B66],
[B11, B12, B16, D11, D12, D16],
[B12, B22, B26, D12, D22, D26],
[B16, B26, B66, D16, D26, D66]], dtype=DOUBLE)
# printing ABDE according to Reddy's definition for E44, E45 and E55
self.ABDE = np.array([[A11, A12, A16, B11, B12, B16, 0, 0],
[A12, A22, A26, B12, B22, B26, 0, 0],
[A16, A26, A66, B16, B26, B66, 0, 0],
[B11, B12, B16, D11, D12, D16, 0, 0],
[B12, B22, B26, D12, D22, D26, 0, 0],
[B16, B26, B66, D16, D26, D66, 0, 0],
[0, 0, 0, 0, 0, 0, E55, E45],
[0, 0, 0, 0, 0, 0, E45, E44]],
dtype=DOUBLE)
def calc_constitutive_matrix(self):
"""Calculates the laminate constitutive matrix
This is the commonly called ``ABD`` matrix with ``shape=(6, 6)`` when
the classical laminated plate theory is used, or the ``ABDE`` matrix
when the first-order shear deformation theory is used, containing the
transverse shear terms.
"""
self.A_general = np.zeros([5, 5], dtype=DOUBLE)
self.B_general = np.zeros([5, 5], dtype=DOUBLE)
self.D_general = np.zeros([5, 5], dtype=DOUBLE)
self.QLALN_general = np.zeros([5], dtype=DOUBLE)
self.QLALM_general = np.zeros([5], dtype=DOUBLE)
lam_thick = sum([ply.t for ply in self.plies])
self.t = lam_thick
h0 = -lam_thick / 2 + self.offset
for ply in self.plies:
hk_1 = h0
h0 += ply.t
hk = h0
self.A_general += ply.QL * (hk - hk_1)
self.B_general += 1 / 2. * ply.QL * (hk**2 - hk_1**2)
self.D_general += 1 / 3. * ply.QL * (hk**3 - hk_1**3)
# TODO add CTE laminate matrix
# Reddy Eq. 3.3.41
QLAL_dot = np.dot(ply.QL, ply.AL)
self.QLALN_general += QLAL_dot * (hk - hk_1)
self.QLALM_general += 1 / 2. * QLAL_dot * (hk**2 - hk_1**2)
self.A = self.A_general[0:3, 0:3]
self.B = self.B_general[0:3, 0:3]
self.D = self.D_general[0:3, 0:3]
self.E = self.A_general[3:5, 3:5]
# Note Reddy convention Reddy Eq. 2.4.8
# E11 = A44 = 23 = yz
# E22 = A55 = 13 = xz
conc1 = np.concatenate([self.A, self.B], axis=1)
conc2 = np.concatenate([self.B, self.D], axis=1)
self.ABD = np.concatenate([conc1, conc2], axis=0)
self.ABDE = np.zeros((8, 8), dtype=DOUBLE)
self.ABDE[0:6, 0:6] = self.ABD
self.ABDE[6:8, 6:8] = self.E
self.QLALN = self.QLALN_general[0:3]
self.QLALM = self.QLALM_general[0:3]
self.QLAL = np.concatenate([self.QLALN, self.QLALM], axis=0)
self._calc_stiffness_matrix_3D()
def _calc_stiffness_matrix_3D(self):
''' Calculates the laminate stiffness matrix
Chou, 1971, Elastic Constants of Layered Media
Theory assumes symmetric laminate
'''
# general laminate stiffness matrix
self.C_general = np.zeros([6, 6], dtype=DOUBLE)
lam_thick = self.t
# Chou 1971 Eq. 8
def _sum_l_up(j, lam_thick):
sum_l_up = 0.
for plyl in self.plies:
tl = plyl.t
vl = tl / lam_thick
CLl = plyl.CL
sum_l_up += vl * CLl[2, j] / CLl[2, 2]
return sum_l_up
def _sum_l_low(lam_thick):
sum_l_low = 0.
for plyl in self.plies:
tl = plyl.t
vl = tl / lam_thick
CLl = plyl.CL
sum_l_low += vl / CLl[2, 2]
return sum_l_low
for i in [0, 1, 2, 5]:
for j in [0, 1, 2, 5]:
for plyk in self.plies:
tk = plyk.t
vk = tk / lam_thick
CLk = plyk.CL
self.C_general[i, j] += vk * (CLk[i, j] -
(CLk[i, 2] * CLk[2, j]) /
(CLk[2, 2]) +
(CLk[i, 2] * _sum_l_up(j, lam_thick)) /
(CLk[2, 2] * _sum_l_low(lam_thick)))
# Chou 1971 Eq. 9
def _sum_k_up_34(i, j, lam_thick):
sum_k_up_34 = 0.
for plyk in self.plies:
tk = plyk.t
vk = tk / lam_thick
CLk = plyk.CL
deltak = plyk.delta_CL45
sum_k_up_34 += vk / deltak * CLk[i, j]
return sum_k_up_34
def _sum_kl_low_34(lam_thick):
sum_kl_low_34 = 0.
for plyk in self.plies:
tk = plyk.t
vk = tk / lam_thick
CLk = plyk.CL
deltak = plyk.delta_CL45
for plyl in self.plies:
tl = plyl.t
vl = tl / lam_thick
CLl = plyl.CL
deltal = plyl.delta_CL45
sum_kl_low_34 += (vk * vl) /\
(deltak * deltal) * \
(CLk[3, 3] * CLl[4, 4] - CLk[3, 4] * CLl[4, 3])
return sum_kl_low_34
for i in [3, 4]:
for j in [3, 4]:
self.C_general[i, j] = _sum_k_up_34(
i, j, lam_thick) / _sum_kl_low_34(lam_thick)
# Bogetti Eq. 43
self.N = np.zeros([6], dtype=DOUBLE)
for i in range(6):
for j in range(6):
for plyk in self.plies:
tk = plyk.t
vk = tk / lam_thick
CLk = plyk.CL
AL3D = plyk.AL3D
self.N[i] += CLk[i, j] * AL3D[j] * vk
def force_balanced_LP(self):
r"""Force balanced lamination parameters
The lamination parameters `\xi_{A2}` and `\xi_{A4}` are set to null
to force a balanced laminate.
"""
dummy, xiA1, xiA2, xiA3, xiA4 = self.xiA
self.xiA = np.array([1, xiA1, 0, xiA3, 0], dtype=DOUBLE)
self.calc_ABDE_from_lamination_parameters()
def force_symmetric_LP(self):
r"""Force symmetric lamination parameters
The lamination parameters `\xi_{Bi}` are set to null
to force a symmetric laminate.
"""
self.xiB = np.zeros(5)
self.calc_ABDE_from_lamination_parameters()
def force_orthotropic(self):
r"""Force an orthotropic laminate
The terms
`A_{13}`, `A_{23}`, `A_{31}`, `A_{32}`,
`B_{13}`, `B_{23}`, `B_{31}`, `B_{32}`,
`D_{13}`, `D_{23}`, `D_{31}`, `D_{32}` are set to zero to force an
orthotropic laminate.
"""
if self.offset != 0.:
raise RuntimeError(
'Laminates with offset cannot be forced orthotropic!')
self.A[0, 2] = 0.
self.A[1, 2] = 0.
self.A[2, 0] = 0.
self.A[2, 1] = 0.
self.B[0, 2] = 0.
self.B[1, 2] = 0.
self.B[2, 0] = 0.
self.B[2, 1] = 0.
self.D[0, 2] = 0.
self.D[1, 2] = 0.
self.D[2, 0] = 0.
self.D[2, 1] = 0.
self.ABD[0, 2] = 0. # A16
self.ABD[1, 2] = 0. # A26
self.ABD[2, 0] = 0. # A61
self.ABD[2, 1] = 0. # A62
self.ABD[0, 5] = 0. # B16
self.ABD[5, 0] = 0. # B61
self.ABD[1, 5] = 0. # B26
self.ABD[5, 1] = 0. # B62
self.ABD[3, 2] = 0. # B16
self.ABD[2, 3] = 0. # B61
self.ABD[4, 2] = 0. # B26
self.ABD[2, 4] = 0. # B62
self.ABD[3, 5] = 0. # D16
self.ABD[4, 5] = 0. # D26
self.ABD[5, 3] = 0. # D61
self.ABD[5, 4] = 0. # D62
self.ABDE[0, 2] = 0. # A16
self.ABDE[1, 2] = 0. # A26
self.ABDE[2, 0] = 0. # A61
self.ABDE[2, 1] = 0. # A62
self.ABDE[0, 5] = 0. # B16
self.ABDE[5, 0] = 0. # B61
self.ABDE[1, 5] = 0. # B26
self.ABDE[5, 1] = 0. # B62
self.ABDE[3, 2] = 0. # B16
self.ABDE[2, 3] = 0. # B61
self.ABDE[4, 2] = 0. # B26
self.ABDE[2, 4] = 0. # B62
self.ABDE[3, 5] = 0. # D16
self.ABDE[4, 5] = 0. # D26
self.ABDE[5, 3] = 0. # D61
self.ABDE[5, 4] = 0. # D62
def force_symmetric(self):
"""Force a symmetric laminate
The `B` terms of the constitutive matrix are set to zero.
"""
if self.offset != 0.:
raise RuntimeError(
'Laminates with offset cannot be forced symmetric!')
self.B = np.zeros((3, 3))
self.ABD[0:3, 3:6] = 0
self.ABD[3:6, 0:3] = 0
self.ABDE[0:3, 3:6] = 0
self.ABDE[3:6, 0:3] = 0
def apply_load(self, F, dT):
''' Obtain the laminate strains produced by a given load vector
F = [n_x, n_y, n_xy, m_x, m_y, m_xy] (membrane force resultants in N/m, moment resultants in N)
:param: F: force and moment resultant vector
:param: dT: temperature change
:return: eps0: vector of mid-surface strains eps0=[eps_x, eps_y, eps_xy]
:return: eps1: vector of curvatures (bending strains) eps1=[kappa_x, kappa_y, kappa_xy]
'''
# Reddy, Eq. 3.3.40
eps = np.dot(inv(self.ABD), (F + self.QLAL * dT))
eps0 = eps[0:3]
eps1 = eps[3:6]
return eps0, eps1
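# Illustrative usage sketch (load values and material constants are made up):
#
#   lam = read_stack([0, 90, 90, 0], plyt=1.25e-4,
#                    laminaprop=(142e9, 9.8e9, 0.3, 6.0e9, 6.0e9, 6.0e9))
#   F = np.array([1000., 0., 0., 0., 0., 0.])   # [n_x, n_y, n_xy, m_x, m_y, m_xy]
#   eps0, eps1 = lam.apply_load(F, dT=0.)
#   print(eps0)   # mid-surface strains
#   print(eps1)   # curvatures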
|
[
"numpy.matrix",
"numpy.linalg.linalg.inv",
"numpy.zeros",
"numpy.array",
"numpy.dot",
"numpy.concatenate"
] |
[((3755, 3806), 'numpy.array', 'np.array', (['[1, xiA1, xiA2, xiA3, xiA4]'], {'dtype': 'DOUBLE'}), '([1, xiA1, xiA2, xiA3, xiA4], dtype=DOUBLE)\n', (3763, 3806), True, 'import numpy as np\n'), ((3821, 3872), 'numpy.array', 'np.array', (['[0, xiB1, xiB2, xiB3, xiB4]'], {'dtype': 'DOUBLE'}), '([0, xiB1, xiB2, xiB3, xiB4], dtype=DOUBLE)\n', (3829, 3872), True, 'import numpy as np\n'), ((3887, 3938), 'numpy.array', 'np.array', (['[1, xiD1, xiD2, xiD3, xiD4]'], {'dtype': 'DOUBLE'}), '([1, xiD1, xiD2, xiD3, xiD4], dtype=DOUBLE)\n', (3895, 3938), True, 'import numpy as np\n'), ((3953, 4004), 'numpy.array', 'np.array', (['[1, xiE1, xiE2, xiE3, xiE4]'], {'dtype': 'DOUBLE'}), '([1, xiE1, xiE2, xiE3, xiE4], dtype=DOUBLE)\n', (3961, 4004), True, 'import numpy as np\n'), ((9511, 9562), 'numpy.array', 'np.array', (['[1, xiA1, xiA2, xiA3, xiA4]'], {'dtype': 'DOUBLE'}), '([1, xiA1, xiA2, xiA3, xiA4], dtype=DOUBLE)\n', (9519, 9562), True, 'import numpy as np\n'), ((9582, 9633), 'numpy.array', 'np.array', (['[0, xiB1, xiB2, xiB3, xiB4]'], {'dtype': 'DOUBLE'}), '([0, xiB1, xiB2, xiB3, xiB4], dtype=DOUBLE)\n', (9590, 9633), True, 'import numpy as np\n'), ((9653, 9704), 'numpy.array', 'np.array', (['[1, xiD1, xiD2, xiD3, xiD4]'], {'dtype': 'DOUBLE'}), '([1, xiD1, xiD2, xiD3, xiD4], dtype=DOUBLE)\n', (9661, 9704), True, 'import numpy as np\n'), ((9724, 9775), 'numpy.array', 'np.array', (['[1, xiE1, xiE2, xiE3, xiE4]'], {'dtype': 'DOUBLE'}), '([1, xiE1, xiE2, xiE3, xiE4], dtype=DOUBLE)\n', (9732, 9775), True, 'import numpy as np\n'), ((10702, 10777), 'numpy.array', 'np.array', (['[[A11, A12, A16], [A12, A22, A26], [A16, A26, A66]]'], {'dtype': 'DOUBLE'}), '([[A11, A12, A16], [A12, A22, A26], [A16, A26, A66]], dtype=DOUBLE)\n', (10710, 10777), True, 'import numpy as np\n'), ((10850, 10925), 'numpy.array', 'np.array', (['[[B11, B12, B16], [B12, B22, B26], [B16, B26, B66]]'], {'dtype': 'DOUBLE'}), '([[B11, B12, B16], [B12, B22, B26], [B16, B26, B66]], dtype=DOUBLE)\n', (10858, 10925), True, 'import numpy as np\n'), ((10998, 11073), 'numpy.array', 'np.array', (['[[D11, D12, D16], [D12, D22, D26], [D16, D26, D66]]'], {'dtype': 'DOUBLE'}), '([[D11, D12, D16], [D12, D22, D26], [D16, D26, D66]], dtype=DOUBLE)\n', (11006, 11073), True, 'import numpy as np\n'), ((11220, 11268), 'numpy.array', 'np.array', (['[[E55, E45], [E45, E44]]'], {'dtype': 'DOUBLE'}), '([[E55, E45], [E45, E44]], dtype=DOUBLE)\n', (11228, 11268), True, 'import numpy as np\n'), ((11316, 11541), 'numpy.array', 'np.array', (['[[A11, A12, A16, B11, B12, B16], [A12, A22, A26, B12, B22, B26], [A16, A26,\n A66, B16, B26, B66], [B11, B12, B16, D11, D12, D16], [B12, B22, B26,\n D12, D22, D26], [B16, B26, B66, D16, D26, D66]]'], {'dtype': 'DOUBLE'}), '([[A11, A12, A16, B11, B12, B16], [A12, A22, A26, B12, B22, B26], [\n A16, A26, A66, B16, B26, B66], [B11, B12, B16, D11, D12, D16], [B12,\n B22, B26, D12, D22, D26], [B16, B26, B66, D16, D26, D66]], dtype=DOUBLE)\n', (11324, 11541), True, 'import numpy as np\n'), ((11776, 12104), 'numpy.array', 'np.array', (['[[A11, A12, A16, B11, B12, B16, 0, 0], [A12, A22, A26, B12, B22, B26, 0, 0],\n [A16, A26, A66, B16, B26, B66, 0, 0], [B11, B12, B16, D11, D12, D16, 0,\n 0], [B12, B22, B26, D12, D22, D26, 0, 0], [B16, B26, B66, D16, D26, D66,\n 0, 0], [0, 0, 0, 0, 0, 0, E55, E45], [0, 0, 0, 0, 0, 0, E45, E44]]'], {'dtype': 'DOUBLE'}), '([[A11, A12, A16, B11, B12, B16, 0, 0], [A12, A22, A26, B12, B22,\n B26, 0, 0], [A16, A26, A66, B16, B26, B66, 0, 0], [B11, B12, B16, D11,\n D12, D16, 0, 0], [B12, B22, B26, D12, D22, D26, 0, 
0], [B16, B26, B66,\n D16, D26, D66, 0, 0], [0, 0, 0, 0, 0, 0, E55, E45], [0, 0, 0, 0, 0, 0,\n E45, E44]], dtype=DOUBLE)\n', (11784, 12104), True, 'import numpy as np\n'), ((12728, 12758), 'numpy.zeros', 'np.zeros', (['[5, 5]'], {'dtype': 'DOUBLE'}), '([5, 5], dtype=DOUBLE)\n', (12736, 12758), True, 'import numpy as np\n'), ((12784, 12814), 'numpy.zeros', 'np.zeros', (['[5, 5]'], {'dtype': 'DOUBLE'}), '([5, 5], dtype=DOUBLE)\n', (12792, 12814), True, 'import numpy as np\n'), ((12840, 12870), 'numpy.zeros', 'np.zeros', (['[5, 5]'], {'dtype': 'DOUBLE'}), '([5, 5], dtype=DOUBLE)\n', (12848, 12870), True, 'import numpy as np\n'), ((12900, 12927), 'numpy.zeros', 'np.zeros', (['[5]'], {'dtype': 'DOUBLE'}), '([5], dtype=DOUBLE)\n', (12908, 12927), True, 'import numpy as np\n'), ((12957, 12984), 'numpy.zeros', 'np.zeros', (['[5]'], {'dtype': 'DOUBLE'}), '([5], dtype=DOUBLE)\n', (12965, 12984), True, 'import numpy as np\n'), ((13933, 13973), 'numpy.concatenate', 'np.concatenate', (['[self.A, self.B]'], {'axis': '(1)'}), '([self.A, self.B], axis=1)\n', (13947, 13973), True, 'import numpy as np\n'), ((13990, 14030), 'numpy.concatenate', 'np.concatenate', (['[self.B, self.D]'], {'axis': '(1)'}), '([self.B, self.D], axis=1)\n', (14004, 14030), True, 'import numpy as np\n'), ((14051, 14089), 'numpy.concatenate', 'np.concatenate', (['[conc1, conc2]'], {'axis': '(0)'}), '([conc1, conc2], axis=0)\n', (14065, 14089), True, 'import numpy as np\n'), ((14110, 14140), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {'dtype': 'DOUBLE'}), '((8, 8), dtype=DOUBLE)\n', (14118, 14140), True, 'import numpy as np\n'), ((14329, 14377), 'numpy.concatenate', 'np.concatenate', (['[self.QLALN, self.QLALM]'], {'axis': '(0)'}), '([self.QLALN, self.QLALM], axis=0)\n', (14343, 14377), True, 'import numpy as np\n'), ((14696, 14726), 'numpy.zeros', 'np.zeros', (['[6, 6]'], {'dtype': 'DOUBLE'}), '([6, 6], dtype=DOUBLE)\n', (14704, 14726), True, 'import numpy as np\n'), ((17126, 17153), 'numpy.zeros', 'np.zeros', (['[6]'], {'dtype': 'DOUBLE'}), '([6], dtype=DOUBLE)\n', (17134, 17153), True, 'import numpy as np\n'), ((17733, 17778), 'numpy.array', 'np.array', (['[1, xiA1, 0, xiA3, 0]'], {'dtype': 'DOUBLE'}), '([1, xiA1, 0, xiA3, 0], dtype=DOUBLE)\n', (17741, 17778), True, 'import numpy as np\n'), ((18049, 18060), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (18057, 18060), True, 'import numpy as np\n'), ((20311, 20327), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (20319, 20327), True, 'import numpy as np\n'), ((6848, 6867), 'numpy.linalg.linalg.inv', 'inv', (['self.C_general'], {}), '(self.C_general)\n', (6851, 6867), False, 'from numpy.linalg.linalg import inv\n'), ((7691, 7709), 'numpy.dot', 'np.dot', (['H[0, :]', 'N'], {}), '(H[0, :], N)\n', (7697, 7709), True, 'import numpy as np\n'), ((7750, 7768), 'numpy.dot', 'np.dot', (['H[1, :]', 'N'], {}), '(H[1, :], N)\n', (7756, 7768), True, 'import numpy as np\n'), ((7809, 7827), 'numpy.dot', 'np.dot', (['H[2, :]', 'N'], {}), '(H[2, :], N)\n', (7815, 7827), True, 'import numpy as np\n'), ((7869, 7887), 'numpy.dot', 'np.dot', (['H[3, :]', 'N'], {}), '(H[3, :], N)\n', (7875, 7887), True, 'import numpy as np\n'), ((7929, 7947), 'numpy.dot', 'np.dot', (['H[4, :]', 'N'], {}), '(H[4, :], N)\n', (7935, 7947), True, 'import numpy as np\n'), ((7989, 8007), 'numpy.dot', 'np.dot', (['H[5, :]', 'N'], {}), '(H[5, :], N)\n', (7995, 8007), True, 'import numpy as np\n'), ((10230, 10261), 'numpy.dot', 'np.dot', (['self.matobj.u', 'self.xiA'], {}), '(self.matobj.u, self.xiA)\n', (10236, 
10261), True, 'import numpy as np\n'), ((10373, 10404), 'numpy.dot', 'np.dot', (['self.matobj.u', 'self.xiB'], {}), '(self.matobj.u, self.xiB)\n', (10379, 10404), True, 'import numpy as np\n'), ((10517, 10548), 'numpy.dot', 'np.dot', (['self.matobj.u', 'self.xiD'], {}), '(self.matobj.u, self.xiD)\n', (10523, 10548), True, 'import numpy as np\n'), ((10652, 10683), 'numpy.dot', 'np.dot', (['self.matobj.u', 'self.xiE'], {}), '(self.matobj.u, self.xiE)\n', (10658, 10683), True, 'import numpy as np\n'), ((13488, 13510), 'numpy.dot', 'np.dot', (['ply.QL', 'ply.AL'], {}), '(ply.QL, ply.AL)\n', (13494, 13510), True, 'import numpy as np\n'), ((20905, 20918), 'numpy.linalg.linalg.inv', 'inv', (['self.ABD'], {}), '(self.ABD)\n', (20908, 20918), False, 'from numpy.linalg.linalg import inv\n'), ((6227, 6260), 'numpy.matrix', 'np.matrix', (['self.ABD'], {'dtype': 'DOUBLE'}), '(self.ABD, dtype=DOUBLE)\n', (6236, 6260), True, 'import numpy as np\n'), ((6711, 6732), 'numpy.dot', 'np.dot', (['AI', 'self.QLAL'], {}), '(AI, self.QLAL)\n', (6717, 6732), True, 'import numpy as np\n')]
|
import qulacs
from qulacs.gate import to_matrix_gate, RZ
from math import pi
import numpy as np
from ..op.util import break_operators_into_subsets_dummy
from ..context import rotation_factor
from time import time
class ElpTime:
init_tot = 0.0
init_0 = 0.0
init_1 = 0.0
init_2 = 0.0
init_3 = 0.0
init_4 = 0.0
symbol = 0.0
coef_list_0 = 0.0
coef_list_1 = 0.0
coef_list_2 = 0.0
coef_list_3 = 0.0
circuit = 0.0
def pauli_product_exponentiate_circuit(circuit, qoperator, control_bit=None):
assert len(qoperator.terms) == 1, 'len(qoperator.terms) == {}'.format(len(qoperator.terms))
pauli_product = list(qoperator.terms)[0]
# e.x. pauli_product = ((0,'X'), (1, 'Z'))
if len(pauli_product)==0: return
coeff = qoperator.terms[pauli_product]
#kura> assert coeff.real == 0, 'coeff.real == {}'.format(coeff.real)
# print('pauli_product',pauli_product,'coeff',coeff)
assert abs(coeff.real) < 1e-10, 'coeff.real == {0:10.3e}'.format(coeff.real)
theta = qoperator.terms[pauli_product].imag
# 1.
relevant_qubits = []
gates = []
for index_qubit, pauli_axis in pauli_product:
relevant_qubits += [index_qubit]
if pauli_axis=='X':
circuit.add_H_gate(index_qubit)
if pauli_axis=='Y':
circuit.add_RX_gate(index_qubit, -pi/4 * rotation_factor)
# 2.
pairs_cnot = [(relevant_qubits[i], relevant_qubits[i+1]) for i in range(len(relevant_qubits)-1)]
for pair_cnot in pairs_cnot:
circuit.add_CNOT_gate(*pair_cnot)
rz_gate = RZ(relevant_qubits[-1], theta * rotation_factor)
if control_bit==None:
circuit.add_gate(rz_gate)
else:
rz_mat_gate = to_matrix_gate(rz_gate)
rz_mat_gate.add_control_qubit(control_bit, 1)
circuit.add_gate(rz_mat_gate)
for pair_cnot in reversed(pairs_cnot):
circuit.add_CNOT_gate(*pair_cnot)
# 3.
gates = []
for index_qubit, pauli_axis in pauli_product:
if pauli_axis=='X':
circuit.add_H_gate(index_qubit)
if pauli_axis=='Y':
circuit.add_RX_gate(index_qubit, pi/4 * rotation_factor)
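# Illustrative usage sketch (operator and angle are made up): exponentiating
# the Pauli product X0 Z1 with imaginary coefficient 0.1j on a two-qubit
# circuit; the coefficient must be (close to) purely imaginary, matching the
# assert above.
#
#   from openfermion import QubitOperator
#   from qulacs import QuantumCircuit
#   circuit = QuantumCircuit(2)
#   qop = QubitOperator('X0 Z1', 0.1j)
#   pauli_product_exponentiate_circuit(circuit, qop)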
def trotter_step_1st_order(circuit, qubit_operator, control_bit=None, function_break_operator=break_operators_into_subsets_dummy, *, reverse_order=False):
subsets_qubit_operator = function_break_operator(qubit_operator)
for subset in subsets_qubit_operator:
for term in subset:
pauli_product_exponentiate_circuit(circuit, term, control_bit)
def trotter_step_2nd_order(circuit, qubit_operator, control_bit=None, function_break_operator=break_operators_into_subsets_dummy):
subsets_qubit_operator = function_break_operator(qubit_operator)
for subset in subsets_qubit_operator[:-1]:
for term in subset:
pauli_product_exponentiate_circuit(circuit, .5*term, control_bit)
for term in subsets_qubit_operator[-1]:
pauli_product_exponentiate_circuit(circuit, term, control_bit)
for subset in reversed(subsets_qubit_operator[:-1]):
for term in subset:
pauli_product_exponentiate_circuit(circuit, .5*term, control_bit)
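# Background note (reading aid only): the 2nd-order step above uses the
# symmetric (Strang) splitting
#   exp(A + B) ~ exp(A/2) exp(B) exp(A/2),
# generalised to several subsets: every subset except the last is applied with
# half its coefficient, then the last subset with its full coefficient, and the
# half-steps are repeated in reverse order.  The error per step is O(dt^3),
# versus O(dt^2) for the 1st-order step.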
from ..op.symbol import WrappedExpr
class TrotterStep:
"""TrotterStepは、qubit operatorの指数関数をTrotter展開によりシミュレートする量子回路を扱うクラスです。
qubit operatorの係数にはWrappedExprクラスのインスタンスである数式を含むことができます。
Args:
n_qubit (int): 量子ビット数
qubit_operator (openfermion.QubitOperator or list[tuple[pauli_string, coefficient]]): qubit operator
order (int, optional): Trotter order (1 or 2)
Examples:
.. code-block:: python
n_site = 4
fop = FermionOperator((), const)
for p in range(n_site):
for q in range(n_site):
fop += FermionOperator('p^ q', WrappedExpr('t{}{}'.format(p,q)))
qop = jordan_wigner(fop)
ts = TrotterStep(n_site, qop)
Note:
hoge
Attributes:
circuit (qulacs.ParametricQuantumCircuit): パラメータを含む量子回路
"""
def __init__(self, n_qubit, qubit_operator, order=2, reverse_exp_sequence=False):
self._n_qubit = n_qubit
ElpTime.init_tot -= time()
ElpTime.init_0 -= time()
import openfermion
if isinstance(qubit_operator, openfermion.QubitOperator):
pauli_string_and_amplitude_list = list(qubit_operator.terms.items())
elif isinstance(qubit_operator, dict):
pauli_string_and_amplitude_list = list(qubit_operator.items())
self._n_term = len(pauli_string_and_amplitude_list)
self._pauli_string_list = []
self._amplitude_expr_list = []
for pauli_string, amplitude_expr in pauli_string_and_amplitude_list[::-1 if reverse_exp_sequence else 1]:
self._pauli_string_list.append(pauli_string)
self._amplitude_expr_list.append(amplitude_expr)
ElpTime.init_0 += time()
# initialize parametrized 1st- or 2nd-order trotter circuit
ElpTime.init_1 -= time()
self._circuit = qulacs.ParametricQuantumCircuit(self._n_qubit)
assert order in (1,2), 'order({}) not in (1,2)'.format(order)
if order==1:
self._iterm_list = list(range(self._n_term))
self._rotation_coeff_list = [1.0]*self._n_term
elif order==2:
self._iterm_list = list(range(self._n_term-1)) + [self._n_term-1] + list(range(self._n_term-2, -1, -1))
self._rotation_coeff_list = [0.5]*(self._n_term-1) + [1.0] + [0.5]*(self._n_term-1)
for iterm in self._iterm_list:
pauli_string = self._pauli_string_list[iterm]
target = [index for index, axis in pauli_string]
pauli_ids = [{'X':1, 'Y':2, 'Z':3}[axis] for index, axis in pauli_string]
self._circuit.add_parametric_multi_Pauli_rotation_gate(target, pauli_ids, 0.0)
ElpTime.init_1 += time()
ElpTime.init_2 -= time()
self._symbols = set()
for amplitude_expr in self._amplitude_expr_list:
if isinstance(amplitude_expr, WrappedExpr):
self._symbols.update(amplitude_expr.expr.free_symbols)
self._symbol_number_pairs = {}
self._amplitude_value_list = [0]*self._n_term
self._angle_list = [0]*self._n_term
ElpTime.init_2 += time()
ElpTime.init_3 -= time()
import sympy #from sympy import expand, Matrix
_coef_symbols = []
for iterm, amp_expr in enumerate(self._amplitude_expr_list):
if isinstance(amp_expr, WrappedExpr):
#print(sympy.expand(amp_expr.expr))
symbols_sorted = sorted(list(self._symbols),key=str)
_coef_symbols.append([complex(sympy.expand(amp_expr.expr).coeff(symbol)) for symbol in symbols_sorted])
self._coef_symbols = np.array(_coef_symbols)
self.subs([] if len(self._symbols)==0 else [(symbol, 0.0) for symbol in self._symbols])
ElpTime.init_3 += time()
ElpTime.init_4 -= time()
#only if you need expantion coefficient for T(\theta)> from sympy import solve, Eq
#only if you need expantion coefficient for T(\theta)> equation_list = []
#only if you need expantion coefficient for T(\theta)> for iterm, amp_expr in enumerate(self._amplitude_expr_list):
#only if you need expantion coefficient for T(\theta)> amp_expr = amp_expr.expr if isinstance(amp_expr, WrappedExpr) else amp_expr
#only if you need expantion coefficient for T(\theta)> p = WrappedExpr('p{}'.format(iterm)).expr
#only if you need expantion coefficient for T(\theta)> equation_list.append(Eq(amp_expr, p))
#only if you need expantion coefficient for T(\theta)> symbol_list = list(self._symbols)
#only if you need expantion coefficient for T(\theta)> self._fop_coeff_expr = solve(equation_list, symbol_list)
ElpTime.init_4 += time()
ElpTime.init_tot += time()
#PRINT print("TrotterStep:{0:5.2f}".format(ElpTime.init_tot),end="")
#PRINT print(" ( init_0:{0:5.2f}".format(ElpTime.init_0),end="")
#PRINT print(" | init_1:{0:5.2f}".format(ElpTime.init_1),end="")
#PRINT print(" | init_2:{0:5.2f}".format(ElpTime.init_2),end="")
#PRINT print(" | init_3:{0:5.2f}".format(ElpTime.init_3),end="")
#PRINT print(" | init_4:{0:5.2f}".format(ElpTime.init_4),end="")
#PRINT print(")")
def subs(self, symbol_number_pairs):
"""引数として渡されたテーブルに基づいて量子回路のパラメータを更新します。
"""
from sympy import Array
from sympy.utilities.iterables import flatten
ElpTime.symbol -= time()
# update self._symbol_number_pairs
if isinstance(symbol_number_pairs, dict): symbol_number_pairs = list(symbol_number_pairs.items())
symbol_list, number_list = [], []
for old, new in symbol_number_pairs:
if isinstance(old, Array):
assert old.shape == new.shape
symbol_list += flatten(old)
number_list += list(new.flatten()) # assume new as numpy.ndarray
else:
symbol_list += [old]
number_list += [new]
#PRINT print('slist', symbol_list)
#PRINT print('nlist', number_list)
update_pairs = {}
for symbol, number in zip(symbol_list, number_list):
symbol = symbol.expr if isinstance(symbol, WrappedExpr) else symbol
if symbol not in self._symbols: continue
assert (symbol not in update_pairs) or (update_pairs[symbol]==number) ,'{} {} {}'.format(symbol, update_pairs[symbol], number) # assert that no conflicts occur
update_pairs[symbol] = number
#PRINT print('update_pairs', update_pairs)
self._symbol_number_pairs.update(update_pairs)
ElpTime.symbol += time()
# update self._substituted_coeff_list & self._angle_list
#SLOW>
#SLOW> for iterm in range(self._n_term):
#SLOW> ElpTime.coef_list_0 -= time()
#SLOW> amplitude_expr = self._amplitude_expr_list[iterm]
#SLOW> ElpTime.coef_list_0 += time()
#SLOW> ElpTime.coef_list_1 -= time()
#SLOW> if isinstance(amplitude_expr, WrappedExpr):
#SLOW> amp_value = complex(amplitude_expr.subs(self._symbol_number_pairs))
#SLOW> else:
#SLOW> amp_value = complex(amplitude_expr)
#SLOW> self._amplitude_value_list[iterm] = amp_value
#SLOW> ElpTime.coef_list_1 += time()
#SLOW> ElpTime.coef_list_2 -= time()
#SLOW> self._angle_list[iterm] = float(amp_value.imag)
#SLOW> ElpTime.coef_list_2 += time()
#BGN:FAST
if isinstance(self._amplitude_expr_list[0], WrappedExpr):
ElpTime.coef_list_3 -= time()
symbols_sorted = sorted(list(self._symbols),key=str)
subs_number_vec = np.array([self._symbol_number_pairs.get(symbol) for symbol in symbols_sorted])
_amplitude_value_vec = np.einsum('ij,j->i', self._coef_symbols, subs_number_vec)
np.allclose(np.array(self._amplitude_value_list),_amplitude_value_vec)
self._amplitude_value_list = _amplitude_value_vec.tolist()
self._angle_list = _amplitude_value_vec.imag.tolist()
ElpTime.coef_list_3 += time()
else:
for iterm in range(self._n_term):
amplitude_value = complex(self._amplitude_expr_list[iterm])
self._angle_list[iterm] = float(amplitude_value.imag)
#END:FAST
ElpTime.circuit -= time()
# update params of self._circuit
for iparam, iterm in enumerate(self._iterm_list):
angle = self._angle_list[iterm]
rotation_coeff = self._rotation_coeff_list[iterm]
param = rotation_coeff * angle * rotation_factor
self._circuit.set_parameter(iparam, param)
ElpTime.circuit += time()
#PRINT print("symbol:{0:5.2f}".format(ElpTime.symbol),end="")
#PRINT print(" | coef_list_0:{0:5.2f}".format(ElpTime.coef_list_0),end="")
#PRINT print(" | coef_list_1:{0:5.2f}".format(ElpTime.coef_list_1),end="")
#PRINT print(" | coef_list_2:{0:5.2f}".format(ElpTime.coef_list_2),end="")
#PRINT print(" | coef_list_3:{0:5.2f}".format(ElpTime.coef_list_3),end="")
#PRINT print(" | circuit:{0:5.2f}".format(ElpTime.circuit),end="")
#PRINT print("")
def count_gates(self):
ngate = [0]*(self._n_qubit+1)
for ps in self._pauli_string_list: ngate[len(ps)] += 1
return ngate
def __repr__(self):
return str(self)
def __str__(self):
len_pauli_string_print = sum([len(str(isite))+2 for isite in range(self._n_qubit)])
s = '='*(len_pauli_string_print+101) + '\n'
s += str(self._circuit)
s += 'n_qubits ({:3d})\n'.format(self._n_qubit)
s += 'free_symbols ({:3d}) : {}\n'.format(len(self._symbols), str(self._symbols))
for symbol, number in sorted(self._symbol_number_pairs.items(), key=lambda x:str(x[0])):
s += '{:6} : {:.8e}\n'.format(str(symbol), number)
s += '-'*(len_pauli_string_print+101) + '\n'
s += ' '*7+'PauliString' + ' '*(len_pauli_string_print-8) + 'Amplitude[Expr]'
s += ' '*37 + 'Amplitude[Value]' +'\n'# + ' '*13 + 'Angle(deg)\n'
for iterm in range(self._n_term):
pauli_string = self._pauli_string_list[iterm]
amplitude_expr = self._amplitude_expr_list[iterm]
amplitude_value = self._amplitude_value_list[iterm]
angle = self._angle_list[iterm]
s += '{:3d}: '.format(iterm)
ipauli = 0
for index in range(self._n_qubit):
if ipauli < len(pauli_string) and index == pauli_string[ipauli][0]:
index, axis = pauli_string[ipauli]
s += axis + str(index) + ' '
ipauli += 1
else:
s += ' ' * (len(str(index))+2)
str_amp_expr = str(amplitude_expr)
if len(str_amp_expr)>50: str_amp_expr = str_amp_expr[:47] + '...'
s += ' {:50} '.format(str_amp_expr)
s += '{:+.10e} '.format(amplitude_value.imag)
s += ' '*11 if amplitude_value.real==0 else '({:+1.0e} ~ real part) '.format(amplitude_value.real)
#s += '{:+.7f}\n'.format(angle)
s += '\n'
s += '='*(len_pauli_string_print+101)
return s
def _test():
from symbol_for_openfermion import WrappedExpr as Symbol
from openfermion import FermionOperator, jordan_wigner
x = Symbol('x')
y = Symbol('y')
fop = FermionOperator('3^ 0 3', x)
fop += FermionOperator('3^ 1 2', y**2)
fop *= FermionOperator('2^', x + y*4 -2)
c1 = TrotterStep(n_qubit=4, qubit_operator=jordan_wigner(fop))
print(c1)
c1.subs([(x, 1.), (y, 5.)])
print(c1)
x, y = 1., 5.
fop = FermionOperator('3^ 0 3', x)
fop += FermionOperator('3^ 1 2', y**2)
fop *= FermionOperator('2^', x + y*4 -2)
c1 = TrotterStep(n_qubit=4, qubit_operator=jordan_wigner(fop))
print(c1)
def _test2():
from symbol_for_openfermion import WrappedExpr as Symbol
from openfermion import FermionOperator, jordan_wigner, get_sparse_operator
from sympy import Array
np.random.seed(100)
import scipy
n_orb = 2
const = Symbol('const')
T1 = [[None for _ in range(n_orb)] for _ in range(n_orb)]
for p in range(n_orb):
for q in range(p, n_orb):
t = Symbol('t{}{}'.format(p,q))
T1[p][q] = T1[q][p] = t
T1 = Array(T1)
print(T1)
const_value = np.random.rand()
T1_value = np.random.rand(n_orb,n_orb)*0.01
T1_value += T1_value.T
print(const_value)
print(T1_value)
def op1e(const, Tmat):
fop = FermionOperator('', const)
for p in range(n_orb):
for q in range(n_orb):
fop += FermionOperator( ((2*p , 1),(2*q , 0)), Tmat[p,q] )
fop += FermionOperator( ((2*p+1, 1),(2*q+1, 0)), Tmat[p,q] )
return fop
fop_symbol = op1e(const, T1)
qop_symbol = jordan_wigner(fop_symbol)
print(fop_symbol)
print(qop_symbol)
n_qubit = n_orb*2
# c1 : TrotterStep with symbolic qop
c1 = TrotterStep(n_qubit, (-1j)*qop_symbol)
print(c1)
symbol_number_pairs = [(const, const_value), (T1, T1_value)]
c1.subs(symbol_number_pairs)
print(c1)
# c2 : TrotterStep with numerical qop
fop_number = op1e(const_value, T1_value)
qop_number = jordan_wigner(fop_number)
c2 = TrotterStep(n_qubit, (-1j)*qop_number)
print(c2)
# c3 : oldtype with numerical qop
c3 = qulacs.QuantumCircuit(n_qubit)
trotter_step_2nd_order(c3, (-1j)*qop_number)
s0 = qulacs.QuantumState(n_qubit)
s0.set_Haar_random_state()
s1 = s0.copy()
s2 = s0.copy()
s3 = s0.copy()
from util_qulacs import convert_state_vector
sv = convert_state_vector( n_qubit, s0.get_vector() )
corr1 = []
corr2 = []
corr3 = []
corrv = []
for t in range(100):
corr1.append( qulacs.state.inner_product(s0, s1) )
corr2.append( qulacs.state.inner_product(s0, s2) )
corr3.append( qulacs.state.inner_product(s0, s3)*np.exp(-1j*qop_number.terms[()]*t) )
corrv.append( np.dot(np.conjugate(convert_state_vector(n_qubit, s0.get_vector())), sv) )
c1._circuit.update_quantum_state(s1)
c2._circuit.update_quantum_state(s2)
c3.update_quantum_state(s3)
sv = scipy.sparse.linalg.expm_multiply(-1j * get_sparse_operator(qop_number), sv)
import matplotlib.pyplot as plt
plt.plot(np.array(corr1).real, label='1')
plt.plot(np.array(corr2).real, label='2')
plt.plot(np.array(corr3).real, label='3')
plt.plot(np.array(corrv).real, label='4')
plt.legend()
plt.show()
# print('s1', s1.get_vector())
# print('s2', s2.get_vector())
# print('s3', s3.get_vector())
def _test3():
n_qubit = 1
c1 = qulacs.ParametricQuantumCircuit(n_qubit)
pauli_string = ((0,'Z'),)
target = [index for index, axis in pauli_string]
pauli_ids = [{'X':1, 'Y':2, 'Z':3}[axis] for index, axis in pauli_string]
angle = 1.0
c1.add_parametric_multi_Pauli_rotation_gate(target, pauli_ids, angle)
s1 = qulacs.QuantumState(n_qubit)
c1.update_quantum_state(s1)
print(s1)
def _example():
from symbol_for_openfermion import WrappedExpr as Symbol
from openfermion import FermionOperator, jordan_wigner, get_sparse_operator
import sympy
np.random.seed(100)
import scipy
n_orb = 2
const = Symbol('const')
T1 = [[None for _ in range(n_orb)] for _ in range(n_orb)]
for p in range(n_orb):
for q in range(p, n_orb):
t = Symbol('t{}{}'.format(p,q))
T1[p][q] = T1[q][p] = t
T1 = sympy.Array(T1)
print('T1:')
print(T1)
const_value = np.random.rand()
T1_value = np.random.rand(n_orb,n_orb)*0.01
T1_value += T1_value.T
print('const_value = ', const_value)
print('T1_value = ')
print(T1_value)
def op1e(const, Tmat):
fop = FermionOperator('', const)
for p in range(n_orb):
for q in range(n_orb):
fop += FermionOperator( ((2*p , 1),(2*q , 0)), Tmat[p,q] )
fop += FermionOperator( ((2*p+1, 1),(2*q+1, 0)), Tmat[p,q] )
return fop
fop_symbol = op1e(const, T1)
qop_symbol = jordan_wigner(fop_symbol)
print(fop_symbol)
print(qop_symbol)
n_qubit = n_orb*2
# c1 : TrotterStep with symbolic qop
c1 = TrotterStep(n_qubit, (-1j)*qop_symbol)
print(c1)
symbol_number_pairs = [(const, const_value), (T1, T1_value)]
c1.subs(symbol_number_pairs)
print(c1)
# c2 : TrotterStep with numerical qop
fop_number = op1e(const_value, T1_value)
qop_number = jordan_wigner(fop_number)
c2 = TrotterStep(n_qubit, (-1j)*qop_number)
print(c2)
s0 = qulacs.QuantumState(n_qubit)
s0.set_Haar_random_state()
s1 = s0.copy()
s2 = s0.copy()
corr1 = []
corr2 = []
for t in range(100):
corr1.append( qulacs.state.inner_product(s0, s1) )
corr2.append( qulacs.state.inner_product(s0, s2) )
c1._circuit.update_quantum_state(s1)
c2._circuit.update_quantum_state(s2)
import matplotlib.pyplot as plt
plt.plot(np.array(corr1).real, '--', label='c1')
plt.plot(np.array(corr2).real, '-.', label='c2')
plt.legend()
plt.show()
if __name__=='__main__':
_example()
|
[
"numpy.random.seed",
"numpy.einsum",
"qulacs.ParametricQuantumCircuit",
"numpy.exp",
"sympy.utilities.iterables.flatten",
"sympy.expand",
"openfermion.jordan_wigner",
"qulacs.gate.RZ",
"openfermion.FermionOperator",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"qulacs.QuantumState",
"qulacs.state.inner_product",
"symbol_for_openfermion.WrappedExpr",
"qulacs.QuantumCircuit",
"time.time",
"openfermion.get_sparse_operator",
"sympy.Array",
"numpy.array",
"qulacs.gate.to_matrix_gate",
"numpy.random.rand"
] |
[((1577, 1625), 'qulacs.gate.RZ', 'RZ', (['relevant_qubits[-1]', '(theta * rotation_factor)'], {}), '(relevant_qubits[-1], theta * rotation_factor)\n', (1579, 1625), False, 'from qulacs.gate import to_matrix_gate, RZ\n'), ((14664, 14675), 'symbol_for_openfermion.WrappedExpr', 'Symbol', (['"""x"""'], {}), "('x')\n", (14670, 14675), True, 'from symbol_for_openfermion import WrappedExpr as Symbol\n'), ((14684, 14695), 'symbol_for_openfermion.WrappedExpr', 'Symbol', (['"""y"""'], {}), "('y')\n", (14690, 14695), True, 'from symbol_for_openfermion import WrappedExpr as Symbol\n'), ((14707, 14735), 'openfermion.FermionOperator', 'FermionOperator', (['"""3^ 0 3"""', 'x'], {}), "('3^ 0 3', x)\n", (14722, 14735), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((14747, 14780), 'openfermion.FermionOperator', 'FermionOperator', (['"""3^ 1 2"""', '(y ** 2)'], {}), "('3^ 1 2', y ** 2)\n", (14762, 14780), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((14790, 14826), 'openfermion.FermionOperator', 'FermionOperator', (['"""2^"""', '(x + y * 4 - 2)'], {}), "('2^', x + y * 4 - 2)\n", (14805, 14826), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((14981, 15009), 'openfermion.FermionOperator', 'FermionOperator', (['"""3^ 0 3"""', 'x'], {}), "('3^ 0 3', x)\n", (14996, 15009), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((15021, 15054), 'openfermion.FermionOperator', 'FermionOperator', (['"""3^ 1 2"""', '(y ** 2)'], {}), "('3^ 1 2', y ** 2)\n", (15036, 15054), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((15064, 15100), 'openfermion.FermionOperator', 'FermionOperator', (['"""2^"""', '(x + y * 4 - 2)'], {}), "('2^', x + y * 4 - 2)\n", (15079, 15100), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((15368, 15387), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (15382, 15387), True, 'import numpy as np\n'), ((15433, 15448), 'symbol_for_openfermion.WrappedExpr', 'Symbol', (['"""const"""'], {}), "('const')\n", (15439, 15448), True, 'from symbol_for_openfermion import WrappedExpr as Symbol\n'), ((15661, 15670), 'sympy.Array', 'Array', (['T1'], {}), '(T1)\n', (15666, 15670), False, 'from sympy import Array\n'), ((15704, 15720), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (15718, 15720), True, 'import numpy as np\n'), ((16198, 16223), 'openfermion.jordan_wigner', 'jordan_wigner', (['fop_symbol'], {}), '(fop_symbol)\n', (16211, 16223), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((16612, 16637), 'openfermion.jordan_wigner', 'jordan_wigner', (['fop_number'], {}), '(fop_number)\n', (16625, 16637), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((16748, 16778), 'qulacs.QuantumCircuit', 'qulacs.QuantumCircuit', (['n_qubit'], {}), '(n_qubit)\n', (16769, 16778), False, 'import qulacs\n'), ((16840, 16868), 'qulacs.QuantumState', 'qulacs.QuantumState', (['n_qubit'], {}), '(n_qubit)\n', (16859, 16868), False, 'import qulacs\n'), ((17900, 17912), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (17910, 17912), True, 'import matplotlib.pyplot as plt\n'), ((17917, 17927), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17925, 17927), True, 'import matplotlib.pyplot as plt\n'), ((18075, 18115), 'qulacs.ParametricQuantumCircuit', 
'qulacs.ParametricQuantumCircuit', (['n_qubit'], {}), '(n_qubit)\n', (18106, 18115), False, 'import qulacs\n'), ((18377, 18405), 'qulacs.QuantumState', 'qulacs.QuantumState', (['n_qubit'], {}), '(n_qubit)\n', (18396, 18405), False, 'import qulacs\n'), ((18632, 18651), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (18646, 18651), True, 'import numpy as np\n'), ((18697, 18712), 'symbol_for_openfermion.WrappedExpr', 'Symbol', (['"""const"""'], {}), "('const')\n", (18703, 18712), True, 'from symbol_for_openfermion import WrappedExpr as Symbol\n'), ((18925, 18940), 'sympy.Array', 'sympy.Array', (['T1'], {}), '(T1)\n', (18936, 18940), False, 'import sympy\n'), ((18991, 19007), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (19005, 19007), True, 'import numpy as np\n'), ((19528, 19553), 'openfermion.jordan_wigner', 'jordan_wigner', (['fop_symbol'], {}), '(fop_symbol)\n', (19541, 19553), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((19942, 19967), 'openfermion.jordan_wigner', 'jordan_wigner', (['fop_number'], {}), '(fop_number)\n', (19955, 19967), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((20040, 20068), 'qulacs.QuantumState', 'qulacs.QuantumState', (['n_qubit'], {}), '(n_qubit)\n', (20059, 20068), False, 'import qulacs\n'), ((20549, 20561), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (20559, 20561), True, 'import matplotlib.pyplot as plt\n'), ((20566, 20576), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20574, 20576), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1741), 'qulacs.gate.to_matrix_gate', 'to_matrix_gate', (['rz_gate'], {}), '(rz_gate)\n', (1732, 1741), False, 'from qulacs.gate import to_matrix_gate, RZ\n'), ((4186, 4192), 'time.time', 'time', ([], {}), '()\n', (4190, 4192), False, 'from time import time\n'), ((4219, 4225), 'time.time', 'time', ([], {}), '()\n', (4223, 4225), False, 'from time import time\n'), ((4919, 4925), 'time.time', 'time', ([], {}), '()\n', (4923, 4925), False, 'from time import time\n'), ((5021, 5027), 'time.time', 'time', ([], {}), '()\n', (5025, 5027), False, 'from time import time\n'), ((5052, 5098), 'qulacs.ParametricQuantumCircuit', 'qulacs.ParametricQuantumCircuit', (['self._n_qubit'], {}), '(self._n_qubit)\n', (5083, 5098), False, 'import qulacs\n'), ((5904, 5910), 'time.time', 'time', ([], {}), '()\n', (5908, 5910), False, 'from time import time\n'), ((5938, 5944), 'time.time', 'time', ([], {}), '()\n', (5942, 5944), False, 'from time import time\n'), ((6323, 6329), 'time.time', 'time', ([], {}), '()\n', (6327, 6329), False, 'from time import time\n'), ((6357, 6363), 'time.time', 'time', ([], {}), '()\n', (6361, 6363), False, 'from time import time\n'), ((6835, 6858), 'numpy.array', 'np.array', (['_coef_symbols'], {}), '(_coef_symbols)\n', (6843, 6858), True, 'import numpy as np\n'), ((6982, 6988), 'time.time', 'time', ([], {}), '()\n', (6986, 6988), False, 'from time import time\n'), ((7016, 7022), 'time.time', 'time', ([], {}), '()\n', (7020, 7022), False, 'from time import time\n'), ((7912, 7918), 'time.time', 'time', ([], {}), '()\n', (7916, 7918), False, 'from time import time\n'), ((7947, 7953), 'time.time', 'time', ([], {}), '()\n', (7951, 7953), False, 'from time import time\n'), ((8631, 8637), 'time.time', 'time', ([], {}), '()\n', (8635, 8637), False, 'from time import time\n'), ((9828, 9834), 'time.time', 'time', ([], {}), '()\n', (9832, 9834), False, 'from time import time\n'), ((11566, 
11572), 'time.time', 'time', ([], {}), '()\n', (11570, 11572), False, 'from time import time\n'), ((11921, 11927), 'time.time', 'time', ([], {}), '()\n', (11925, 11927), False, 'from time import time\n'), ((15736, 15764), 'numpy.random.rand', 'np.random.rand', (['n_orb', 'n_orb'], {}), '(n_orb, n_orb)\n', (15750, 15764), True, 'import numpy as np\n'), ((15881, 15907), 'openfermion.FermionOperator', 'FermionOperator', (['""""""', 'const'], {}), "('', const)\n", (15896, 15907), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((19023, 19051), 'numpy.random.rand', 'np.random.rand', (['n_orb', 'n_orb'], {}), '(n_orb, n_orb)\n', (19037, 19051), True, 'import numpy as np\n'), ((19211, 19237), 'openfermion.FermionOperator', 'FermionOperator', (['""""""', 'const'], {}), "('', const)\n", (19226, 19237), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((10787, 10793), 'time.time', 'time', ([], {}), '()\n', (10791, 10793), False, 'from time import time\n'), ((11003, 11060), 'numpy.einsum', 'np.einsum', (['"""ij,j->i"""', 'self._coef_symbols', 'subs_number_vec'], {}), "('ij,j->i', self._coef_symbols, subs_number_vec)\n", (11012, 11060), True, 'import numpy as np\n'), ((11316, 11322), 'time.time', 'time', ([], {}), '()\n', (11320, 11322), False, 'from time import time\n'), ((14871, 14889), 'openfermion.jordan_wigner', 'jordan_wigner', (['fop'], {}), '(fop)\n', (14884, 14889), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((15145, 15163), 'openfermion.jordan_wigner', 'jordan_wigner', (['fop'], {}), '(fop)\n', (15158, 15163), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((17172, 17206), 'qulacs.state.inner_product', 'qulacs.state.inner_product', (['s0', 's1'], {}), '(s0, s1)\n', (17198, 17206), False, 'import qulacs\n'), ((17231, 17265), 'qulacs.state.inner_product', 'qulacs.state.inner_product', (['s0', 's2'], {}), '(s0, s2)\n', (17257, 17265), False, 'import qulacs\n'), ((17725, 17740), 'numpy.array', 'np.array', (['corr1'], {}), '(corr1)\n', (17733, 17740), True, 'import numpy as np\n'), ((17771, 17786), 'numpy.array', 'np.array', (['corr2'], {}), '(corr2)\n', (17779, 17786), True, 'import numpy as np\n'), ((17817, 17832), 'numpy.array', 'np.array', (['corr3'], {}), '(corr3)\n', (17825, 17832), True, 'import numpy as np\n'), ((17863, 17878), 'numpy.array', 'np.array', (['corrv'], {}), '(corrv)\n', (17871, 17878), True, 'import numpy as np\n'), ((20216, 20250), 'qulacs.state.inner_product', 'qulacs.state.inner_product', (['s0', 's1'], {}), '(s0, s1)\n', (20242, 20250), False, 'import qulacs\n'), ((20275, 20309), 'qulacs.state.inner_product', 'qulacs.state.inner_product', (['s0', 's2'], {}), '(s0, s2)\n', (20301, 20309), False, 'import qulacs\n'), ((20452, 20467), 'numpy.array', 'np.array', (['corr1'], {}), '(corr1)\n', (20460, 20467), True, 'import numpy as np\n'), ((20505, 20520), 'numpy.array', 'np.array', (['corr2'], {}), '(corr2)\n', (20513, 20520), True, 'import numpy as np\n'), ((8991, 9003), 'sympy.utilities.iterables.flatten', 'flatten', (['old'], {}), '(old)\n', (8998, 9003), False, 'from sympy.utilities.iterables import flatten\n'), ((11085, 11121), 'numpy.array', 'np.array', (['self._amplitude_value_list'], {}), '(self._amplitude_value_list)\n', (11093, 11121), True, 'import numpy as np\n'), ((15997, 16050), 'openfermion.FermionOperator', 'FermionOperator', (['((2 * p, 1), (2 * q, 0))', 'Tmat[p, q]'], {}), '(((2 * p, 1), (2 * 
q, 0)), Tmat[p, q])\n', (16012, 16050), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((16074, 16135), 'openfermion.FermionOperator', 'FermionOperator', (['((2 * p + 1, 1), (2 * q + 1, 0))', 'Tmat[p, q]'], {}), '(((2 * p + 1, 1), (2 * q + 1, 0)), Tmat[p, q])\n', (16089, 16135), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((17290, 17324), 'qulacs.state.inner_product', 'qulacs.state.inner_product', (['s0', 's3'], {}), '(s0, s3)\n', (17316, 17324), False, 'import qulacs\n'), ((17325, 17365), 'numpy.exp', 'np.exp', (['(-1.0j * qop_number.terms[()] * t)'], {}), '(-1.0j * qop_number.terms[()] * t)\n', (17331, 17365), True, 'import numpy as np\n'), ((17638, 17669), 'openfermion.get_sparse_operator', 'get_sparse_operator', (['qop_number'], {}), '(qop_number)\n', (17657, 17669), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((19327, 19380), 'openfermion.FermionOperator', 'FermionOperator', (['((2 * p, 1), (2 * q, 0))', 'Tmat[p, q]'], {}), '(((2 * p, 1), (2 * q, 0)), Tmat[p, q])\n', (19342, 19380), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((19404, 19465), 'openfermion.FermionOperator', 'FermionOperator', (['((2 * p + 1, 1), (2 * q + 1, 0))', 'Tmat[p, q]'], {}), '(((2 * p + 1, 1), (2 * q + 1, 0)), Tmat[p, q])\n', (19419, 19465), False, 'from openfermion import FermionOperator, jordan_wigner, get_sparse_operator\n'), ((6732, 6759), 'sympy.expand', 'sympy.expand', (['amp_expr.expr'], {}), '(amp_expr.expr)\n', (6744, 6759), False, 'import sympy\n')]
|
import sys
import numpy as np
from numpy.core._multiarray_umath import ndarray
import models as md
class individual:
model: md.individual_graph
start_direction: ndarray
end_direction: ndarray
end_position: ndarray
start_position: ndarray
hermite_matrix: ndarray
def __init__(self, state_person=1, state_sickness=1, prob_of_death=0.1, days_until_recovered=10,
steps=24, city_size=100):
# Cinematic properties
self.time = 0
# self.range = np.random.uniform(0,city_size)
self.range = city_size
self.steps = steps
self.delta = 1 / self.steps
self.current_step = 0
self.city_size = city_size
self.start_position = np.random.uniform(0, 100, [2, 1])
self.end_position = self.get_random_position(self.range)
self.start_direction = self.get_random_vector()
self.end_direction = self.get_random_vector()
self.matrix_cinematic = np.concatenate([self.start_position, self.end_position,
-self.start_direction, -self.end_direction], axis=1)
self.hermite_matrix = np.array([[1, -1, -1, 1], [0, 0, 3, -2],
[0, 1, -2, 1], [0, 0, -1, 1]],
dtype=np.float32)
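        # Cubic Hermite-style interpolation: each movement segment is evaluated as
        # p(t) = M_cinematic @ (H @ [1, t, t^2, t^3]^T), where M_cinematic stacks the
        # start/end positions and (negated) start/end tangents, and H is the basis
        # matrix stored above (see hermite_location below).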
self.move = self.steps_forward # set movement action
# Health state properties
self.health_condition = state_person # health state of the person. 0 for very Healthy, 1 for average, 2 for vulnerable
self.probability_of_death = prob_of_death
self.days_sick = 0
self.days_until_recovered = days_until_recovered
self.state_sickness = state_sickness # 0 for dead, 1 for susceptible, 2 for sick, 3 for immune
# model
self.model = md.individual_graph(self.city_size, self.start_position,
self.state_sickness)
self.pass_one_day = None
self.is_sick = self.non_sick_person_exam
if state_sickness == 2:
self.get_sick = self.get_sick_non_receptive_person
self.get_sick_non_immune()
else:
self.get_sick = self.get_sick_non_immune
self.pass_one_day = self.non_sick_day
# print('positions : \n start:',self.start_position,' \n end: ',self.end_position)
def reached_position(self):
# print('before: \n start:',self.start_position,' \n end: ',self.end_position)
# print('before: \n start:',self.start_direction,' \n end: ',self.end_direction)
pos = self.start_position.copy()
self.start_position = self.end_position.copy()
self.end_position = pos
        self.start_direction = self.end_direction  # reuse the old end tangent so the path stays smooth
self.end_direction = self.get_random_vector()
# print('positions : \n start:',self.start_position,' \n end: ',self.end_position)
# print('after : \n start:',self.start_direction,' \n end: ',self.end_direction)
self.matrix_cinematic = np.concatenate([self.start_position, self.end_position,
self.start_direction, self.end_direction], axis=1)
def get_random_position(self, radius):
"""
:rtype: ndarray
"""
pos = self.start_position + np.random.uniform(-radius / 2, radius / 2, [2, 1])
counter = 0
# print("start: ",self.start_position)
while pos[0] < 0 or pos[0] > self.city_size:
pos[0] = self.start_position[0] + np.random.uniform(-radius / 2, radius / 2, [1])
counter += 1
# print("new: ", pos)
if counter > 5:
pos[0] = np.random.uniform(0, self.city_size)
# if pos[0] > 0:
# pos[0] = self.city_size-1
# else:
# pos[0] = 0
break
counter = 0
while pos[1] < 0 or pos[1] > self.city_size:
pos[1] = self.start_position[1] + np.random.uniform(-radius / 2, radius / 2, [1])
counter += 1
# print("new: ", pos)
if counter > 5:
pos[1] = np.random.uniform(0, self.city_size)
# if pos[1] > 0:
# pos[1] = self.city_size - 1
# else:
# pos[1] = 0
break
return pos
def get_random_vector(self):
"""
        Method to get a random vector, used mainly as a direction (tangent) that defines the next position
:rtype: ndarray
"""
vector = np.random.normal(0, 25, [2, 1])
# return vector / np.linalg.norm(vector)
return vector
def steps_forward(self):
# print("time: {:.2f}".format(self.time))
# print("time: {:d}/{:d}".format(self.current_step,self.steps))
if self.current_step == self.steps:
# print("end moving")
self.reached_position()
self.time = 0
self.current_step = 0
# self.model.update_location(self.start_position)
return self.start_position
# print("move forward")
time = np.array([[1], [self.time], [self.time ** 2], [self.time ** 3]])
self.time += self.delta
self.current_step += 1
# pos = self.hermite_location(time)
# self.model.update_location(pos)
# return pos
return self.hermite_location(time)
def death_state_no_move(self):
"""
Method to give the position of dead units. Dead units don't move
:rtype: ndarray
:return:
"""
return self.start_position
def hermite_location(self, time):
# print("shape Cinematic:",self.matrix_cinematic.shape)
# print("shape hermite_matrix:",self.hermite_matrix.shape)
# print("shape time:",time.shape)
return np.matmul(self.matrix_cinematic, np.matmul(self.hermite_matrix, time))
def strong_immune_system_sick(self):
if np.random.uniform() < self.probability_of_death / 50:
self.set_death_state()
return True
self.sickness_evolution_one_day()
return False
def average_immune_system_sick(self):
if np.random.uniform() < self.probability_of_death:
self.set_death_state()
return True
self.sickness_evolution_one_day()
return False
def weak_immune_system_sick(self):
if np.random.uniform() < self.probability_of_death * 2:
self.set_death_state()
return True
self.sickness_evolution_one_day()
return False
def non_sick_day(self):
pass
def sickness_evolution_one_day(self):
self.days_sick += 1
# print("increase in days: ", self.days_sick)
if self.days_sick > self.days_until_recovered:
# print("cured")
self.state_sickness = 3 # recovered/immune
self.get_sick = self.get_sick_non_receptive_person
self.model.update_state(3)
self.pass_one_day = self.non_sick_day
self.is_sick = self.non_sick_person_exam
def set_death_state(self):
# print("new death")
self.start_position = self.steps_forward() # last step
self.move = self.death_state_no_move
self.get_sick = self.get_sick_non_receptive_person
self.state_sickness = 0
self.model.update_state(0)
self.pass_one_day = self.non_sick_day
self.is_sick = self.non_sick_person_exam
def get_sick_non_immune(self):
self.state_sickness = 2
self.is_sick = self.sick_person_exam
self.model.update_state(2)
if self.health_condition == 0:
self.pass_one_day = self.strong_immune_system_sick
elif self.health_condition == 1:
self.pass_one_day = self.average_immune_system_sick
else:
self.pass_one_day = self.weak_immune_system_sick
def get_sick_non_receptive_person(self):
"""
        Method that simulates an immune or dead person coming into contact with the virus. Nothing happens
        in this representation.
:return:
"""
pass
def sick_person_exam(self):
return True
def non_sick_person_exam(self):
return False
def set_model(self, model):
self.model = model
def draw(self, pipeline):
self.model.draw(pipeline)
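# Hedged usage sketch (illustrative; depends on the `models` module providing a
# working `individual_graph`): create one sick individual and advance it one day.
#
#   person = individual(state_sickness=2)
#   for _ in range(person.steps):
#       pos = person.move()        # next interpolated position, shape (2, 1)
#   person.pass_one_day()          # may recover, stay sick, or die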
|
[
"numpy.random.uniform",
"models.individual_graph",
"numpy.array",
"numpy.random.normal",
"numpy.matmul",
"numpy.concatenate"
] |
[((738, 771), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(100)', '[2, 1]'], {}), '(0, 100, [2, 1])\n', (755, 771), True, 'import numpy as np\n'), ((979, 1092), 'numpy.concatenate', 'np.concatenate', (['[self.start_position, self.end_position, -self.start_direction, -self.\n end_direction]'], {'axis': '(1)'}), '([self.start_position, self.end_position, -self.\n start_direction, -self.end_direction], axis=1)\n', (993, 1092), True, 'import numpy as np\n'), ((1167, 1260), 'numpy.array', 'np.array', (['[[1, -1, -1, 1], [0, 0, 3, -2], [0, 1, -2, 1], [0, 0, -1, 1]]'], {'dtype': 'np.float32'}), '([[1, -1, -1, 1], [0, 0, 3, -2], [0, 1, -2, 1], [0, 0, -1, 1]],\n dtype=np.float32)\n', (1175, 1260), True, 'import numpy as np\n'), ((1839, 1916), 'models.individual_graph', 'md.individual_graph', (['self.city_size', 'self.start_position', 'self.state_sickness'], {}), '(self.city_size, self.start_position, self.state_sickness)\n', (1858, 1916), True, 'import models as md\n'), ((3038, 3149), 'numpy.concatenate', 'np.concatenate', (['[self.start_position, self.end_position, self.start_direction, self.\n end_direction]'], {'axis': '(1)'}), '([self.start_position, self.end_position, self.\n start_direction, self.end_direction], axis=1)\n', (3052, 3149), True, 'import numpy as np\n'), ((4594, 4625), 'numpy.random.normal', 'np.random.normal', (['(0)', '(25)', '[2, 1]'], {}), '(0, 25, [2, 1])\n', (4610, 4625), True, 'import numpy as np\n'), ((5171, 5235), 'numpy.array', 'np.array', (['[[1], [self.time], [self.time ** 2], [self.time ** 3]]'], {}), '([[1], [self.time], [self.time ** 2], [self.time ** 3]])\n', (5179, 5235), True, 'import numpy as np\n'), ((3322, 3372), 'numpy.random.uniform', 'np.random.uniform', (['(-radius / 2)', '(radius / 2)', '[2, 1]'], {}), '(-radius / 2, radius / 2, [2, 1])\n', (3339, 3372), True, 'import numpy as np\n'), ((5920, 5956), 'numpy.matmul', 'np.matmul', (['self.hermite_matrix', 'time'], {}), '(self.hermite_matrix, time)\n', (5929, 5956), True, 'import numpy as np\n'), ((6011, 6030), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6028, 6030), True, 'import numpy as np\n'), ((6241, 6260), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6258, 6260), True, 'import numpy as np\n'), ((6463, 6482), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (6480, 6482), True, 'import numpy as np\n'), ((3539, 3586), 'numpy.random.uniform', 'np.random.uniform', (['(-radius / 2)', '(radius / 2)', '[1]'], {}), '(-radius / 2, radius / 2, [1])\n', (3556, 3586), True, 'import numpy as np\n'), ((3699, 3735), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.city_size'], {}), '(0, self.city_size)\n', (3716, 3735), True, 'import numpy as np\n'), ((4015, 4062), 'numpy.random.uniform', 'np.random.uniform', (['(-radius / 2)', '(radius / 2)', '[1]'], {}), '(-radius / 2, radius / 2, [1])\n', (4032, 4062), True, 'import numpy as np\n'), ((4175, 4211), 'numpy.random.uniform', 'np.random.uniform', (['(0)', 'self.city_size'], {}), '(0, self.city_size)\n', (4192, 4211), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 19:44:03 2021
@author: mike_ubuntu
"""
'''
From the data of a wave-equation solution we recover a single equation (tests of the src.structure methods).
'''
import time
import sys
sys.path.append('/media/mike_ubuntu/DATA/ESYS/')
import numpy as np
import copy
from collections import OrderedDict
from src.moeadd.moeadd import *
from src.moeadd.moeadd_supplementary import *
import src.globals as global_var
import src.structure as structure
from src.supplementary import memory_assesment
from src.evaluators import simple_function_evaluator
from src.cache.cache import Cache, upload_simple_tokens, download_variable
from src.token_family import Evaluator, Token_family, constancy_hard_equality
from src.supplementary import Define_Derivatives, factor_params_to_str
from src.evo_optimizer import Operator_director, Operator_builder
import src.sys_search_operators as operators
import matplotlib.pyplot as plt
def test_single_token_type():
seed = None
if type(seed) != type(None):
np.random.seed(seed)
folder= sys.path[-1] + 'preprocessing/Wave/'
boundary = 15
print(sys.path)
u_tensors = download_variable(folder + 'wave_HP.npy', folder + 'Derivatives.npy', boundary, time_axis=0)
u_names = Define_Derivatives('u', 3, 2)
global_var.init_caches(set_grids=False)
global_var.tensor_cache.memory_usage_properties(obj_test_case=u_tensors[0, ...], mem_for_cache_frac = 25)
upload_simple_tokens(u_names, u_tensors, global_var.tensor_cache)
u_tokens = Token_family('U')
u_tokens.set_status(unique_specific_token=False, unique_token_type=False,
meaningful = True, unique_for_right_part = True)
equal_params = {'power' : 0}
u_token_params = OrderedDict([('power', (1, 2))])
u_tokens.set_params(u_names, u_token_params, equal_params)
u_tokens.use_glob_cache()
# u_eval_params = {'params_names':['power'], 'params_equality':{'power' : 0}}
u_tokens.set_evaluator(simple_function_evaluator)
director = Operator_director()
director.operator_assembly()
tokens = [u_tokens,]
pop_constructor = operators.systems_population_constructor(tokens=tokens, terms_number=5,
max_factors_in_term=1,
eq_search_evo = director.constructor.operator)
equation_creation_params = {'eq_search_iters':2}
optimizer = moeadd_optimizer(pop_constructor, 7, 20, equation_creation_params, delta = 1/50., neighbors_number = 3)
evo_operator = operators.sys_search_evolutionary_operator(operators.mixing_xover,
operators.gaussian_mutation)
optimizer.set_evolutionary(operator=evo_operator)
    best_obj = np.concatenate((np.ones([1]),
                          np.zeros(shape=len([1 for token_family in tokens if token_family.status['meaningful']]))))
print(best_obj)
raise NotImplementedError
#test_single_token_type()
# del u_tensors
# u_tensors =
|
[
"sys.path.append",
"src.globals.tensor_cache.memory_usage_properties",
"src.cache.cache.upload_simple_tokens",
"numpy.random.seed",
"src.cache.cache.download_variable",
"src.token_family.Token_family",
"src.evo_optimizer.Operator_director",
"src.supplementary.Define_Derivatives",
"numpy.ones",
"src.globals.init_caches",
"collections.OrderedDict",
"src.sys_search_operators.sys_search_evolutionary_operator",
"src.sys_search_operators.systems_population_constructor"
] |
[((240, 288), 'sys.path.append', 'sys.path.append', (['"""/media/mike_ubuntu/DATA/ESYS/"""'], {}), "('/media/mike_ubuntu/DATA/ESYS/')\n", (255, 288), False, 'import sys\n'), ((1190, 1286), 'src.cache.cache.download_variable', 'download_variable', (["(folder + 'wave_HP.npy')", "(folder + 'Derivatives.npy')", 'boundary'], {'time_axis': '(0)'}), "(folder + 'wave_HP.npy', folder + 'Derivatives.npy',\n boundary, time_axis=0)\n", (1207, 1286), False, 'from src.cache.cache import Cache, upload_simple_tokens, download_variable\n'), ((1297, 1326), 'src.supplementary.Define_Derivatives', 'Define_Derivatives', (['"""u"""', '(3)', '(2)'], {}), "('u', 3, 2)\n", (1315, 1326), False, 'from src.supplementary import Define_Derivatives, factor_params_to_str\n'), ((1332, 1371), 'src.globals.init_caches', 'global_var.init_caches', ([], {'set_grids': '(False)'}), '(set_grids=False)\n', (1354, 1371), True, 'import src.globals as global_var\n'), ((1376, 1483), 'src.globals.tensor_cache.memory_usage_properties', 'global_var.tensor_cache.memory_usage_properties', ([], {'obj_test_case': 'u_tensors[0, ...]', 'mem_for_cache_frac': '(25)'}), '(obj_test_case=u_tensors[0,\n ...], mem_for_cache_frac=25)\n', (1423, 1483), True, 'import src.globals as global_var\n'), ((1486, 1551), 'src.cache.cache.upload_simple_tokens', 'upload_simple_tokens', (['u_names', 'u_tensors', 'global_var.tensor_cache'], {}), '(u_names, u_tensors, global_var.tensor_cache)\n', (1506, 1551), False, 'from src.cache.cache import Cache, upload_simple_tokens, download_variable\n'), ((1576, 1593), 'src.token_family.Token_family', 'Token_family', (['"""U"""'], {}), "('U')\n", (1588, 1593), False, 'from src.token_family import Evaluator, Token_family, constancy_hard_equality\n'), ((1801, 1833), 'collections.OrderedDict', 'OrderedDict', (["[('power', (1, 2))]"], {}), "([('power', (1, 2))])\n", (1812, 1833), False, 'from collections import OrderedDict\n'), ((2088, 2107), 'src.evo_optimizer.Operator_director', 'Operator_director', ([], {}), '()\n', (2105, 2107), False, 'from src.evo_optimizer import Operator_director, Operator_builder\n'), ((2197, 2340), 'src.sys_search_operators.systems_population_constructor', 'operators.systems_population_constructor', ([], {'tokens': 'tokens', 'terms_number': '(5)', 'max_factors_in_term': '(1)', 'eq_search_evo': 'director.constructor.operator'}), '(tokens=tokens, terms_number=5,\n max_factors_in_term=1, eq_search_evo=director.constructor.operator)\n', (2237, 2340), True, 'import src.sys_search_operators as operators\n'), ((2664, 2763), 'src.sys_search_operators.sys_search_evolutionary_operator', 'operators.sys_search_evolutionary_operator', (['operators.mixing_xover', 'operators.gaussian_mutation'], {}), '(operators.mixing_xover,\n operators.gaussian_mutation)\n', (2706, 2763), True, 'import src.sys_search_operators as operators\n'), ((1061, 1081), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1075, 1081), True, 'import numpy as np\n'), ((2908, 2920), 'numpy.ones', 'np.ones', (['[1]'], {}), '([1])\n', (2915, 2920), True, 'import numpy as np\n')]
|
from bert.preprocess import PAD_INDEX
from sklearn.metrics import f1_score, balanced_accuracy_score
import numpy as np
def mlm_accuracy(predictions, targets):
mlm_predictions, nsp_predictions = predictions
mlm_targets, is_nexts = targets
relevent_indexes = np.where(mlm_targets != PAD_INDEX)
relevent_predictions = mlm_predictions[relevent_indexes]
relevent_targets = mlm_targets[relevent_indexes]
corrects = np.equal(relevent_predictions, relevent_targets)
return corrects.mean()
def nsp_accuracy(predictions, targets):
mlm_predictions, nsp_predictions = predictions
mlm_targets, is_nexts = targets
corrects = np.equal(nsp_predictions, is_nexts)
return corrects.mean()
def classification_accuracy(predictions, targets):
# corrects = np.equal(predictions, targets)
# return corrects.mean()
return balanced_accuracy_score(targets, predictions)
def f1_weighted(predictions, targets):
return f1_score(targets, predictions, average='weighted')
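# Hedged usage sketch (illustrative arrays): the MLM metric expects paired
# (mlm, nsp) predictions/targets, with PAD_INDEX marking ignored positions.
#
#   mlm_pred = np.array([[5, 7, 2]]); mlm_tgt = np.array([[5, 9, PAD_INDEX]])
#   nsp_pred = np.array([1]);          is_next = np.array([1])
#   acc = mlm_accuracy((mlm_pred, nsp_pred), (mlm_tgt, is_next))  # -> 0.5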
|
[
"sklearn.metrics.f1_score",
"numpy.where",
"sklearn.metrics.balanced_accuracy_score",
"numpy.equal"
] |
[((273, 307), 'numpy.where', 'np.where', (['(mlm_targets != PAD_INDEX)'], {}), '(mlm_targets != PAD_INDEX)\n', (281, 307), True, 'import numpy as np\n'), ((438, 486), 'numpy.equal', 'np.equal', (['relevent_predictions', 'relevent_targets'], {}), '(relevent_predictions, relevent_targets)\n', (446, 486), True, 'import numpy as np\n'), ((671, 706), 'numpy.equal', 'np.equal', (['nsp_predictions', 'is_nexts'], {}), '(nsp_predictions, is_nexts)\n', (679, 706), True, 'import numpy as np\n'), ((891, 936), 'sklearn.metrics.balanced_accuracy_score', 'balanced_accuracy_score', (['targets', 'predictions'], {}), '(targets, predictions)\n', (914, 936), False, 'from sklearn.metrics import f1_score, balanced_accuracy_score\n'), ((996, 1046), 'sklearn.metrics.f1_score', 'f1_score', (['targets', 'predictions'], {'average': '"""weighted"""'}), "(targets, predictions, average='weighted')\n", (1004, 1046), False, 'from sklearn.metrics import f1_score, balanced_accuracy_score\n')]
|
import numpy as np
import astropy.units as u
from astropy.units.quantity import Quantity
from astropy.units import UnitTypeError, get_physical_type
from astropy.config.paths import get_cache_dir
from snewpy import get_models
import os
try:
from snewpy import model_path
except ImportError:
model_path = os.path.join(get_cache_dir(), 'snewpy/models')
import logging
from snewpy.models import ccsn, presn
def init_model(model_name, download=True, download_dir=model_path, **user_param):
"""Attempts to retrieve instantiated SNEWPY model using model class name and model parameters.
If a model name is valid, but is not found and `download`=True, this function will attempt to download the model
Parameters
----------
model_name : str
Name of SNEWPY model to import, must exactly match the name of the corresponding model class
download : bool
Switch for attempting to download model data if the first load attempt failed due to a missing file.
download_dir : str
Local directory to download model files to.
user_param : varies
User-requested model parameters used to initialize the model, if one is found.
Error checking is performed during model initialization
Raises
------
ValueError
If the requested model_name does not match any SNEWPY models
See Also
--------
snewpy.models.ccsn
snewpy.models.presn
Example
-------
>>> from snewpy.models.registry import init_model; import astropy.units as u
    >>> init_model('Nakazato_2013', progenitor_mass=30*u.Msun, metallicity=0.004, revival_time=0*u.s, eos='shen')
Nakazato_2013 Model: nakazato-shen-BH-z0.004-s30.0.fits
Progenitor mass : 30.0 solMass
EOS : Shen
Metallicity : 0.004
Revival time : 0.0 ms
"""
if model_name in dir(ccsn):
module = ccsn
elif model_name in dir(presn):
module = presn
else:
raise ValueError(f"Unable to find model with name '{model_name}' in snewpy.models.ccsn or snewpy.models.presn")
try:
return getattr(module, model_name)(**user_param)
except FileNotFoundError as e:
logger = logging.getLogger()
logger.warning(f"Unable to find model {model_name} in {download_dir}")
if not download:
raise e
logger.warning(f"Attempting to download model...")
get_models(model_name, download_dir)
return getattr(module, model_name)(**user_param)
def check_param_values(model, **user_param):
"""Performs generic check that the requested model parameters have valid values and units for the requested
SNEWPY model.
Parameters
----------
model : snewpy.model.SupernovaModel
Model class used to perform parameter check
user_param : varies
User-requested model parameters to be tested for validity. MUST be provided as keyword arguments that match the
model `param` class member
Raises
------
ValueError
If invalid model parameters are provided based on units, allowed values, etc.
UnitTypeError
If invalid units are provided for a model parameter
See Also
--------
snewpy.models.ccsn
snewpy.models.presn
"""
model_param = model.param
# Check that the appropriate number of params are provided
if len(user_param) != len(model_param):
raise ValueError(f"Invalid model parameters, expected {len(model_param)} "
f"but {len(user_param)} were given")
# Check that user-requested params have valid units and values
for (key, allowed_params), user_param in zip(model_param.items(), user_param.values()):
# If both have units, check that the user param value is valid. If valid, continue. Else, error
if type(user_param) == Quantity and type(allowed_params) == Quantity:
if get_physical_type(user_param.unit) != get_physical_type(allowed_params.unit):
raise UnitTypeError(f"Incorrect units {user_param.unit} provided for parameter {key}, "
f"expected {allowed_params.unit}")
elif user_param.to(allowed_params.unit).value in allowed_params.value:
continue
else:
raise ValueError(f"Invalid value '{user_param}' provided for parameter {key}, "
f"allowed value(s): {allowed_params}")
# If one only one has units, then error
elif (type(user_param) == Quantity) ^ (type(allowed_params) == Quantity):
# User param has units, model param is unitless
if type(user_param) == Quantity:
raise ValueError(f"Invalid units {user_param.unit} for parameter {key} provided, expected None")
else:
raise ValueError(f"Missing units for parameter {key}, expected {allowed_params.unit}")
# Check that unitless user param value is valid. If valid, continue. Else, Error
elif user_param in allowed_params:
continue
else:
raise ValueError(f"Invalid value '{user_param}' provided for parameter {key}, "
f"allowed value(s): {allowed_params}")
def get_sukhbold_2015_fname(eos, progenitor_mass, **kwargs):
if eos not in ('LS220', 'SFHo'):
raise ValueError(f'Invalid value for model argument `eos`, expected ("LS220", "SFHo") given "{eos}"')
if progenitor_mass.value == 9.6:
fname = f'sukhbold-{eos}-z{progenitor_mass.value:3.1f}.fits'
elif progenitor_mass.value == 27.0:
fname = f'sukhbold-{eos}-s{progenitor_mass.value:3.1f}.fits'
else:
raise ValueError('Invalid value for model argument `progenitor_mass`, expected (9.6, 27.0) Msun, '
f'given {progenitor_mass}')
return fname
def get_tamborra_2014_fname(progenitor_mass, **kwargs):
if progenitor_mass.value in (20.0, 27.0):
fname = f's{progenitor_mass.value:3.1f}c_3D_dir1'
else:
raise ValueError('Invalid value for model argument `progenitor_mass`, expected (20.0, 27.0) Msun, '
f'given {progenitor_mass}')
return fname
def get_bollig_2016_fname(progenitor_mass, **kwargs):
if progenitor_mass.value in (11.2, 27.0):
fname = f's{progenitor_mass.value:3.1f}c'
else:
        raise ValueError('Invalid value for model argument `progenitor_mass`, expected (11.2, 27.0) Msun, '
f'given {progenitor_mass.value}')
return fname
def get_walk_2018_fname(progenitor_mass=15. * u.Msun, **kwargs):
if progenitor_mass.value != 15.0:
raise ValueError('Invalid value for model argument `progenitor_mass`, expected (15.0) Msun, '
f'given {progenitor_mass}')
return f's{progenitor_mass.value:3.1f}c_3D_nonrot_dir1'
def get_walk_2019_fname(progenitor_mass=40. * u.Msun, **kwargs):
if progenitor_mass.value != 40.0:
raise ValueError('Invalid value for model argument `progenitor_mass`, expected (40.0) Msun, '
f'given {progenitor_mass}')
return f's{progenitor_mass.value:3.1f}c_3DBH_dir1'
def get_oconnor_2013_params(progenitor_mass, eos, **kwargs):
if eos not in ('LS220', 'HShen'):
        raise ValueError(f'Invalid value for model argument `eos`, expected ("LS220", "HShen"), given "{eos}"')
if progenitor_mass.value not in (12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 35, 40, 45, 50, 55, 60, 70, 80, 100, 120):
raise ValueError('Invalid value for model argument `progenitor_mass`, expected (12..33, 35, 40, 45, 50, 55, 60,'
f' 70, 80, 100, 120) Msun, given {progenitor_mass}')
return int(progenitor_mass.value), eos
def get_oconnor_2015_fname(**kwargs):
return 'M1_neutrinos.dat'
def get_zha_2021_fname(progenitor_mass, **kwargs):
if progenitor_mass.value not in (16, 17, 18, 19.89, 19, 20, 21, 22.39, 22, 23, 24, 25, 26, 30, 33):
raise ValueError('Invalid value for model argument `progenitor_mass`, expected (16, 17, 18, 19.89, 19, 20, 21, '
f'22.39, 22, 23, 24, 25, 26, 30, 33) Msun, given {progenitor_mass}')
if progenitor_mass.value.is_integer():
fname = f's{int(progenitor_mass.value):2d}.dat'
else:
fname = f's{progenitor_mass.value:4.2f}.dat'
return fname
def get_warren_2020_fname(progenitor_mass, turb_mixing, **kwargs):
if turb_mixing not in (1.23, 1.25, 1.27):
raise ValueError('Invalid value for model argument `alpha_lambda`, expected (1.23, 1.25, 1.27) '
                         f'given {turb_mixing}')
if progenitor_mass.value in np.arange(9.25, 13., 0.25) or progenitor_mass.value in np.arange(13., 30.1, 0.1) or \
progenitor_mass.value == 90.:
fname = f'stir_a{turb_mixing:3.2f}/stir_multimessenger_a{turb_mixing:3.2f}_m{progenitor_mass.value:.2f}.h5'
elif progenitor_mass.value in (31, 32, 33, 34, 35, 40, 45, 50, 55, 60, 70, 80, 100, 120):
fname = f'stir_a{turb_mixing:3.2f}/stir_multimessenger_a{turb_mixing:3.2f}_m{progenitor_mass.value:d}.h5'
else:
raise ValueError(f'Invalid value for model argument `progenitor_mass`, given {progenitor_mass}, expected '
'9.25.. 0.25 ..13.0, 13.0.. 0.1 .. 30.0, 31..33, 35.. 5 ..60, 60.. 10 ..90, 80.. 20 ..120')
return fname
def get_kuroda_2020_fname(rot_vel, mag_field_exponent, **kwargs):
if rot_vel.value not in (0, 1):
raise ValueError(f'Invalid value for model argument `rot_vel`, expected (0, 1) rad / s, given {rot_vel}')
if mag_field_exponent not in (0, 12, 13):
raise ValueError('Invalid value for model argument `mag_field_exponent, expected (0, 12, 13), '
f'given {mag_field_exponent}')
if (rot_vel.value == 0 and mag_field_exponent in (12, 13)) or (rot_vel.value == 1 and mag_field_exponent == 0):
raise ValueError('Invalid combination of model arguments, Allowed combinations of model arguments `rot_val` and'
' `mag_field_exponent` are (0 rad/s, 0), (1 rad/s, 12), and (1 rad/s, 13). Given '
f'{(rot_vel, mag_field_exponent)}')
return f'LnuR{int(rot_vel.value):1d}0B{int(mag_field_exponent):02d}.dat'
def get_fornax_2019_fname(progenitor_mass, **kwargs):
if progenitor_mass.value not in (9, 10, 12, 13, 14, 15, 16, 19, 25, 60):
        raise ValueError(f'Invalid value for model argument `progenitor_mass`, given {progenitor_mass}, expected '
                         '(9, 10, 12, 13, 14, 15, 16, 19, 25, 60)')
if progenitor_mass.value == 16:
return f'lum_spec_{int(progenitor_mass.value):2d}M_r250.h5'
return f'lum_spec_{int(progenitor_mass.value):2d}M.h5'
lookup_dict = {'Sukhbold_2015': get_sukhbold_2015_fname,
'Tamborra_2014': get_tamborra_2014_fname,
'Bollig_2016': get_bollig_2016_fname,
'Walk_2018': get_walk_2018_fname,
'Walk_2019': get_walk_2019_fname,
'OConnor_2013': get_oconnor_2013_params, # UNIQUE INIT SIGNATURE
'OConnor_2015': get_oconnor_2015_fname,
'Zha_2021': get_zha_2021_fname,
'Warren_2020': get_warren_2020_fname,
'Kuroda_2020': get_kuroda_2020_fname,
'Fornax_2019': get_fornax_2019_fname}
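# Hedged usage sketch (argument values are illustrative): resolve the data file
# name for a given model from its user-supplied parameters.
#
#   fname = lookup_dict['Sukhbold_2015'](eos='LS220', progenitor_mass=27.0 * u.Msun)
#   # -> 'sukhbold-LS220-s27.0.fits'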
# import numpy as np
# from numbers import Number
# from scipy.special import loggamma, gdtr
#
# def _energy_pdf(a, Ea, E):
# return np.exp((1 + a) * np.log(1 + a) - loggamma(1 + a) +
# a * np.log(E) - (1 + a) * np.log(Ea) - (1 + a) * (E / Ea))
#
# def parts_by_index(x, n):
# """Returns a list of size-n numpy arrays containing indices for the
# elements of x, and one size-m array ( with m<n ) if there are remaining
# elements of x.
#
# Returns
# -------
# i_part : list
# List of index partitions (partitions are numpy array).
# """
# nParts = x.size//n
# i_part = [ np.arange( i*n, (i+1)*n ) for i in range(nParts) ]
#
# # Generate final partition of size <n if x.size is not multiple of n
# if len(i_part)*n != x.size:
# i_part += [ np.arange( len(i_part)*n, x.size ) ]
#
# # Ensure that last partition always has 2 or more elements
# if len(i_part[-1]) < 2:
# i_part[-2] = np.append(i_part[-2], i_part[-1])
# i_part = i_part[0:-1]
#
# return i_part
#
#
#
# def energy_pdf(a, Ea, E, *, limit_size=True):
# # TODO: Figure out how to reconcile this
# if isinstance(E, np.ndarray):
# if limit_size and E.size > 1e6:
# raise ValueError('Input argument size exceeded. Argument`E` is a np.ndarray with size {E.size}, which may '
# 'lead to large memory consumption while this function executes. To proceed, please reduce '
# 'the size of `E` or use keyword argument `limit_size=False`')
# if all(isinstance(var, np.ndarray) for var in (a, Ea)):
# if a.size == Ea.size:
# # Vectorized function can lead to unregulated memory usage, better to define it only when needed
# _vec_energy_pdf = np.vectorize(_energy_pdf, excluded=['E'], signature='(1,n),(1,n)->(m,n)')
# return _vec_energy_pdf(a=a, Ea=Ea, E=E)
# else:
# raise ValueError('Invalid input array size. Arguments `a` and `Ea` must have the same size. '
# f'Given sizes ({a.size}) and ({Ea.size}) respectively.')
# elif all(isinstance(var, Number) for var in (a, Ea)):
# return _energy_pdf(a, Ea, E)
# else:
# raise ValueError('Invalid argument types, arguments `a` and `Ea` must be numbers or np.ndarray. '
# f'Given types ({type(a)}) and ({type(Ea)}) respectively.')
#
# # def integrate_by_partitions(func, func_args, func_kwargs, axis=1, partition_size=1000):
# # # Perform core calculation on partitions in E to regulate memory usage in vectorized function
# # _size = func_args.values()[axis].size
# # result = np.zeros(_size)
# # idx = 0
# # if limit < _size:
# # idc_split = np.arange(E.size, step=limit)
# # for idx in idc_split[:-1]:
# # _E = Enu[idx:idx + limit]
# # _phot = phot[idx:idx + limit]
# # result[:, idx:idx + limit] = np.trapz(self.energy_spectrum(t=t, E=_E, flavor=flavor).value * _phot, _E,
# # axis=0)
# #
# # _E = Enu[idx:]
# # _phot = phot[idx:]
# # result[:, idx:idx + limit] = np.trapz(self.energy_spectrum(t=t, E=_E, flavor=flavor).value * _phot, _E, axis=0)
# # return result
#
# def energy_cdf(a, Ea, E):
# return gdtr(1., a + 1., (a + 1.) * (E / Ea))
|
[
"snewpy.get_models",
"astropy.units.UnitTypeError",
"astropy.config.paths.get_cache_dir",
"astropy.units.get_physical_type",
"numpy.arange",
"logging.getLogger"
] |
[((327, 342), 'astropy.config.paths.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (340, 342), False, 'from astropy.config.paths import get_cache_dir\n'), ((2196, 2215), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2213, 2215), False, 'import logging\n'), ((2407, 2443), 'snewpy.get_models', 'get_models', (['model_name', 'download_dir'], {}), '(model_name, download_dir)\n', (2417, 2443), False, 'from snewpy import get_models\n'), ((8764, 8791), 'numpy.arange', 'np.arange', (['(9.25)', '(13.0)', '(0.25)'], {}), '(9.25, 13.0, 0.25)\n', (8773, 8791), True, 'import numpy as np\n'), ((8819, 8845), 'numpy.arange', 'np.arange', (['(13.0)', '(30.1)', '(0.1)'], {}), '(13.0, 30.1, 0.1)\n', (8828, 8845), True, 'import numpy as np\n'), ((3900, 3934), 'astropy.units.get_physical_type', 'get_physical_type', (['user_param.unit'], {}), '(user_param.unit)\n', (3917, 3934), False, 'from astropy.units import UnitTypeError, get_physical_type\n'), ((3938, 3976), 'astropy.units.get_physical_type', 'get_physical_type', (['allowed_params.unit'], {}), '(allowed_params.unit)\n', (3955, 3976), False, 'from astropy.units import UnitTypeError, get_physical_type\n'), ((4000, 4122), 'astropy.units.UnitTypeError', 'UnitTypeError', (['f"""Incorrect units {user_param.unit} provided for parameter {key}, expected {allowed_params.unit}"""'], {}), "(\n f'Incorrect units {user_param.unit} provided for parameter {key}, expected {allowed_params.unit}'\n )\n", (4013, 4122), False, 'from astropy.units import UnitTypeError, get_physical_type\n')]
|
##############################################################################
#
# Author: <NAME>
# Date: 30 April 2019
# Name: file_decoder.py
# Description:
# This script takes in .mat files (produced by record_spectrum_orbcomm.py) and
# produces multiple plots of the signals spectrum, constellation, timing
# recovery, eye diagram, and IQ samples. Additionally, raw decoded bits are
# printed and saved to a file (./packets.txt).
#
##############################################################################
import glob
from datetime import datetime, timedelta
import ephem
import numpy as np
from scipy.io import loadmat
import scipy.signal as scisig
import matplotlib.pyplot as plt
from sat_db import active_orbcomm_satellites
from orbcomm_packet import packet_dict
from helpers import butter_lowpass_filter, complex_mix, rrcosfilter
from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef
# speed of light
c = 299792458.0 # m/s
# Perform brute force error correction
brute_force_1bit_error_correction = True
# Where to save decoded packets
packet_file = r'./packets.txt'
# Where the data files are located
data_dir = r'./data/'
sample_file = sorted(glob.glob(data_dir + "*.mat"))[0]
# Load the .mat file and print some of the metadata
data = loadmat(sample_file)
print("Filename: {}".format(sample_file))
print("Timestamp: {}".format(data['timestamp'][0][0]))
print("Data collected on: {}".format(datetime.utcfromtimestamp(data['timestamp'][0][0])))
print("Satellites in recording: {}".format(', '.join(data['sats'])))
print("SDR Sample rate: {} Hz".format(data['fs'][0][0]))
print("SDR Center frequency: {} Hz".format(data['fc'][0][0]))
frequencies = []
for sat_name in data['sats']:
freq1, freq2 = active_orbcomm_satellites[sat_name]['frequencies']
frequencies.append(freq1)
frequencies.append(freq2)
print("Satellite frequencies: {}".format(', '.join([str(xx) for xx in frequencies])))
# Extract the values from file for some further processing
samples = data['samples'][0]
center_freq = data['fc'][0][0]
sample_rate = data['fs'][0][0]
timestamp = data['timestamp'][0][0]
lat = data['lat'][0][0]
lon = data['lon'][0][0]
alt = data['alt'][0][0]
print("Number of samples: {}".format(len(samples)))
# Check which satellite frequency is contained in the recording
# If both frequencies are present, decode the lower one.
sat_center_frequency = 0
for freq in frequencies:
if freq > (center_freq - sample_rate/2.0) and freq < (center_freq + sample_rate/2.0):
sat_center_frequency = freq
break
if sat_center_frequency == 0:
print("Satellite channel frequencies are not in the recorded spectrum.")
exit()
# To force decoding the upper frequency uncomment this line
# sat_center_frequency = frequencies[1]
# PyEphem observer
obs = ephem.Observer()
obs.lat, obs.lon = '{}'.format(lat), '{}'.format(lon)
obs.elevation = alt # Technically this is the altitude of the observer
obs.date = datetime.utcfromtimestamp(timestamp)
# Normalize samples
samples /= np.median(np.abs(samples))
# Get the TLE information from the .mat file
sat_line0, sat_line1, sat_line2 = [str(xx) for xx in data['tles'][0]]
sat = ephem.readtle(sat_line0, sat_line1, sat_line2)
sat.compute(obs)
# Use the TLE info that was in the .mat file to calculate doppler shift
# of the satellite's transmission
relative_vel = sat.range_velocity
doppler = c/(c+relative_vel) * sat_center_frequency - sat_center_frequency
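# Worked example (illustrative numbers only): a satellite closing at 5 km/s
# (relative_vel = -5000 m/s) on a 137.5 MHz channel gives
# doppler = c/(c - 5000)*137.5e6 - 137.5e6 ~ +2.3 kHz, i.e. the channel is received
# roughly 2.3 kHz above its nominal frequency while the satellite approaches.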
# Mix the samples to baseband (compensating for doppler shift)
# There will be a residual carrier error because of the RTL-SDR frequency offset
freq_shift = center_freq - sat_center_frequency - doppler
mixed_down_samples, _ = complex_mix(samples,
freq_shift,
sample_rate)
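# complex_mix comes from the local helpers module (not shown here); it is assumed to
# multiply the samples by a complex exponential at freq_shift (something like
# samples * exp(2j*pi*freq_shift*n/sample_rate)) and to also return a final phase,
# which is discarded here.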
filter_freq = 10e3
baud_rate = 4800.0
samples_per_symbol = 2 # We should only need 2 samples per symbol
if sample_rate == 1.2288e6:
# Low pass filter the signal
filtered_samples = butter_lowpass_filter(mixed_down_samples, filter_freq, sample_rate, order=5)
# Decimated signal
decimation = int(sample_rate/(samples_per_symbol*baud_rate))
decimated_samples = filtered_samples[::decimation]
elif sample_rate == 19200.0:
print("Single channel recording detected.")
filter_freq = 4e3
filtered_samples = butter_lowpass_filter(mixed_down_samples, filter_freq, sample_rate, order=2)
decimation = 2
decimated_samples = filtered_samples[::decimation]
# estimate remaining carrier error (RTLSDR frequency error)
# signal to the fourth power, take the fft, peak is at frequency offset
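# Why the 4th power: the demodulator below treats symbols as +/-90 degree phase steps
# (a four-phase constellation), so raising the signal to the 4th power strips the
# modulation and leaves a tone at 4x the residual carrier offset; this is also why the
# peak frequency found below is divided by 4.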
rbw = 1
nperseg = int(sample_rate/decimation/rbw)
# Raise the first min(50e3, len(decimated_samples)) decimated samples to the 4th power.
signal_to_4th_power = np.power(decimated_samples[:int(min(len(decimated_samples), 50e3)):], 4)
f, pxx = scisig.welch(signal_to_4th_power,
fs=sample_rate/decimation,
return_onesided=False,
nperseg=nperseg,
scaling='density')
f = (np.roll(f, int(len(f)/2)))
pxx = np.roll(pxx, int(len(pxx)/2))
search_window = int(1000. / ((sample_rate/decimation)/nperseg)) # search +/- 1 kHz around fc
frequency_peak = np.argmax(pxx[int(len(pxx)/2 - search_window):int(len(pxx)/2 + search_window)])
freq_offset = -(frequency_peak - search_window)*(sample_rate/decimation/nperseg) / 4
baseband_samples, _ = complex_mix(decimated_samples, freq_offset, sample_rate/decimation)
print("Remaining frequency offset after doppler compensation: {} Hz".format(freq_offset))
# Create RRC taps
alpha = 0.4
baudrate = 1.
num_of_symbols_half_filter = 8.
rrc_num_taps = samples_per_symbol * num_of_symbols_half_filter * 2. + 1.
t_idx, rrc_taps = rrcosfilter(rrc_num_taps, alpha, baudrate, samples_per_symbol)
matched_filtered_samples = scisig.lfilter(rrc_taps, [1.0], baseband_samples)
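# rrcosfilter comes from the local helpers module (not shown here); it is assumed to
# return (time indices, taps) of a root-raised-cosine filter. Filtering with an RRC
# shape here acts as an (approximate) matched filter for the received pulse shape,
# improving SNR at the symbol instants before timing recovery.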
# Manually find a close timing offset
# For only 2 samples per symbol, 0 works all the time.
sample_delay = 0
tau = 0. # initial timing offset estimate
dtau = 0. # initial timing _rate_ offset estimate
buf = np.zeros(5, dtype=np.complex64) # filter buffer
th = np.array([-2., -1., 0., 1., 2.]) # time vector for interpolating filter
w = np.sqrt(np.hamming(5).T) # window function for interpolating filter
alpha = 0.0005 # loop filter bandwidth (timing _rate_ adjustment factor)
beta = 2*np.sqrt(alpha) # (timing _phase_ adjustment factor)
counter = sample_delay + 1 # interpolating filter adds delay
time_recovery_samples = np.zeros(len(matched_filtered_samples), dtype=np.complex64)
buf_dz = [0.,0.,0.]
dtau_vect = np.zeros(len(matched_filtered_samples))
tau_vect = np.zeros(len(matched_filtered_samples))
timing_error = np.zeros(len(matched_filtered_samples))
err = 0.0
# Timing recovery
i = 1
i_offset = 0
while i < len(time_recovery_samples) - 1 and i - i_offset + 2 < len(matched_filtered_samples):
# push sample into interpolating filter
# If the time offset exceeds one sample in either direction
if tau > 1.0:
# Use the samples from last time
i_offset += 1
tau += -1
elif tau < -1.0:
# Push two samples into the buffer
buf[:-2] = buf[2:]
buf[-2:] = matched_filtered_samples[int(i - i_offset):int(i - i_offset + 2)]
i_offset += -1
tau += 1
else:
# Or just normally add one sample to the buffer
buf[:-1] = buf[1:]
buf[-1] = matched_filtered_samples[int(i - i_offset)]
# interpolate matched filter output
hi = np.sinc(th - tau) * w # interpolating filter coefficients
hi /= np.sum(hi)
time_recovery_samples[i] = np.dot(buf, np.flip(hi, 0)) # compute matched filter output
# take (approximate) derivative of filter output
buf_dz[:-1] = buf_dz[1:]
buf_dz[-1] = time_recovery_samples[i]
dz = -np.dot(buf_dz, np.array([-1, 0, 1]))
# determine if an output sample needs to be computed
counter = counter + 1
if counter >= samples_per_symbol:
# decrement counter by samples per symbol
counter = counter - samples_per_symbol
# compute timing error signal, accounting for delay
err = np.tanh( (dz * np.conj(time_recovery_samples[i-1])).real )
# update timing rate change
dtau = dtau + alpha * err
tau = tau + beta * err
timing_error[i] = err
# update timing error
tau = tau + dtau/samples_per_symbol
# save results for plotting
dtau_vect[i] = dtau
tau_vect[i] = tau + i_offset
i += 1
# Plot timing offset
plt.figure()
plt.subplot(311)
plt.title("Timing offset (tau)")
plt.plot(tau_vect)
plt.subplot(312)
plt.title("Derivative (Dtau)")
plt.plot(dtau_vect)
plt.tight_layout()
plt.subplot(313)
plt.title("Timing error signal")
plt.plot(timing_error)
plt.tight_layout()
# Plot eye-diagram
plt.figure()
plt.subplot(311)
plt.title("Eye Diagram before matched filter")
offset = samples_per_symbol * 200
num_plots = 8
length = 64
for xx in range(num_plots):
plt.plot(baseband_samples[offset:offset+length].imag)
offset += length
plt.subplot(312)
plt.title("After matched filter")
offset = samples_per_symbol * 200
for xx in range(num_plots):
plt.plot(matched_filtered_samples[offset:offset+length].imag)
offset += length
plt.grid()
plt.subplot(313)
plt.title("After timing recovery")
offset = samples_per_symbol * 200
for xx in range(num_plots):
plt.plot(time_recovery_samples[offset:offset+length].imag)
offset += length
plt.grid()
plt.tight_layout()
# Costas loop
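# Descriptive note (added): second-order Costas-style carrier tracking. The detector
# sign(I)*Q - sign(Q)*I is zero when symbols sit on the +/-45 / +/-135 degree decision
# diagonals and grows with residual phase error; alpha is the proportional (phase) gain
# and beta the integral (frequency) gain of the loop.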
phase_est = np.zeros(len(time_recovery_samples)+1)
alpha = 0.03
beta = 0.2 * alpha**2
frequency_out = 0.0
freq_out = []
for idx, sample in enumerate(time_recovery_samples):
signal_out = sample * np.exp(-1j * phase_est[idx])
phase_error = np.sign(signal_out.real)*signal_out.imag - np.sign(signal_out.imag)*signal_out.real
frequency_out += beta * phase_error
phase_est[idx+1] = phase_est[idx] + alpha * phase_error + frequency_out
freq_out.append(frequency_out)
# Alternative phase correcting code:
# PLL code loosely based on liquid dsp simple pll tutorial:
# http://liquidsdr.org/blog/pll-simple-howto/
## After timing recovery, performing a fine-frequency PLL
## First take the signal to the fourth power, then low pass filter
# filter_freq = 0.5e3
# signal_to_4th_power2 = np.power(time_recovery_samples, 4)
# filtered_sig4th = butter_lowpass_filter(signal_to_4th_power2, filter_freq, sample_rate/decimation, order=5)
# phase_est = np.zeros(len(time_recovery_samples)+1)
# alpha = 0.01
# beta = 0.2 * alpha**2
# frequency_out = 0.0
# freq_out = []
# for idx, sample in enumerate(filtered_sig4th):
# signal_out = np.exp(1j * phase_est[idx])
# phase_error = np.angle( sample * np.conj(signal_out) )
# old_freq = frequency_out
# frequency_out += beta * phase_error
# phase_est[idx+1] = phase_est[idx] + alpha * phase_error + frequency_out
# freq_out.append(frequency_out)
plt.figure()
plt.subplot(211)
plt.title('Phase output of PLL')
plt.plot(phase_est)
plt.grid()
plt.subplot(212)
plt.title('Frequency of PLL')
plt.plot(freq_out)
plt.grid()
# Phase compensate the IQ samples
phase_comp_samples = time_recovery_samples * np.conj(np.exp(1j*phase_est[:-1]))
# Decode to bits
# normalize phase compensated samples;
phase_comp_samples /= np.median(np.abs(phase_comp_samples))
# Select peak sample from each symbol
demod_symbols = phase_comp_samples[::samples_per_symbol]
#Differential demodulation
# A 1 is a 90 degree phase shift forward from the last symbol
# A 0 is a -90 degree phase shift forward from the last symbol
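# Example: consecutive symbols at 10 deg then 100 deg give angle = +90 deg -> bit 1;
# 10 deg then -80 deg give angle = -90 deg -> bit 0.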
bits = []
angles = []
for xx in range(1, len(demod_symbols)):
angle = np.angle(demod_symbols[xx], deg=True) - np.angle(demod_symbols[xx-1], deg=True)
if angle > 180:
angle -= 360
if angle < -180:
angle += 360
angles.append(angle)
bit = 0
if angle > 0: bit = 1
bits.append(bit)
# Plot the angles between each symbol, should be +/- 90 degrees
plt.figure()
plt.title('Angle between successive symbols')
plt.xlabel('symbol number')
plt.ylabel('Angle (degrees)')
plt.plot(angles, 'x')
plt.grid()
# convert to a string of 0's and 1's
bit_string = ''.join([str(bit) for bit in bits])
# Find first full packet
size_of_packets = 12*8 # bits
num_of_possible_packets = len(bit_string) // size_of_packets
bit_offset = 0
print("Number of possible packets: {}".format(num_of_possible_packets))
# Extract the headers from the packet dictionary
packet_headers = [packet_dict[packet_type]['header'] for packet_type in packet_dict]
# for all bit offsets (of the length of the packets)
# calculate a score for most valid headers of that offset
# this also checks a bit-reversed score (in case my endianness is reversed)
scores = np.zeros(size_of_packets)
revscores = np.zeros(size_of_packets)
for xx in range(0, size_of_packets):
for yy in range(xx, len(bit_string)-xx-8, size_of_packets):
if bit_string[yy:yy+8][::-1] in packet_headers:
scores[xx] += 1
if bit_string[yy:yy+8] in packet_headers:
revscores[xx] += 1
reverse = False
if np.max(scores) < np.max(revscores):
reverse = True
if reverse:
bit_offset = np.argmax(revscores)
else:
bit_offset = np.argmax(scores)
print("Bit stream offset: {}".format(bit_offset))
packets = []
last_packet_ephemeris = False
for xx in range(bit_offset, len(bit_string)-size_of_packets, size_of_packets):
    if last_packet_ephemeris:
        last_packet_ephemeris = False
continue
packet = ''
if reverse:
header = '{:02X}'.format(int(bit_string[xx:xx+8], 2))
else:
header = '{:02X}'.format(int(bit_string[xx:xx+8][::-1], 2))
ephemeris_header = packet_dict['Ephemeris']['hex_header']
packet_length = 12*8
if header == ephemeris_header:
packet_length = 24*8
        last_packet_ephemeris = True
for yy in range(0, packet_length, 8):
if reverse:
packet += '{:02X}'.format(int(bit_string[xx+yy:xx+yy+8], 2))
else:
packet += '{:02X}'.format(int(bit_string[xx+yy:xx+yy+8][::-1], 2))
packets.append(packet)
# Save the packets (in hex) to a file
with open(packet_file, 'w') as f:
for packet in packets:
f.write(packet + '\n')
# Print out the parsed packets
error_packets = 0
fixed_packets = 0
print("\nList of packets: (### indicates checksum failed)")
for packet in packets:
output = ''
# Compute the fletcher16 checksum over the whole packet
# 0000 output is a good packet
if fletcher_checksum(packet) != '0000' and brute_force_1bit_error_correction:
# Attempt to correct errors by flipping bits until the checksum comes out correct
binary_packet = format(int(packet, 16), '0>{}b'.format(int(len(packet)/2*8)))
for xx in range(0, len(binary_packet)):
temp_packet = ''
if binary_packet[xx] == '0':
temp_bits = binary_packet[:xx] + '1' + binary_packet[xx+1:]
else:
temp_bits = binary_packet[:xx] + '0' + binary_packet[xx+1:]
for yy in range(0, len(binary_packet), 8):
if reverse:
temp_packet += '{:02X}'.format(int(temp_bits[yy:yy+8][::-1], 2))
else:
temp_packet += '{:02X}'.format(int(temp_bits[yy:yy+8], 2))
if fletcher_checksum(temp_packet) == '0000':
# print("Found correct packet!")
packet = temp_packet
fixed_packets += 1
break
if fletcher_checksum(packet) != '0000':
output += '### '
error_packets += 1
for packet_type in packet_dict:
packet_info = packet_dict[packet_type]
if packet[:2] == packet_info['hex_header']:
output += '{}: '.format(packet_type)
for part, (start, stop) in packet_info['message_parts']:
output += '{}: {} '.format(part, packet[start:stop])
print(output)
if packet_type == 'Ephemeris':
payload = ''.join([packet[xx:xx+2] for xx in range(42, 2, -2)])
# calculate current satellite time
start_date = datetime.strptime('Jan 6 1980 00:00', '%b %d %Y %H:%M')
week_number = payload[:4]
time_of_week = payload[4:10]
this_week = start_date + timedelta(weeks=int(week_number, 16))
this_time = this_week + timedelta(seconds=int(time_of_week, 16))
print("\tCurrent satellite time: {} Z".format(this_time))
# calculate satellite ECEF position
zdot = payload[10:15][::-1]
ydot = payload[15:20][::-1]
xdot = payload[20:25][::-1]
zpos = payload[25:30][::-1]
ypos = payload[30:35][::-1]
xpos = payload[35:40][::-1]
# Constants/equations from Orbcomm Serial Interface Specification E80050015-Rev F
max_r_sat = 8378155.0
max_v_sat = 7700.0
val_20_bits = 1048576.0
# ECEF position.
x_temp = int(xpos[:2][::-1], 16) + 256. * int(xpos[2:4][::-1], 16) + 256**2 * int(xpos[4:], 16)
x_ecef = ((2*x_temp*max_r_sat)/val_20_bits - max_r_sat)
y_temp = int(ypos[:2][::-1], 16) + 256. * int(ypos[2:4][::-1], 16) + 256**2 * int(ypos[4:], 16)
y_ecef = ((2*y_temp*max_r_sat)/val_20_bits - max_r_sat)
z_temp = int(zpos[:2][::-1], 16) + 256. * int(zpos[2:4][::-1], 16) + 256**2 * int(zpos[4:], 16)
z_ecef = ((2*z_temp*max_r_sat)/val_20_bits - max_r_sat)
# ECEF velocity vectors
vx_temp = int(xdot[:2][::-1], 16) + 256. * int(xdot[2:4][::-1], 16) + 256**2 * int(xdot[4:], 16)
vx_ecef = ((2*vx_temp*max_v_sat)/val_20_bits - max_v_sat)
vy_temp = int(ydot[:2][::-1], 16) + 256. * int(ydot[2:4][::-1], 16) + 256**2 * int(ydot[4:], 16)
vy_ecef = ((2*vy_temp*max_v_sat)/val_20_bits - max_v_sat)
vz_temp = int(zdot[:2][::-1], 16) + 256. * int(zdot[2:4][::-1], 16) + 256**2 * int(zdot[4:], 16)
vz_ecef = ((2*vz_temp*max_v_sat)/val_20_bits - max_v_sat)
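                # Scaling note (added): each component is packed as a 20-bit unsigned
                # integer, so (2*value*max)/2**20 - max maps the raw range [0, 2**20)
                # linearly onto [-max, +max); e.g. a mid-scale x_temp of 524288 decodes
                # to an x_ecef of roughly 0 m.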
# Calculate satellites reported velocity
sat_vel = np.sqrt(vx_ecef**2 + vy_ecef**2 + vz_ecef**2)
# Calculate the satellites reported position
lat, lon, alt = ecef_to_lla(x_ecef, y_ecef, z_ecef)
# Calculate the distance between satellite's reported position and the ephemeris position
ephem_lat, ephem_lon, ephem_alt = (np.degrees(sat.sublat), np.degrees(sat.sublong), sat.elevation)
ephem_x_ecef, ephem_y_ecef, ephem_z_ecef = lla_to_ecef(ephem_lat, ephem_lon, ephem_alt)
distance = np.sqrt((x_ecef - ephem_x_ecef)**2 + (y_ecef - ephem_y_ecef)**2 + (z_ecef - ephem_z_ecef)**2)
# Calculate velocity from pyephem by calculating the position half a second before and after timestamp
# to get "distance traveled per second"
# 0.5 seconds in the past
obs.date = datetime.utcfromtimestamp(timestamp - 0.5)
sat.compute(obs)
ephem_lat, ephem_lon, ephem_alt = (np.degrees(sat.sublat), np.degrees(sat.sublong), sat.elevation)
ephem_x_ecef_1, ephem_y_ecef_1, ephem_z_ecef_1 = lla_to_ecef(ephem_lat, ephem_lon, ephem_alt)
# 0.5 seconds in the future
obs.date = datetime.utcfromtimestamp(timestamp + 0.5)
sat.compute(obs)
ephem_lat, ephem_lon, ephem_alt = (np.degrees(sat.sublat), np.degrees(sat.sublong), sat.elevation)
ephem_x_ecef_2, ephem_y_ecef_2, ephem_z_ecef_2 = lla_to_ecef(ephem_lat, ephem_lon, ephem_alt)
# Calculate distance between those points (since it is over 1 second, it is the same as velocity)
ephem_vel = np.sqrt((ephem_x_ecef_1 - ephem_x_ecef_2)**2 + (ephem_y_ecef_1 - ephem_y_ecef_2)**2 + (ephem_z_ecef_1 - ephem_z_ecef_2)**2)
print("\tSat Lat/Lon: {:8.4f}, {:8.4f}, Altitude: {:6.1f} km, Velocity: {:6.1f} m/s".format(lat, lon, alt/1000.0, sat_vel))
print("\tEphem Lat/Lon: {:8.4f}, {:8.4f}, Altitude: {:6.1f} km, Velocity: {:6.1f} m/s".format(np.degrees(sat.sublat), np.degrees(sat.sublong), sat.elevation/1000.0, ephem_vel))
print("\tDifference in reported and ephemeris position: {:6.1f} km".format(distance/1000.0))
print("\tDifference in reported and ephemeris velocity: {:6.1f} m/s".format(abs(sat_vel - ephem_vel)))
break
# Unrecognized just means I don't know what these packets are for
# would also happen if the header is corrupted
if output in ['', '### ']:
print("{}Unrecognized packet: {}".format(output, packet))
print("{} packets with errors, {} packets corrected, PER: {:5.1f}%".format(error_packets+fixed_packets,
fixed_packets,
float(error_packets)/len(packets)*100))
# Plot IQ samples
plt.figure()
plt.subplot(211)
plt.title("Before carrier recovery")
plt.plot(phase_comp_samples[1000:1080].real)
plt.plot(phase_comp_samples[1000:1080].imag)
plt.grid()
plt.subplot(212)
plt.title("After carrier recovery")
plt.plot(time_recovery_samples[1000:1080].real)
plt.plot(time_recovery_samples[1000:1080].imag)
plt.grid()
plt.tight_layout()
# Plot spectrum of recording
plt.figure()
plt.subplot(221)
nperseg = int(sample_rate/100.0)
f, full_pxx = scisig.welch(samples, fs=sample_rate, nperseg=nperseg, \
return_onesided=False, scaling='density')
f = (np.roll(f, int(len(f)/2)) + center_freq)/1e6
full_pxx = np.roll(full_pxx, int(len(full_pxx)/2))
full_pxx = 10*np.log10(full_pxx)
plt.plot(f, full_pxx)
plt.title("Periodogram of recording\nRed dots are orbcomm channels")
plt.xlabel("Frequency (Hz)")
plt.ylabel("Magnitude (dB)")
low_point = np.min(full_pxx)
for freq in frequencies:
plt.plot(freq/1e6, low_point, 'ro')
# Plot one of the channels as baseband
# and plot the decimated signal
plt.subplot(222)
f, pxx = scisig.welch(baseband_samples, fs=sample_rate/decimation, \
return_onesided=False, nperseg=nperseg, scaling='density')
f = (np.roll(f, int(len(f)/2)))
pxx = np.roll(pxx, int(len(pxx)/2))
pxx = 10*np.log10(pxx)
plt.plot(f, pxx, label='Decimated')
# plot the low-pass filtered signal
f, pxx = scisig.welch(filtered_samples, fs=sample_rate, nperseg=nperseg, \
return_onesided=False, scaling='density')
f = (np.roll(f, int(len(f)/2)))
pxx = np.roll(pxx, int(len(pxx)/2))
pxx = 10*np.log10(pxx)
plt.plot(f, pxx, label='original signal')
plt.xlim([-45e3, 45e3])
plt.ylim([np.min(full_pxx)-40, np.max(pxx)+1])
plt.title("Spectrum of one channel at baseband")
plt.xlabel("Frequency (Hz)")
plt.ylabel("Magnitude (dB)")
plt.legend(loc='best')
# plot the signal raised to the 4th power
# Gives an idea of frequency offset after doppler compensation
plt.subplot(223)
nperseg = len(signal_to_4th_power)
f, pxx = scisig.welch(signal_to_4th_power, fs=sample_rate/decimation, \
return_onesided=False, nperseg=nperseg, scaling='density')
f = (np.roll(f, int(len(f)/2)))
pxx = np.roll(pxx, int(len(pxx)/2))
pxx = 10*np.log10(pxx)
plt.plot(f, pxx)
plt.xlim([-2e3, 2e3])
plt.title("Spectrum of signal to 4th power")
plt.xlabel("Frequency (Hz)")
plt.ylabel("Magnitude (dB)")
plt.tight_layout()
# Plot complex samples from one channel
plt.figure()
ax = plt.subplot(111)
matched_filtered_samples /= np.median(np.abs(matched_filtered_samples))
phase_comp_samples /= np.median(np.abs(phase_comp_samples))
plt.scatter(matched_filtered_samples.real[100*samples_per_symbol::samples_per_symbol], matched_filtered_samples.imag[100*samples_per_symbol::samples_per_symbol], marker='x', label='after MF')
plt.scatter(phase_comp_samples.real[100*samples_per_symbol::samples_per_symbol], phase_comp_samples.imag[100*samples_per_symbol::samples_per_symbol], marker='x', label='timing recovery')
plt.legend(loc='best')
plt.title("Complex samples (10k samples)")
plt.xlabel("Real")
plt.ylabel("Imag")
plt.grid()
ax.set_aspect(aspect=1)
plt.tight_layout()
plt.show()
|
[
"matplotlib.pyplot.title",
"ephem.Observer",
"numpy.abs",
"scipy.signal.welch",
"numpy.sum",
"scipy.io.loadmat",
"helpers.butter_lowpass_filter",
"numpy.argmax",
"numpy.angle",
"helpers.complex_mix",
"matplotlib.pyplot.figure",
"numpy.sinc",
"numpy.exp",
"glob.glob",
"matplotlib.pyplot.tight_layout",
"numpy.degrees",
"scipy.signal.lfilter",
"ephem.readtle",
"helpers.lla_to_ecef",
"datetime.datetime.utcfromtimestamp",
"numpy.max",
"numpy.log10",
"numpy.conj",
"matplotlib.pyplot.show",
"numpy.hamming",
"matplotlib.pyplot.legend",
"numpy.min",
"datetime.datetime.strptime",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"helpers.rrcosfilter",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.xlim",
"numpy.flip",
"matplotlib.pyplot.plot",
"helpers.ecef_to_lla",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"helpers.fletcher_checksum",
"numpy.array",
"numpy.sign",
"matplotlib.pyplot.xlabel",
"numpy.sqrt"
] |
[((1269, 1289), 'scipy.io.loadmat', 'loadmat', (['sample_file'], {}), '(sample_file)\n', (1276, 1289), False, 'from scipy.io import loadmat\n'), ((2796, 2812), 'ephem.Observer', 'ephem.Observer', ([], {}), '()\n', (2810, 2812), False, 'import ephem\n'), ((2940, 2976), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['timestamp'], {}), '(timestamp)\n', (2965, 2976), False, 'from datetime import datetime, timedelta\n'), ((3158, 3204), 'ephem.readtle', 'ephem.readtle', (['sat_line0', 'sat_line1', 'sat_line2'], {}), '(sat_line0, sat_line1, sat_line2)\n', (3171, 3204), False, 'import ephem\n'), ((3664, 3709), 'helpers.complex_mix', 'complex_mix', (['samples', 'freq_shift', 'sample_rate'], {}), '(samples, freq_shift, sample_rate)\n', (3675, 3709), False, 'from helpers import butter_lowpass_filter, complex_mix, rrcosfilter\n'), ((4868, 4993), 'scipy.signal.welch', 'scisig.welch', (['signal_to_4th_power'], {'fs': '(sample_rate / decimation)', 'return_onesided': '(False)', 'nperseg': 'nperseg', 'scaling': '"""density"""'}), "(signal_to_4th_power, fs=sample_rate / decimation,\n return_onesided=False, nperseg=nperseg, scaling='density')\n", (4880, 4993), True, 'import scipy.signal as scisig\n'), ((5442, 5511), 'helpers.complex_mix', 'complex_mix', (['decimated_samples', 'freq_offset', '(sample_rate / decimation)'], {}), '(decimated_samples, freq_offset, sample_rate / decimation)\n', (5453, 5511), False, 'from helpers import butter_lowpass_filter, complex_mix, rrcosfilter\n'), ((5768, 5830), 'helpers.rrcosfilter', 'rrcosfilter', (['rrc_num_taps', 'alpha', 'baudrate', 'samples_per_symbol'], {}), '(rrc_num_taps, alpha, baudrate, samples_per_symbol)\n', (5779, 5830), False, 'from helpers import butter_lowpass_filter, complex_mix, rrcosfilter\n'), ((5858, 5907), 'scipy.signal.lfilter', 'scisig.lfilter', (['rrc_taps', '[1.0]', 'baseband_samples'], {}), '(rrc_taps, [1.0], baseband_samples)\n', (5872, 5907), True, 'import scipy.signal as scisig\n'), ((6175, 6206), 'numpy.zeros', 'np.zeros', (['(5)'], {'dtype': 'np.complex64'}), '(5, dtype=np.complex64)\n', (6183, 6206), True, 'import numpy as np\n'), ((6233, 6270), 'numpy.array', 'np.array', (['[-2.0, -1.0, 0.0, 1.0, 2.0]'], {}), '([-2.0, -1.0, 0.0, 1.0, 2.0])\n', (6241, 6270), True, 'import numpy as np\n'), ((8684, 8696), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8694, 8696), True, 'import matplotlib.pyplot as plt\n'), ((8697, 8713), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (8708, 8713), True, 'import matplotlib.pyplot as plt\n'), ((8714, 8746), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing offset (tau)"""'], {}), "('Timing offset (tau)')\n", (8723, 8746), True, 'import matplotlib.pyplot as plt\n'), ((8747, 8765), 'matplotlib.pyplot.plot', 'plt.plot', (['tau_vect'], {}), '(tau_vect)\n', (8755, 8765), True, 'import matplotlib.pyplot as plt\n'), ((8767, 8783), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (8778, 8783), True, 'import matplotlib.pyplot as plt\n'), ((8784, 8814), 'matplotlib.pyplot.title', 'plt.title', (['"""Derivative (Dtau)"""'], {}), "('Derivative (Dtau)')\n", (8793, 8814), True, 'import matplotlib.pyplot as plt\n'), ((8815, 8834), 'matplotlib.pyplot.plot', 'plt.plot', (['dtau_vect'], {}), '(dtau_vect)\n', (8823, 8834), True, 'import matplotlib.pyplot as plt\n'), ((8835, 8853), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8851, 8853), True, 'import matplotlib.pyplot as plt\n'), ((8855, 8871), 
'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (8866, 8871), True, 'import matplotlib.pyplot as plt\n'), ((8872, 8904), 'matplotlib.pyplot.title', 'plt.title', (['"""Timing error signal"""'], {}), "('Timing error signal')\n", (8881, 8904), True, 'import matplotlib.pyplot as plt\n'), ((8905, 8927), 'matplotlib.pyplot.plot', 'plt.plot', (['timing_error'], {}), '(timing_error)\n', (8913, 8927), True, 'import matplotlib.pyplot as plt\n'), ((8928, 8946), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8944, 8946), True, 'import matplotlib.pyplot as plt\n'), ((8967, 8979), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8977, 8979), True, 'import matplotlib.pyplot as plt\n'), ((8980, 8996), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(311)'], {}), '(311)\n', (8991, 8996), True, 'import matplotlib.pyplot as plt\n'), ((8997, 9043), 'matplotlib.pyplot.title', 'plt.title', (['"""Eye Diagram before matched filter"""'], {}), "('Eye Diagram before matched filter')\n", (9006, 9043), True, 'import matplotlib.pyplot as plt\n'), ((9211, 9227), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(312)'], {}), '(312)\n', (9222, 9227), True, 'import matplotlib.pyplot as plt\n'), ((9228, 9261), 'matplotlib.pyplot.title', 'plt.title', (['"""After matched filter"""'], {}), "('After matched filter')\n", (9237, 9261), True, 'import matplotlib.pyplot as plt\n'), ((9412, 9422), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9420, 9422), True, 'import matplotlib.pyplot as plt\n'), ((9423, 9439), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(313)'], {}), '(313)\n', (9434, 9439), True, 'import matplotlib.pyplot as plt\n'), ((9440, 9474), 'matplotlib.pyplot.title', 'plt.title', (['"""After timing recovery"""'], {}), "('After timing recovery')\n", (9449, 9474), True, 'import matplotlib.pyplot as plt\n'), ((9622, 9632), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (9630, 9632), True, 'import matplotlib.pyplot as plt\n'), ((9633, 9651), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (9649, 9651), True, 'import matplotlib.pyplot as plt\n'), ((11086, 11098), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (11096, 11098), True, 'import matplotlib.pyplot as plt\n'), ((11099, 11115), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (11110, 11115), True, 'import matplotlib.pyplot as plt\n'), ((11116, 11148), 'matplotlib.pyplot.title', 'plt.title', (['"""Phase output of PLL"""'], {}), "('Phase output of PLL')\n", (11125, 11148), True, 'import matplotlib.pyplot as plt\n'), ((11149, 11168), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_est'], {}), '(phase_est)\n', (11157, 11168), True, 'import matplotlib.pyplot as plt\n'), ((11169, 11179), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (11177, 11179), True, 'import matplotlib.pyplot as plt\n'), ((11181, 11197), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (11192, 11197), True, 'import matplotlib.pyplot as plt\n'), ((11198, 11227), 'matplotlib.pyplot.title', 'plt.title', (['"""Frequency of PLL"""'], {}), "('Frequency of PLL')\n", (11207, 11227), True, 'import matplotlib.pyplot as plt\n'), ((11228, 11246), 'matplotlib.pyplot.plot', 'plt.plot', (['freq_out'], {}), '(freq_out)\n', (11236, 11246), True, 'import matplotlib.pyplot as plt\n'), ((11247, 11257), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (11255, 11257), True, 'import matplotlib.pyplot as plt\n'), ((12125, 12137), 
'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (12135, 12137), True, 'import matplotlib.pyplot as plt\n'), ((12138, 12183), 'matplotlib.pyplot.title', 'plt.title', (['"""Angle between successive symbols"""'], {}), "('Angle between successive symbols')\n", (12147, 12183), True, 'import matplotlib.pyplot as plt\n'), ((12184, 12211), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""symbol number"""'], {}), "('symbol number')\n", (12194, 12211), True, 'import matplotlib.pyplot as plt\n'), ((12212, 12241), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Angle (degrees)"""'], {}), "('Angle (degrees)')\n", (12222, 12241), True, 'import matplotlib.pyplot as plt\n'), ((12242, 12263), 'matplotlib.pyplot.plot', 'plt.plot', (['angles', '"""x"""'], {}), "(angles, 'x')\n", (12250, 12263), True, 'import matplotlib.pyplot as plt\n'), ((12264, 12274), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (12272, 12274), True, 'import matplotlib.pyplot as plt\n'), ((12895, 12920), 'numpy.zeros', 'np.zeros', (['size_of_packets'], {}), '(size_of_packets)\n', (12903, 12920), True, 'import numpy as np\n'), ((12933, 12958), 'numpy.zeros', 'np.zeros', (['size_of_packets'], {}), '(size_of_packets)\n', (12941, 12958), True, 'import numpy as np\n'), ((21440, 21452), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21450, 21452), True, 'import matplotlib.pyplot as plt\n'), ((21453, 21469), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (21464, 21469), True, 'import matplotlib.pyplot as plt\n'), ((21470, 21506), 'matplotlib.pyplot.title', 'plt.title', (['"""Before carrier recovery"""'], {}), "('Before carrier recovery')\n", (21479, 21506), True, 'import matplotlib.pyplot as plt\n'), ((21507, 21551), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_comp_samples[1000:1080].real'], {}), '(phase_comp_samples[1000:1080].real)\n', (21515, 21551), True, 'import matplotlib.pyplot as plt\n'), ((21552, 21596), 'matplotlib.pyplot.plot', 'plt.plot', (['phase_comp_samples[1000:1080].imag'], {}), '(phase_comp_samples[1000:1080].imag)\n', (21560, 21596), True, 'import matplotlib.pyplot as plt\n'), ((21597, 21607), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (21605, 21607), True, 'import matplotlib.pyplot as plt\n'), ((21609, 21625), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (21620, 21625), True, 'import matplotlib.pyplot as plt\n'), ((21626, 21661), 'matplotlib.pyplot.title', 'plt.title', (['"""After carrier recovery"""'], {}), "('After carrier recovery')\n", (21635, 21661), True, 'import matplotlib.pyplot as plt\n'), ((21662, 21709), 'matplotlib.pyplot.plot', 'plt.plot', (['time_recovery_samples[1000:1080].real'], {}), '(time_recovery_samples[1000:1080].real)\n', (21670, 21709), True, 'import matplotlib.pyplot as plt\n'), ((21710, 21757), 'matplotlib.pyplot.plot', 'plt.plot', (['time_recovery_samples[1000:1080].imag'], {}), '(time_recovery_samples[1000:1080].imag)\n', (21718, 21757), True, 'import matplotlib.pyplot as plt\n'), ((21758, 21768), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (21766, 21768), True, 'import matplotlib.pyplot as plt\n'), ((21769, 21787), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (21785, 21787), True, 'import matplotlib.pyplot as plt\n'), ((21818, 21830), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21828, 21830), True, 'import matplotlib.pyplot as plt\n'), ((21831, 21847), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (21842, 21847), 
True, 'import matplotlib.pyplot as plt\n'), ((21895, 21996), 'scipy.signal.welch', 'scisig.welch', (['samples'], {'fs': 'sample_rate', 'nperseg': 'nperseg', 'return_onesided': '(False)', 'scaling': '"""density"""'}), "(samples, fs=sample_rate, nperseg=nperseg, return_onesided=\n False, scaling='density')\n", (21907, 21996), True, 'import scipy.signal as scisig\n'), ((22155, 22176), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'full_pxx'], {}), '(f, full_pxx)\n', (22163, 22176), True, 'import matplotlib.pyplot as plt\n'), ((22177, 22248), 'matplotlib.pyplot.title', 'plt.title', (['"""Periodogram of recording\nRed dots are orbcomm channels"""'], {}), '("""Periodogram of recording\nRed dots are orbcomm channels""")\n', (22186, 22248), True, 'import matplotlib.pyplot as plt\n'), ((22246, 22274), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (22256, 22274), True, 'import matplotlib.pyplot as plt\n'), ((22275, 22303), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude (dB)"""'], {}), "('Magnitude (dB)')\n", (22285, 22303), True, 'import matplotlib.pyplot as plt\n'), ((22316, 22332), 'numpy.min', 'np.min', (['full_pxx'], {}), '(full_pxx)\n', (22322, 22332), True, 'import numpy as np\n'), ((22470, 22486), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (22481, 22486), True, 'import matplotlib.pyplot as plt\n'), ((22496, 22619), 'scipy.signal.welch', 'scisig.welch', (['baseband_samples'], {'fs': '(sample_rate / decimation)', 'return_onesided': '(False)', 'nperseg': 'nperseg', 'scaling': '"""density"""'}), "(baseband_samples, fs=sample_rate / decimation, return_onesided\n =False, nperseg=nperseg, scaling='density')\n", (22508, 22619), True, 'import scipy.signal as scisig\n'), ((22728, 22763), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'pxx'], {'label': '"""Decimated"""'}), "(f, pxx, label='Decimated')\n", (22736, 22763), True, 'import matplotlib.pyplot as plt\n'), ((22810, 22919), 'scipy.signal.welch', 'scisig.welch', (['filtered_samples'], {'fs': 'sample_rate', 'nperseg': 'nperseg', 'return_onesided': '(False)', 'scaling': '"""density"""'}), "(filtered_samples, fs=sample_rate, nperseg=nperseg,\n return_onesided=False, scaling='density')\n", (22822, 22919), True, 'import scipy.signal as scisig\n'), ((23032, 23073), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'pxx'], {'label': '"""original signal"""'}), "(f, pxx, label='original signal')\n", (23040, 23073), True, 'import matplotlib.pyplot as plt\n'), ((23074, 23103), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-45000.0, 45000.0]'], {}), '([-45000.0, 45000.0])\n', (23082, 23103), True, 'import matplotlib.pyplot as plt\n'), ((23146, 23194), 'matplotlib.pyplot.title', 'plt.title', (['"""Spectrum of one channel at baseband"""'], {}), "('Spectrum of one channel at baseband')\n", (23155, 23194), True, 'import matplotlib.pyplot as plt\n'), ((23195, 23223), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (23205, 23223), True, 'import matplotlib.pyplot as plt\n'), ((23224, 23252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude (dB)"""'], {}), "('Magnitude (dB)')\n", (23234, 23252), True, 'import matplotlib.pyplot as plt\n'), ((23253, 23275), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (23263, 23275), True, 'import matplotlib.pyplot as plt\n'), ((23382, 23398), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (23393, 23398), True, 'import 
matplotlib.pyplot as plt\n'), ((23443, 23568), 'scipy.signal.welch', 'scisig.welch', (['signal_to_4th_power'], {'fs': '(sample_rate / decimation)', 'return_onesided': '(False)', 'nperseg': 'nperseg', 'scaling': '"""density"""'}), "(signal_to_4th_power, fs=sample_rate / decimation,\n return_onesided=False, nperseg=nperseg, scaling='density')\n", (23455, 23568), True, 'import scipy.signal as scisig\n'), ((23680, 23696), 'matplotlib.pyplot.plot', 'plt.plot', (['f', 'pxx'], {}), '(f, pxx)\n', (23688, 23696), True, 'import matplotlib.pyplot as plt\n'), ((23697, 23724), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-2000.0, 2000.0]'], {}), '([-2000.0, 2000.0])\n', (23705, 23724), True, 'import matplotlib.pyplot as plt\n'), ((23719, 23763), 'matplotlib.pyplot.title', 'plt.title', (['"""Spectrum of signal to 4th power"""'], {}), "('Spectrum of signal to 4th power')\n", (23728, 23763), True, 'import matplotlib.pyplot as plt\n'), ((23764, 23792), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (23774, 23792), True, 'import matplotlib.pyplot as plt\n'), ((23793, 23821), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Magnitude (dB)"""'], {}), "('Magnitude (dB)')\n", (23803, 23821), True, 'import matplotlib.pyplot as plt\n'), ((23822, 23840), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (23838, 23840), True, 'import matplotlib.pyplot as plt\n'), ((23882, 23894), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (23892, 23894), True, 'import matplotlib.pyplot as plt\n'), ((23900, 23916), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(111)'], {}), '(111)\n', (23911, 23916), True, 'import matplotlib.pyplot as plt\n'), ((24051, 24255), 'matplotlib.pyplot.scatter', 'plt.scatter', (['matched_filtered_samples.real[100 * samples_per_symbol::samples_per_symbol]', 'matched_filtered_samples.imag[100 * samples_per_symbol::samples_per_symbol]'], {'marker': '"""x"""', 'label': '"""after MF"""'}), "(matched_filtered_samples.real[100 * samples_per_symbol::\n samples_per_symbol], matched_filtered_samples.imag[100 *\n samples_per_symbol::samples_per_symbol], marker='x', label='after MF')\n", (24062, 24255), True, 'import matplotlib.pyplot as plt\n'), ((24243, 24443), 'matplotlib.pyplot.scatter', 'plt.scatter', (['phase_comp_samples.real[100 * samples_per_symbol::samples_per_symbol]', 'phase_comp_samples.imag[100 * samples_per_symbol::samples_per_symbol]'], {'marker': '"""x"""', 'label': '"""timing recovery"""'}), "(phase_comp_samples.real[100 * samples_per_symbol::\n samples_per_symbol], phase_comp_samples.imag[100 * samples_per_symbol::\n samples_per_symbol], marker='x', label='timing recovery')\n", (24254, 24443), True, 'import matplotlib.pyplot as plt\n'), ((24430, 24452), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""best"""'}), "(loc='best')\n", (24440, 24452), True, 'import matplotlib.pyplot as plt\n'), ((24453, 24495), 'matplotlib.pyplot.title', 'plt.title', (['"""Complex samples (10k samples)"""'], {}), "('Complex samples (10k samples)')\n", (24462, 24495), True, 'import matplotlib.pyplot as plt\n'), ((24496, 24514), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Real"""'], {}), "('Real')\n", (24506, 24514), True, 'import matplotlib.pyplot as plt\n'), ((24515, 24533), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Imag"""'], {}), "('Imag')\n", (24525, 24533), True, 'import matplotlib.pyplot as plt\n'), ((24534, 24544), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (24542, 24544), True, 'import 
matplotlib.pyplot as plt\n'), ((24569, 24587), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (24585, 24587), True, 'import matplotlib.pyplot as plt\n'), ((24588, 24598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24596, 24598), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3034), 'numpy.abs', 'np.abs', (['samples'], {}), '(samples)\n', (3025, 3034), True, 'import numpy as np\n'), ((3973, 4049), 'helpers.butter_lowpass_filter', 'butter_lowpass_filter', (['mixed_down_samples', 'filter_freq', 'sample_rate'], {'order': '(5)'}), '(mixed_down_samples, filter_freq, sample_rate, order=5)\n', (3994, 4049), False, 'from helpers import butter_lowpass_filter, complex_mix, rrcosfilter\n'), ((6490, 6504), 'numpy.sqrt', 'np.sqrt', (['alpha'], {}), '(alpha)\n', (6497, 6504), True, 'import numpy as np\n'), ((7733, 7743), 'numpy.sum', 'np.sum', (['hi'], {}), '(hi)\n', (7739, 7743), True, 'import numpy as np\n'), ((9136, 9191), 'matplotlib.pyplot.plot', 'plt.plot', (['baseband_samples[offset:offset + length].imag'], {}), '(baseband_samples[offset:offset + length].imag)\n', (9144, 9191), True, 'import matplotlib.pyplot as plt\n'), ((9329, 9392), 'matplotlib.pyplot.plot', 'plt.plot', (['matched_filtered_samples[offset:offset + length].imag'], {}), '(matched_filtered_samples[offset:offset + length].imag)\n', (9337, 9392), True, 'import matplotlib.pyplot as plt\n'), ((9542, 9602), 'matplotlib.pyplot.plot', 'plt.plot', (['time_recovery_samples[offset:offset + length].imag'], {}), '(time_recovery_samples[offset:offset + length].imag)\n', (9550, 9602), True, 'import matplotlib.pyplot as plt\n'), ((11462, 11488), 'numpy.abs', 'np.abs', (['phase_comp_samples'], {}), '(phase_comp_samples)\n', (11468, 11488), True, 'import numpy as np\n'), ((13245, 13259), 'numpy.max', 'np.max', (['scores'], {}), '(scores)\n', (13251, 13259), True, 'import numpy as np\n'), ((13262, 13279), 'numpy.max', 'np.max', (['revscores'], {}), '(revscores)\n', (13268, 13279), True, 'import numpy as np\n'), ((13329, 13349), 'numpy.argmax', 'np.argmax', (['revscores'], {}), '(revscores)\n', (13338, 13349), True, 'import numpy as np\n'), ((13373, 13390), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (13382, 13390), True, 'import numpy as np\n'), ((22136, 22154), 'numpy.log10', 'np.log10', (['full_pxx'], {}), '(full_pxx)\n', (22144, 22154), True, 'import numpy as np\n'), ((22362, 22405), 'matplotlib.pyplot.plot', 'plt.plot', (['(freq / 1000000.0)', 'low_point', '"""ro"""'], {}), "(freq / 1000000.0, low_point, 'ro')\n", (22370, 22405), True, 'import matplotlib.pyplot as plt\n'), ((22714, 22727), 'numpy.log10', 'np.log10', (['pxx'], {}), '(pxx)\n', (22722, 22727), True, 'import numpy as np\n'), ((23017, 23030), 'numpy.log10', 'np.log10', (['pxx'], {}), '(pxx)\n', (23025, 23030), True, 'import numpy as np\n'), ((23665, 23678), 'numpy.log10', 'np.log10', (['pxx'], {}), '(pxx)\n', (23673, 23678), True, 'import numpy as np\n'), ((23956, 23988), 'numpy.abs', 'np.abs', (['matched_filtered_samples'], {}), '(matched_filtered_samples)\n', (23962, 23988), True, 'import numpy as np\n'), ((24022, 24048), 'numpy.abs', 'np.abs', (['phase_comp_samples'], {}), '(phase_comp_samples)\n', (24028, 24048), True, 'import numpy as np\n'), ((1175, 1204), 'glob.glob', 'glob.glob', (["(data_dir + '*.mat')"], {}), "(data_dir + '*.mat')\n", (1184, 1204), False, 'import glob\n'), ((1424, 1474), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (["data['timestamp'][0][0]"], {}), 
"(data['timestamp'][0][0])\n", (1449, 1474), False, 'from datetime import datetime, timedelta\n'), ((4317, 4393), 'helpers.butter_lowpass_filter', 'butter_lowpass_filter', (['mixed_down_samples', 'filter_freq', 'sample_rate'], {'order': '(2)'}), '(mixed_down_samples, filter_freq, sample_rate, order=2)\n', (4338, 4393), False, 'from helpers import butter_lowpass_filter, complex_mix, rrcosfilter\n'), ((6323, 6336), 'numpy.hamming', 'np.hamming', (['(5)'], {}), '(5)\n', (6333, 6336), True, 'import numpy as np\n'), ((7664, 7681), 'numpy.sinc', 'np.sinc', (['(th - tau)'], {}), '(th - tau)\n', (7671, 7681), True, 'import numpy as np\n'), ((7787, 7801), 'numpy.flip', 'np.flip', (['hi', '(0)'], {}), '(hi, 0)\n', (7794, 7801), True, 'import numpy as np\n'), ((9867, 9897), 'numpy.exp', 'np.exp', (['(-1.0j * phase_est[idx])'], {}), '(-1.0j * phase_est[idx])\n', (9873, 9897), True, 'import numpy as np\n'), ((11346, 11375), 'numpy.exp', 'np.exp', (['(1.0j * phase_est[:-1])'], {}), '(1.0j * phase_est[:-1])\n', (11352, 11375), True, 'import numpy as np\n'), ((11813, 11850), 'numpy.angle', 'np.angle', (['demod_symbols[xx]'], {'deg': '(True)'}), '(demod_symbols[xx], deg=True)\n', (11821, 11850), True, 'import numpy as np\n'), ((11853, 11894), 'numpy.angle', 'np.angle', (['demod_symbols[xx - 1]'], {'deg': '(True)'}), '(demod_symbols[xx - 1], deg=True)\n', (11861, 11894), True, 'import numpy as np\n'), ((15685, 15710), 'helpers.fletcher_checksum', 'fletcher_checksum', (['packet'], {}), '(packet)\n', (15702, 15710), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((7988, 8008), 'numpy.array', 'np.array', (['[-1, 0, 1]'], {}), '([-1, 0, 1])\n', (7996, 8008), True, 'import numpy as np\n'), ((9914, 9938), 'numpy.sign', 'np.sign', (['signal_out.real'], {}), '(signal_out.real)\n', (9921, 9938), True, 'import numpy as np\n'), ((9957, 9981), 'numpy.sign', 'np.sign', (['signal_out.imag'], {}), '(signal_out.imag)\n', (9964, 9981), True, 'import numpy as np\n'), ((14668, 14693), 'helpers.fletcher_checksum', 'fletcher_checksum', (['packet'], {}), '(packet)\n', (14685, 14693), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((23108, 23124), 'numpy.min', 'np.min', (['full_pxx'], {}), '(full_pxx)\n', (23114, 23124), True, 'import numpy as np\n'), ((23129, 23140), 'numpy.max', 'np.max', (['pxx'], {}), '(pxx)\n', (23135, 23140), True, 'import numpy as np\n'), ((15492, 15522), 'helpers.fletcher_checksum', 'fletcher_checksum', (['temp_packet'], {}), '(temp_packet)\n', (15509, 15522), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((16326, 16381), 'datetime.datetime.strptime', 'datetime.strptime', (['"""Jan 6 1980 00:00"""', '"""%b %d %Y %H:%M"""'], {}), "('Jan 6 1980 00:00', '%b %d %Y %H:%M')\n", (16343, 16381), False, 'from datetime import datetime, timedelta\n'), ((18504, 18555), 'numpy.sqrt', 'np.sqrt', (['(vx_ecef ** 2 + vy_ecef ** 2 + vz_ecef ** 2)'], {}), '(vx_ecef ** 2 + vy_ecef ** 2 + vz_ecef ** 2)\n', (18511, 18555), True, 'import numpy as np\n'), ((18643, 18678), 'helpers.ecef_to_lla', 'ecef_to_lla', (['x_ecef', 'y_ecef', 'z_ecef'], {}), '(x_ecef, y_ecef, z_ecef)\n', (18654, 18678), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((18960, 19004), 'helpers.lla_to_ecef', 'lla_to_ecef', (['ephem_lat', 'ephem_lon', 'ephem_alt'], {}), '(ephem_lat, ephem_lon, ephem_alt)\n', (18971, 19004), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((19032, 19136), 'numpy.sqrt', 'np.sqrt', 
(['((x_ecef - ephem_x_ecef) ** 2 + (y_ecef - ephem_y_ecef) ** 2 + (z_ecef -\n ephem_z_ecef) ** 2)'], {}), '((x_ecef - ephem_x_ecef) ** 2 + (y_ecef - ephem_y_ecef) ** 2 + (\n z_ecef - ephem_z_ecef) ** 2)\n', (19039, 19136), True, 'import numpy as np\n'), ((19371, 19413), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(timestamp - 0.5)'], {}), '(timestamp - 0.5)\n', (19396, 19413), False, 'from datetime import datetime, timedelta\n'), ((19627, 19671), 'helpers.lla_to_ecef', 'lla_to_ecef', (['ephem_lat', 'ephem_lon', 'ephem_alt'], {}), '(ephem_lat, ephem_lon, ephem_alt)\n', (19638, 19671), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((19744, 19786), 'datetime.datetime.utcfromtimestamp', 'datetime.utcfromtimestamp', (['(timestamp + 0.5)'], {}), '(timestamp + 0.5)\n', (19769, 19786), False, 'from datetime import datetime, timedelta\n'), ((20000, 20044), 'helpers.lla_to_ecef', 'lla_to_ecef', (['ephem_lat', 'ephem_lon', 'ephem_alt'], {}), '(ephem_lat, ephem_lon, ephem_alt)\n', (20011, 20044), False, 'from helpers import fletcher_checksum, ecef_to_lla, lla_to_ecef\n'), ((20187, 20320), 'numpy.sqrt', 'np.sqrt', (['((ephem_x_ecef_1 - ephem_x_ecef_2) ** 2 + (ephem_y_ecef_1 - ephem_y_ecef_2) **\n 2 + (ephem_z_ecef_1 - ephem_z_ecef_2) ** 2)'], {}), '((ephem_x_ecef_1 - ephem_x_ecef_2) ** 2 + (ephem_y_ecef_1 -\n ephem_y_ecef_2) ** 2 + (ephem_z_ecef_1 - ephem_z_ecef_2) ** 2)\n', (20194, 20320), True, 'import numpy as np\n'), ((8319, 8356), 'numpy.conj', 'np.conj', (['time_recovery_samples[i - 1]'], {}), '(time_recovery_samples[i - 1])\n', (8326, 8356), True, 'import numpy as np\n'), ((18837, 18859), 'numpy.degrees', 'np.degrees', (['sat.sublat'], {}), '(sat.sublat)\n', (18847, 18859), True, 'import numpy as np\n'), ((18861, 18884), 'numpy.degrees', 'np.degrees', (['sat.sublong'], {}), '(sat.sublong)\n', (18871, 18884), True, 'import numpy as np\n'), ((19498, 19520), 'numpy.degrees', 'np.degrees', (['sat.sublat'], {}), '(sat.sublat)\n', (19508, 19520), True, 'import numpy as np\n'), ((19522, 19545), 'numpy.degrees', 'np.degrees', (['sat.sublong'], {}), '(sat.sublong)\n', (19532, 19545), True, 'import numpy as np\n'), ((19871, 19893), 'numpy.degrees', 'np.degrees', (['sat.sublat'], {}), '(sat.sublat)\n', (19881, 19893), True, 'import numpy as np\n'), ((19895, 19918), 'numpy.degrees', 'np.degrees', (['sat.sublong'], {}), '(sat.sublong)\n', (19905, 19918), True, 'import numpy as np\n'), ((20564, 20586), 'numpy.degrees', 'np.degrees', (['sat.sublat'], {}), '(sat.sublat)\n', (20574, 20586), True, 'import numpy as np\n'), ((20588, 20611), 'numpy.degrees', 'np.degrees', (['sat.sublong'], {}), '(sat.sublong)\n', (20598, 20611), True, 'import numpy as np\n')]
|
import json
import numpy as np
import glob
import re
import copy
import uuid
from preprocessing import prune_sentence
with open('data/concepts_and_synonyms.txt', "r") as f:
sec_tag_labels = f.readlines()
sec_tag_labels = sec_tag_labels[1:]
headers = set([line.strip().split("\t")[-2].lower().strip().replace('_', ' ') for line in sec_tag_labels])
def split_sections(lines):
"""
:param lines: Clinical note in the form of a list of strings. Each element is a line.
:return:
- sections: List of strings, each element is a section.
- section_nums: List of integer lists, each element is a list of lines belonging to each section.
In order to split the clinical notes into sections, we notice that most sections begin with easily identifiable headers.
To detect these headers we use a combination of heuristics such as whether the line contains colons, all uppercase formatting or
phrases found in a list of clinical headers taken from SecTag {Denny et al. 2008}.
Conditions for Header Detection
1) (Line matches with header regex) AND
        (Group 2 of regex is (Upper AND fewer than 5 words) OR in the header list OR there is nothing in this line following ':')
2) (Line matches alpha regex) AND
(first word segment is short) AND
(more than one line in section) AND
(last line ends in period) AND
        (Group 1 is (in header list OR Upper))
"""
# Regex (Non Letters) (Letter without ':') (Post Colon)
header_regex = re.compile("([^A-Za-z]*)([^:]+):(.*)")
# Words in first part of header. '2. Pulmonary disease. The patient ...' group(1) = Pulmonary disease
alpha = re.compile("[^A-Za-z]*([A-Za-z ]*)[^A-Za-z](.*)")
sections = []
section_nums = []
section = []
lines_in_section = []
for line_num, original_line in enumerate(lines):
line = original_line.strip()
is_header = False
match = header_regex.match(line)
alpha_match = alpha.match(line)
# If there's a match check first group
if match:
header = match.group(2)
post_colon = match.group(3).strip()
upper = header.isupper()
short = len(header.split(" ")) < 5
ends_in_colon = post_colon == ""
# All caps is always header
if (upper and short) or ends_in_colon:
is_header = True
# If header in headers
else:
header = header.strip().lower()
is_header = header in headers
# If no match check first word section of whole line as header
elif alpha_match and len(section) > 1:
last_line = section[-1]
if last_line != "" and last_line[-1] == ".":
header = alpha_match.group(1).strip()
if len(header.split(" ")) < 5:
upper = header.isupper()
in_headers = header.lower() in headers
if upper or in_headers:
is_header = True
#Add previous section if it exists and we encounter a header
if is_header and section != []:
sections.append(section)
section_nums.append(lines_in_section)
section = []
lines_in_section = []
section.append(original_line)
lines_in_section.append(line_num)
sections.append(section)
section_nums.append(lines_in_section)
return sections, section_nums
def load_emrqa_datasets(data_dir="../data/datasets/*json"):
"""
:return: dictionary from filename to decoded json objects in data directory
"""
datasets = {}
files = glob.glob(data_dir)
for file in files:
with open(file, "r") as f:
json_file = f.read()
result = json.loads(json_file)
datasets[file] = result
return datasets
def flip_section_list(section_nums):
line_to_section = {}
for section_num, line_list in enumerate(section_nums):
for line in line_list:
line_to_section[line] = section_num
return line_to_section
def group_answers_by_section(qa, sections, section_nums, line_to_section, orig_num_answers, new_num_answers, errors):
new_answers = []
answers = qa['answers']
# Group answers by section number for one qa
section_num_to_answers = {}
for answer in answers:
orig_num_answers += 1
evidence_lines = answer['evidence_start']
answer_texts = answer['text']
evidences = answer['evidence']
if isinstance(evidence_lines, int):
evidence_lines = [evidence_lines]
answer_texts = [answer_texts]
evidences = [evidences]
for evidence_line, evidence, answer_text in zip(evidence_lines, evidences, answer_texts):
new_answer = copy.deepcopy(answer)
new_num_answers += 1
section_num = line_to_section[evidence_line - 1]
first_section_line = section_nums[section_num][0]
new_evidence_line = evidence_line - first_section_line
new_answer['evidence_start'] = new_evidence_line
new_answer['answer_start'][0] = new_evidence_line
new_answer['evidence'] = evidence
new_answer['text'] = answer_text
new_answer['answer_entity_type'] = 'single'
section = sections[section_num]
if section_num in section_num_to_answers:
new_answers = section_num_to_answers[section_num]
else:
new_answers = []
section_text = ''.join(section)
if evidence in section_text:
new_answers.append(new_answer)
section_num_to_answers[section_num] = new_answers
else:
errors += 1
return section_num_to_answers, orig_num_answers, new_num_answers, errors
def create_split_docs_emrqa(emrqa):
"""
:param emrqa: Decoded emrQA dataset json
:return: json dataset with the same structure as the emrQA but linking each question with a section in a document
instead of the whole report.
"""
errors = 0
orig_num_answers = 0
new_num_answers = 0
emrqa_datasets = {}
for task in emrqa['data']:
title = task['title']
        # Split only the 'medication' and 'relations' datasets, since those are used for evaluation
if title in ['medication', 'relations']:
reports = task['paragraphs']
documents = []
#Looping through all medical reports
for report in reports:
note_id = report['note_id']
text_lines = report['context']
qas = report['qas']
new_report = {"title": note_id}
new_paragraphs = []
#Splitting Sections
sections, section_nums = split_sections(text_lines)
#Reversing the map from lines to section numbers
line_to_section = flip_section_list(section_nums)
section_num_to_qas = {}
#Looping through all questions for this report, each question might have multiple answers
for qa in qas:
section_num_to_answers, orig_num_answers, new_num_answers, errors = group_answers_by_section(qa, sections, section_nums, line_to_section,
orig_num_answers, new_num_answers, errors)
# Aggregate qas with equivalent section num
for section_num in section_num_to_answers.keys():
new_answers = section_num_to_answers[section_num]
new_qa = copy.deepcopy(qa)
new_qa['answers'] = new_answers
if section_num in section_num_to_qas:
new_qas = section_num_to_qas[section_num]
else:
new_qas = []
new_qas.append(new_qa)
section_num_to_qas[section_num] = new_qas
for section_num in section_num_to_qas.keys():
section = sections[section_num]
paragraph = {"note_id": note_id + "_" + str(section_num),
"context": section,
"qas": section_num_to_qas[section_num]}
new_paragraphs.append(paragraph)
new_report['paragraphs'] = new_paragraphs
documents.append(new_report)
print("Saving {}".format(title))
emrqa_datasets[title] = documents
return emrqa_datasets, orig_num_answers, new_num_answers, errors
def locate_answer_start(evidence, context):
    while evidence and evidence[-1] in [',', '.', '?', '!', '-', ' ']:
evidence = evidence[:-1]
char_pos = -1
temp_evidence = evidence
final_evidence = temp_evidence
while char_pos == -1:
char_pos = context.find(temp_evidence)
final_evidence = temp_evidence
temp_evidence = ' '.join(temp_evidence.split()[:-1])
return char_pos, final_evidence
def combine_answer_lines(answers):
line_numbers = []
answers_by_line = {}
combined_answers = []
for answer in answers:
line = answer['evidence_start']
line_numbers.append(line)
answers_by_line[line] = answer
ordered_line_numbers = np.sort(line_numbers)
prev_line = ordered_line_numbers[0]
combined_line = answers_by_line[prev_line]['evidence']
for line_num in ordered_line_numbers[1:]:
if line_num - prev_line == 1:
combined_line += " " + answers_by_line[line_num]['evidence']
else:
combined_answers.append({'evidence': combined_line})
combined_line = answers_by_line[line_num]['evidence']
prev_line = line_num
combined_answers.append({'evidence': combined_line})
return combined_answers
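# Usage sketch (illustrative data): adjacent evidence lines get merged into one answer.
# combine_answer_lines([
#     {'evidence_start': 3, 'evidence': 'aspirin 81mg'},
#     {'evidence_start': 4, 'evidence': 'daily'},
#     {'evidence_start': 9, 'evidence': 'lisinopril'},
# ])
# -> [{'evidence': 'aspirin 81mg daily'}, {'evidence': 'lisinopril'}]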
def transform_emrqa_to_squad_format(emrqa_format_dataset):
answers_checked = 0
long_answers = 0
squad_format_dataset = {}
for title in emrqa_format_dataset.keys():
records = emrqa_format_dataset[title]
new_records = []
for record in records:
record_id = record['title']
sections = record['paragraphs']
new_record = {'title': record_id}
new_sections = []
context_set = set()
for section in sections:
text_list = section['context']
context = " ".join((" ".join(text_list).split()))
context = prune_sentence(context)
qas = section['qas']
new_qas = []
for qa in qas:
questions = qa['question']
answers = qa['answers']
new_answers = []
if len(answers) > 1:
answers = combine_answer_lines(answers)
# Clean Answers and Check that they are in the context
for answer in answers:
evidence = answer['evidence']
evidence = prune_sentence(evidence)
evidence_length = len(evidence.split())
if len(evidence.strip()) > 0 and evidence_length < 20:
answer_start, evidence = locate_answer_start(evidence, context)
new_answers.append({'answer_start': answer_start, 'text': evidence})
assert evidence in context
answers_checked += 1
else:
long_answers += 1
# Add new qa pair for each question paraphrase
for question in questions:
# If all answers were too long don't append question
if len(new_answers) > 0:
new_qas.append({'question': question, 'id': str(uuid.uuid1().hex), 'answers': new_answers})
# If all questions in section had longer than acceptable answers
if len(new_qas) > 0:
context_set.add(section['note_id'])
new_sections.append({'qas': new_qas, 'context': context})
# else:
# print("Lost Section")
assert len(context_set) == len(new_sections)
            # if all sections in the record only had longer-than-acceptable answers
if len(new_sections) > 0:
new_record['paragraphs'] = new_sections
new_records.append(new_record)
# else:
# print("Lost Whole record")
squad_format_dataset[title] = new_records
return squad_format_dataset, answers_checked, long_answers
def count_squad_format_qas_and_contexts(noheader_squad_dataset):
num_qas = {}
num_contexts = {}
for title in noheader_squad_dataset.keys():
num_qa = 0
num_context = 0
records = noheader_squad_dataset[title]
for record in records:
sections = record['paragraphs']
num_context += 1
for section in sections:
qas = section['qas']
for qa in qas:
num_qa += 1
num_qas[title] = num_qa
num_contexts[title] = num_context
return num_qas, num_contexts
def add_header_and_save(emrqa_datasets, directory):
for name in emrqa_datasets.keys():
emrqa_datasets[name] = {'version': '1', 'data': emrqa_datasets[name]}
json.dump(emrqa_datasets[name], open(directory + '{}.json'.format(name), 'w'))
return emrqa_datasets
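# Minimal end-to-end sketch (illustrative): the output directory below is an
# assumption; this only shows how the helpers above chain together.
def _emrqa_to_squad_example():
    datasets = load_emrqa_datasets(data_dir="../data/datasets/*json")
    for path, emrqa in datasets.items():
        split_docs, orig_n, new_n, errors = create_split_docs_emrqa(emrqa)
        squad, checked, too_long = transform_emrqa_to_squad_format(split_docs)
        print(path, count_squad_format_qas_and_contexts(squad))
        add_header_and_save(squad, directory="../data/squad_format/")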
|
[
"copy.deepcopy",
"json.loads",
"numpy.sort",
"uuid.uuid1",
"glob.glob",
"preprocessing.prune_sentence",
"re.compile"
] |
[((1504, 1542), 're.compile', 're.compile', (['"""([^A-Za-z]*)([^:]+):(.*)"""'], {}), "('([^A-Za-z]*)([^:]+):(.*)')\n", (1514, 1542), False, 'import re\n'), ((1662, 1711), 're.compile', 're.compile', (['"""[^A-Za-z]*([A-Za-z ]*)[^A-Za-z](.*)"""'], {}), "('[^A-Za-z]*([A-Za-z ]*)[^A-Za-z](.*)')\n", (1672, 1711), False, 'import re\n'), ((3689, 3708), 'glob.glob', 'glob.glob', (['data_dir'], {}), '(data_dir)\n', (3698, 3708), False, 'import glob\n'), ((9491, 9512), 'numpy.sort', 'np.sort', (['line_numbers'], {}), '(line_numbers)\n', (9498, 9512), True, 'import numpy as np\n'), ((3823, 3844), 'json.loads', 'json.loads', (['json_file'], {}), '(json_file)\n', (3833, 3844), False, 'import json\n'), ((4860, 4881), 'copy.deepcopy', 'copy.deepcopy', (['answer'], {}), '(answer)\n', (4873, 4881), False, 'import copy\n'), ((10695, 10718), 'preprocessing.prune_sentence', 'prune_sentence', (['context'], {}), '(context)\n', (10709, 10718), False, 'from preprocessing import prune_sentence\n'), ((7750, 7767), 'copy.deepcopy', 'copy.deepcopy', (['qa'], {}), '(qa)\n', (7763, 7767), False, 'import copy\n'), ((11261, 11285), 'preprocessing.prune_sentence', 'prune_sentence', (['evidence'], {}), '(evidence)\n', (11275, 11285), False, 'from preprocessing import prune_sentence\n'), ((12121, 12133), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (12131, 12133), False, 'import uuid\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 1 21:22:56 2022
Using Gaussian basis set to propagate the nonadiabatic molecular dynamics
@author: <NAME>
"""
import numpy as np
class GWP:
def __init__(self, x, p, a, phase, coeff):
self.x = x
self.p = p
self.a = a
self.phase = phase
self.coeff = coeff # electronic coefficients
class NAMD:
    def __init__(self, bases, dim=1):
        self.bases = bases
        self.nbasis = len(bases)
self.nstates = len(bases[0].coeff)
self.dim = dim
def overlap(self):
"""
construct overlap matrix from GWPs defined by {a,x,p}
"""
# N = self.nbasis
# S = np.identity(N, dtype=np.complex128)
# for j in range(N):
# gj = self.bases[j]
# aj, qj, pj = gj.a, gj.x, gj.p
# for k in range(j):
# gk = self.bases[k]
# ak, qk, pk = gk.a, gk.x, gk.p
# dq = qk - qj
# dp = pk - pj
# S[j,k] = (aj*ak)**0.25 * np.sqrt(2./(aj+ak)) * np.exp( \
# -0.5 * aj*ak/(aj+ak) * (dp**2/aj/ak + dq**2 \
# + 2.0*1j* (pj/aj + pk/ak) *dq) )
# S[k, j] = S[j, k].conj()
# return S
def kmat(self):
pass
def vmat(self):
pass
def run(self):
pass
def overlap_1d(aj, x, px, sj, ak, y, py, sk):
"""
overlap between two 1D GWPs
"""
dp = py - px
dq = y - x
return (aj*ak)**0.25 * np.sqrt(2./(aj+ak)) * np.exp( \
-0.5 * aj*ak/(aj+ak) * (dp**2/aj/ak + dq**2 \
+ 2.0*1j* (px/aj + py/ak) *dq) ) * np.exp(1j * (sk-sj))
def overlap(gj, gk):
"""
overlap between two GWPs defined by {a,x,p}
"""
aj, qj, pj, sj = gj.a, gj.x, gj.p, gj.phase
ak, qk, pk, sk = gk.a, gk.x, gk.p, gk.phase
tmp = 1.0
    for d in range(ndim):  # ndim: number of nuclear degrees of freedom (module-level global)
tmp *= overlap_1d(aj[d], qj[d], pj[d], sj, ak[d], qk[d], pk[d], sk)
return tmp
def kin_me(gj, gk):
"""
kinetic energy matrix elements between two multidimensional GWPs
"""
aj, qj, pj, sj = gj.a, gj.x, gj.p, gj.phase
ak, qk, pk, sk = gk.a, gk.x, gk.p, gk.phase
l = 0.0
    for d in range(ndim):  # ndim, am: number of dims and mass per dim (module-level globals)
l += kin_1d(aj[d], qj[d], pj[d], sj, ak[d], qk[d], pk[d], sk, am[d])
return l
# @numba.jit
def kin_1d(aj, qj, pj, sj, ak, qk, pk, sk, am):
"""
kinetic energy matrix elements between two multidimensional GWPs
"""
p0 = (aj*pk + ak*pj)/(aj+ak)
d0 = 0.5/am * ( (p0+1j*aj*ak/(aj+ak)*(qj-qk))**2 + aj*ak/(aj+ak) )
l = d0 * overlap_1d(aj, qj, pj, sj, ak, qk, pk, sk)
return l
def kmat(bset):
"""
kinetic energy matrix
"""
nb = len(bset)
kin = np.zeros((nb, nb), dtype=complex)
for i in range(nb):
gi = bset[i]
for j in range(i+1):
gj = bset[j]
kin[i,j] = kin_me(gi, gj)
kin[j,i] = np.conj(kin[i,j])
return kin
def H():
"""
construct the hamiltonian matrix
Kinetic energy operator can be computed exactly.
Potential energy operator - approximation
Nonadiabatic coupling -
"""
|
[
"numpy.conj",
"numpy.zeros",
"numpy.exp",
"numpy.sqrt"
] |
[((2769, 2802), 'numpy.zeros', 'np.zeros', (['(nb, nb)'], {'dtype': 'complex'}), '((nb, nb), dtype=complex)\n', (2777, 2802), True, 'import numpy as np\n'), ((1685, 1709), 'numpy.exp', 'np.exp', (['(1.0j * (sk - sj))'], {}), '(1.0j * (sk - sj))\n', (1691, 1709), True, 'import numpy as np\n'), ((1566, 1677), 'numpy.exp', 'np.exp', (['(-0.5 * aj * ak / (aj + ak) * (dp ** 2 / aj / ak + dq ** 2 + 2.0 * 1.0j * (\n px / aj + py / ak) * dq))'], {}), '(-0.5 * aj * ak / (aj + ak) * (dp ** 2 / aj / ak + dq ** 2 + 2.0 * \n 1.0j * (px / aj + py / ak) * dq))\n', (1572, 1677), True, 'import numpy as np\n'), ((2966, 2984), 'numpy.conj', 'np.conj', (['kin[i, j]'], {}), '(kin[i, j])\n', (2973, 2984), True, 'import numpy as np\n'), ((1544, 1568), 'numpy.sqrt', 'np.sqrt', (['(2.0 / (aj + ak))'], {}), '(2.0 / (aj + ak))\n', (1551, 1568), True, 'import numpy as np\n')]
|
"""
2019 (c) piteren
"""
import numpy as np
from typing import List
from ptools.neuralmess.get_tf import tf
from ptools.neuralmess.base_elements import my_initializer, flatten_LOTens
# residual (advanced) connection for (any) layer
def lay_res(
lay_in, # layer input
lay_out, # layer output
name= 'residual',
use_RCW= False, # use residual connection weights
use_PWRCW= False, # pointwise weights
match_dims= True): # concatenates zeros to input when thinner
    # TODO: not working for higher-dimensional tensors
with tf.variable_scope(name):
output = lay_out
iW = int(lay_in.shape[-1])
oW = int(output.shape[-1])
matchedDims = iW == oW
# pad input with zeros to match dimension of output
if iW < oW and match_dims:
lay_in = tf.pad(
tensor= lay_in,
paddings= tf.constant([[0,0],[0,oW-iW]]))
matchedDims = True
if matchedDims:
if use_RCW:
if use_PWRCW: shape = [oW]
else: shape = []
convRCW = tf.get_variable(
name= 'rcw',
shape= shape,
initializer= tf.constant_initializer(0))
output = lay_in * (1 - tf.sigmoid(convRCW)) + output * tf.sigmoid(convRCW)
else:
output = lay_in + output
return output
# returns [0,1] tensor: 1 where input is not activated over the whole batch, seq... (NANe - Not Activated NEurons)
# we assume that a value <= 0 means: not activated
# we check over all axes but last (feats)
def zeroes(input :tf.Tensor) -> tf.Tensor:
axes = [ix for ix in range(len(input.shape))][:-1] # all but last(feats) axes indexes list like: [0,1,2] for 4d shape
activated = tf.where( # 1 for value greater than zero, other 0
condition= tf.math.greater(input, 0),
x= tf.ones_like(input), # true
y= tf.zeros_like(input)) # false
activated_reduced = tf.reduce_sum(activated, axis=axes) # 1 or more for activated, 0 for not activated
not_activated = tf.equal(activated_reduced, 0) # true where summed gives zero (~invert)
nn_zeros = tf.cast(not_activated, dtype=tf.int8) # cast to 1 where summed gives zero
return nn_zeros
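# Usage sketch (illustrative): with a [batch, seq, feats] activation tensor,
# zeroes() flags features that never exceed zero anywhere in the batch/sequence.
# acts = tf.constant([[[0.5, 0.0], [0.2, 0.0]]])   # second feature never activated
# zeroes(acts)                                     # evaluates to [0 1] when run in a session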
# dense layer
def lay_dense(
input,
units :int, # layer width
name= 'dense',
reuse= False,
activation= None,
use_bias= True,
initializer= None,
seed= 12321):
if initializer is None: initializer = my_initializer(seed)
dense_lay = tf.layers.Dense(
units= units,
activation= activation,
use_bias= use_bias,
kernel_initializer= initializer,
name= name,
_reuse= reuse)
output = dense_lay(input)
return output
# 1d convolution layer, with Gated Linear Unit option
def lay_conv1D(
input,
name= 'conv1D',
kernels= (3,5,7), # layer kernels
filters= (36,12,6), # int divisible by len(kernels) or tuple of len(kernels)
dilation= 1,
activation= None,
use_bias= True,
gated_LU= False, # Gated Linear Unit architecture
initializer= None,
padding= 'valid', # 'same' adds padding, 'valid' does not
seed= 12321,
verb= 0):
if initializer is None: initializer = my_initializer(seed)
with tf.variable_scope(name):
sub_out_list = []
if type(kernels) is not tuple: kernels = (kernels,)
if verb > 1:
print(' > %s: kernels %s, filters %s, dilation %s' % (name, kernels, filters, dilation))
for k in range(len(kernels)):
with tf.variable_scope('kernel_%d' % k):
sub_kernel = kernels[k]
if type(filters) is not tuple: sub_filters = filters // len(kernels)
else: sub_filters = filters[k]
if gated_LU: sub_filters *= 2
conv_lay = tf.layers.Conv1D(
filters= sub_filters,
kernel_size= sub_kernel,
dilation_rate= dilation,
activation= None,
use_bias= use_bias,
kernel_initializer= initializer,
padding= padding,
data_format= 'channels_last')
sub_output = conv_lay(input)
if verb > 1: print(' >> sub_conv: filters %s, kernel %s' % (sub_filters, sub_kernel))
sub_out_list.append(sub_output)
output = tf.concat(sub_out_list, axis=-1)
if gated_LU:
s1, s2 = tf.split(output, num_or_size_splits=2, axis=-1)
output = s1 * tf.sigmoid(s2)
elif activation: output = activation(output)
return output
# 2d convolution layer
def lay_conv2D(
input,
name= 'conv2d',
kernels= (3, 5, 7), # layer kernels
filters= (36, 12, 6), # int divisible by len(kernels) or tuple of len(kernels)
dilation= 1,
activation= None,
useBias= True,
gatedLU= False, # Gated Linear Unit architecture
initializer= None,
seed= 12321,
verbLev= 0):
if initializer is None: initializer = my_initializer(seed)
with tf.variable_scope(name):
variables = []
subOutList = []
if type(kernels) is not tuple: kernels = (kernels,)
if verbLev > 0:
            print(' > %s: kernels %s, filters %s, dilation %s' % (name, kernels, filters, dilation))
for k in range(len(kernels)):
with tf.variable_scope('kernel_%d' % k):
subKernel = kernels[k]
if type(filters) is not tuple:
                    subFilters = filters // len(kernels)
else:
subFilters = filters[k]
if gatedLU: subFilters *= 2
convLay = tf.layers.Conv2D(
filters= subFilters,
kernel_size= subKernel,
dilation_rate= dilation,
activation= None,
use_bias= useBias,
kernel_initializer= initializer,
padding= 'valid',
data_format= 'channels_last')
subOutput = convLay(input)
for var in convLay.variables: variables.append(var)
if verbLev > 1: print(' >> subConv: filters %s, kernel %s' % (subFilters, subKernel))
subOutList.append(subOutput)
output = tf.concat(subOutList, axis=-1)
if gatedLU:
s1, s2 = tf.split(output, num_or_size_splits=2, axis=-1)
output = s1 * tf.sigmoid(s2)
else:
if activation: output = activation(output)
variables = flatten_LOTens(variables)
return output, variables
# attention for Query, Key and Value
def attn(
q, # decides about output shape
k,
v,
dropout= 0.0,
drop_flag= None,
seed= 12321):
w = tf.matmul(q, k, transpose_b=True) # q*kT, here we calculate weights - how much each key is relevant to each query
w = w * tf.rsqrt(tf.cast(v.shape[-1].value, w.dtype)) # scale by 1/sqrt(v.dim[-1])
w = tf.nn.softmax(w) # normalize sum to 1
if dropout:
w = tf.layers.dropout(
inputs= w,
rate= dropout,
training= drop_flag,
seed= seed)
att = tf.matmul(w, v)
return {
'attention': att,
'att_weights': w}
# time & feats dropout (for sequences)
def tf_drop(
input, # tensor [batch,seq,feats]
time_drop :float,
feat_drop :float,
train_flag, # tensor
seed= 12321):
output = input
in_shape = tf.shape(input)
# time (per vector) dropout
if time_drop:
t_drop = tf.ones(shape=in_shape[-2])
t_drop = tf.layers.dropout(
inputs= t_drop,
rate= time_drop,
training= train_flag,
seed= seed)
t_drop = tf.expand_dims(t_drop, axis=-1)
output *= t_drop
# feature (constant in time) dropout
if feat_drop:
f_drop = tf.ones(shape=in_shape[-1])
f_drop = tf.layers.dropout(
inputs= f_drop,
rate= feat_drop,
training= train_flag,
seed= seed)
f_drop = tf.expand_dims(f_drop, axis=-2)
output *= f_drop
return output
# positional encoding layer
def positional_encoding(
positions :int, # max number of positions to encode
width :int, # width of positions vector
min_pi_range= 1,
max_pi_range= 10,
as_numpy= True,
verb= 0):
angle_rates = np.linspace(min_pi_range/max_pi_range, 1, num=width)
if verb > 0: print(f'\ni.linspace\n{angle_rates}')
angle_rates = angle_rates[np.newaxis, :]
if verb > 0: print(f'\nangle_rates.new_axis\n{angle_rates}')
pos = np.arange(positions)[:, np.newaxis]
if verb > 0: print(f'\npos.arange.newaxis\n{pos}')
pos = pos / positions * max_pi_range
if verb > 0: print(f'\npos.scaled to range\n{pos}')
angle_rads = pos * angle_rates
if verb > 0: print(f'\nangle_rads {angle_rads.shape}\n{angle_rads}')
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2] * np.pi)
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2] * np.pi)
pos_encoding = angle_rads[np.newaxis, ...]
pos_encoding = pos_encoding - pos_encoding.mean()
if as_numpy: pos_encoding = pos_encoding.astype(dtype=np.float32)
else: pos_encoding = tf.cast(pos_encoding, dtype=tf.float32)
return pos_encoding
def attention_example():
print('self attention')
q = tf.constant(value=np.random.rand(5,4))
k = tf.constant(value=np.random.rand(5,4))
v = tf.constant(value=np.random.rand(5,4))
attn_out = attn(q,k,v)
print(attn_out)
print('task attention')
q = tf.constant(value=np.random.rand(1, 4))
k = tf.constant(value=np.random.rand(5, 4))
v = tf.constant(value=np.random.rand(5, 4))
attn_out = attn(q, k, v)
print(attn_out)
print('general attention')
vector_width = 4
number_of_queries = 2 # number of queries may vary
number_of_keys_and_values = 5 # each key is for each value, so their number has to match
q = tf.constant(value=np.random.rand(number_of_queries, vector_width))
k = tf.constant(value=np.random.rand(number_of_keys_and_values, vector_width))
v = tf.constant(value=np.random.rand(number_of_keys_and_values, vector_width))
attn_out = attn(q, k, v)
print(attn_out)
def tf_drop_example():
v = tf.constant(value=np.random.rand(2,3,4).astype(np.float32))
print(v)
v_drop = tf_drop(input=v, time_drop=0.0, feat_drop=0.5, train_flag=True, seed=112)
print(v_drop)
with tf.Session() as sess:
v, v_drop = sess.run([v, v_drop])
print(v)
print(v_drop)
if __name__ == '__main__':
#attention_example()
tf_drop_example()
|
[
"ptools.neuralmess.get_tf.tf.split",
"ptools.neuralmess.get_tf.tf.layers.dropout",
"ptools.neuralmess.get_tf.tf.equal",
"ptools.neuralmess.get_tf.tf.matmul",
"ptools.neuralmess.base_elements.flatten_LOTens",
"ptools.neuralmess.get_tf.tf.cast",
"numpy.sin",
"numpy.arange",
"ptools.neuralmess.get_tf.tf.Session",
"ptools.neuralmess.get_tf.tf.math.greater",
"ptools.neuralmess.get_tf.tf.reduce_sum",
"ptools.neuralmess.get_tf.tf.nn.softmax",
"ptools.neuralmess.get_tf.tf.expand_dims",
"ptools.neuralmess.get_tf.tf.layers.Conv1D",
"numpy.linspace",
"ptools.neuralmess.get_tf.tf.zeros_like",
"ptools.neuralmess.get_tf.tf.concat",
"ptools.neuralmess.get_tf.tf.ones_like",
"ptools.neuralmess.get_tf.tf.layers.Dense",
"numpy.cos",
"ptools.neuralmess.get_tf.tf.constant",
"ptools.neuralmess.get_tf.tf.constant_initializer",
"ptools.neuralmess.get_tf.tf.sigmoid",
"ptools.neuralmess.base_elements.my_initializer",
"ptools.neuralmess.get_tf.tf.ones",
"ptools.neuralmess.get_tf.tf.variable_scope",
"numpy.random.rand",
"ptools.neuralmess.get_tf.tf.layers.Conv2D",
"ptools.neuralmess.get_tf.tf.shape"
] |
[((2240, 2275), 'ptools.neuralmess.get_tf.tf.reduce_sum', 'tf.reduce_sum', (['activated'], {'axis': 'axes'}), '(activated, axis=axes)\n', (2253, 2275), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2343, 2373), 'ptools.neuralmess.get_tf.tf.equal', 'tf.equal', (['activated_reduced', '(0)'], {}), '(activated_reduced, 0)\n', (2351, 2373), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2439, 2476), 'ptools.neuralmess.get_tf.tf.cast', 'tf.cast', (['not_activated'], {'dtype': 'tf.int8'}), '(not_activated, dtype=tf.int8)\n', (2446, 2476), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2901, 3032), 'ptools.neuralmess.get_tf.tf.layers.Dense', 'tf.layers.Dense', ([], {'units': 'units', 'activation': 'activation', 'use_bias': 'use_bias', 'kernel_initializer': 'initializer', 'name': 'name', '_reuse': 'reuse'}), '(units=units, activation=activation, use_bias=use_bias,\n kernel_initializer=initializer, name=name, _reuse=reuse)\n', (2916, 3032), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7754, 7787), 'ptools.neuralmess.get_tf.tf.matmul', 'tf.matmul', (['q', 'k'], {'transpose_b': '(True)'}), '(q, k, transpose_b=True)\n', (7763, 7787), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7983, 7999), 'ptools.neuralmess.get_tf.tf.nn.softmax', 'tf.nn.softmax', (['w'], {}), '(w)\n', (7996, 7999), False, 'from ptools.neuralmess.get_tf import tf\n'), ((8238, 8253), 'ptools.neuralmess.get_tf.tf.matmul', 'tf.matmul', (['w', 'v'], {}), '(w, v)\n', (8247, 8253), False, 'from ptools.neuralmess.get_tf import tf\n'), ((8584, 8599), 'ptools.neuralmess.get_tf.tf.shape', 'tf.shape', (['input'], {}), '(input)\n', (8592, 8599), False, 'from ptools.neuralmess.get_tf import tf\n'), ((9636, 9690), 'numpy.linspace', 'np.linspace', (['(min_pi_range / max_pi_range)', '(1)'], {'num': 'width'}), '(min_pi_range / max_pi_range, 1, num=width)\n', (9647, 9690), True, 'import numpy as np\n'), ((10188, 10223), 'numpy.sin', 'np.sin', (['(angle_rads[:, 0::2] * np.pi)'], {}), '(angle_rads[:, 0::2] * np.pi)\n', (10194, 10223), True, 'import numpy as np\n'), ((10250, 10285), 'numpy.cos', 'np.cos', (['(angle_rads[:, 1::2] * np.pi)'], {}), '(angle_rads[:, 1::2] * np.pi)\n', (10256, 10285), True, 'import numpy as np\n'), ((652, 675), 'ptools.neuralmess.get_tf.tf.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (669, 675), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2864, 2884), 'ptools.neuralmess.base_elements.my_initializer', 'my_initializer', (['seed'], {}), '(seed)\n', (2878, 2884), False, 'from ptools.neuralmess.base_elements import my_initializer, flatten_LOTens\n'), ((3834, 3854), 'ptools.neuralmess.base_elements.my_initializer', 'my_initializer', (['seed'], {}), '(seed)\n', (3848, 3854), False, 'from ptools.neuralmess.base_elements import my_initializer, flatten_LOTens\n'), ((3864, 3887), 'ptools.neuralmess.get_tf.tf.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (3881, 3887), False, 'from ptools.neuralmess.get_tf import tf\n'), ((5103, 5135), 'ptools.neuralmess.get_tf.tf.concat', 'tf.concat', (['sub_out_list'], {'axis': '(-1)'}), '(sub_out_list, axis=-1)\n', (5112, 5135), False, 'from ptools.neuralmess.get_tf import tf\n'), ((5873, 5893), 'ptools.neuralmess.base_elements.my_initializer', 'my_initializer', (['seed'], {}), '(seed)\n', (5887, 5893), False, 'from ptools.neuralmess.base_elements import my_initializer, flatten_LOTens\n'), ((5903, 5926), 'ptools.neuralmess.get_tf.tf.variable_scope', 'tf.variable_scope', (['name'], {}), '(name)\n', (5920, 
5926), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7232, 7262), 'ptools.neuralmess.get_tf.tf.concat', 'tf.concat', (['subOutList'], {'axis': '(-1)'}), '(subOutList, axis=-1)\n', (7241, 7262), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7483, 7508), 'ptools.neuralmess.base_elements.flatten_LOTens', 'flatten_LOTens', (['variables'], {}), '(variables)\n', (7497, 7508), False, 'from ptools.neuralmess.base_elements import my_initializer, flatten_LOTens\n'), ((8084, 8156), 'ptools.neuralmess.get_tf.tf.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'w', 'rate': 'dropout', 'training': 'drop_flag', 'seed': 'seed'}), '(inputs=w, rate=dropout, training=drop_flag, seed=seed)\n', (8101, 8156), False, 'from ptools.neuralmess.get_tf import tf\n'), ((8668, 8695), 'ptools.neuralmess.get_tf.tf.ones', 'tf.ones', ([], {'shape': 'in_shape[-2]'}), '(shape=in_shape[-2])\n', (8675, 8695), False, 'from ptools.neuralmess.get_tf import tf\n'), ((8713, 8798), 'ptools.neuralmess.get_tf.tf.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 't_drop', 'rate': 'time_drop', 'training': 'train_flag', 'seed': 'seed'}), '(inputs=t_drop, rate=time_drop, training=train_flag, seed=seed\n )\n', (8730, 8798), False, 'from ptools.neuralmess.get_tf import tf\n'), ((8882, 8913), 'ptools.neuralmess.get_tf.tf.expand_dims', 'tf.expand_dims', (['t_drop'], {'axis': '(-1)'}), '(t_drop, axis=-1)\n', (8896, 8913), False, 'from ptools.neuralmess.get_tf import tf\n'), ((9016, 9043), 'ptools.neuralmess.get_tf.tf.ones', 'tf.ones', ([], {'shape': 'in_shape[-1]'}), '(shape=in_shape[-1])\n', (9023, 9043), False, 'from ptools.neuralmess.get_tf import tf\n'), ((9061, 9146), 'ptools.neuralmess.get_tf.tf.layers.dropout', 'tf.layers.dropout', ([], {'inputs': 'f_drop', 'rate': 'feat_drop', 'training': 'train_flag', 'seed': 'seed'}), '(inputs=f_drop, rate=feat_drop, training=train_flag, seed=seed\n )\n', (9078, 9146), False, 'from ptools.neuralmess.get_tf import tf\n'), ((9230, 9261), 'ptools.neuralmess.get_tf.tf.expand_dims', 'tf.expand_dims', (['f_drop'], {'axis': '(-2)'}), '(f_drop, axis=-2)\n', (9244, 9261), False, 'from ptools.neuralmess.get_tf import tf\n'), ((9865, 9885), 'numpy.arange', 'np.arange', (['positions'], {}), '(positions)\n', (9874, 9885), True, 'import numpy as np\n'), ((10485, 10524), 'ptools.neuralmess.get_tf.tf.cast', 'tf.cast', (['pos_encoding'], {'dtype': 'tf.float32'}), '(pos_encoding, dtype=tf.float32)\n', (10492, 10524), False, 'from ptools.neuralmess.get_tf import tf\n'), ((11745, 11757), 'ptools.neuralmess.get_tf.tf.Session', 'tf.Session', ([], {}), '()\n', (11755, 11757), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2054, 2079), 'ptools.neuralmess.get_tf.tf.math.greater', 'tf.math.greater', (['input', '(0)'], {}), '(input, 0)\n', (2069, 2079), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2101, 2120), 'ptools.neuralmess.get_tf.tf.ones_like', 'tf.ones_like', (['input'], {}), '(input)\n', (2113, 2120), False, 'from ptools.neuralmess.get_tf import tf\n'), ((2168, 2188), 'ptools.neuralmess.get_tf.tf.zeros_like', 'tf.zeros_like', (['input'], {}), '(input)\n', (2181, 2188), False, 'from ptools.neuralmess.get_tf import tf\n'), ((5178, 5225), 'ptools.neuralmess.get_tf.tf.split', 'tf.split', (['output'], {'num_or_size_splits': '(2)', 'axis': '(-1)'}), '(output, num_or_size_splits=2, axis=-1)\n', (5186, 5225), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7304, 7351), 'ptools.neuralmess.get_tf.tf.split', 'tf.split', (['output'], {'num_or_size_splits': '(2)', 'axis': '(-1)'}), 
'(output, num_or_size_splits=2, axis=-1)\n', (7312, 7351), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7907, 7942), 'ptools.neuralmess.get_tf.tf.cast', 'tf.cast', (['v.shape[-1].value', 'w.dtype'], {}), '(v.shape[-1].value, w.dtype)\n', (7914, 7942), False, 'from ptools.neuralmess.get_tf import tf\n'), ((10630, 10650), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10644, 10650), True, 'import numpy as np\n'), ((10677, 10697), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10691, 10697), True, 'import numpy as np\n'), ((10724, 10744), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10738, 10744), True, 'import numpy as np\n'), ((10847, 10867), 'numpy.random.rand', 'np.random.rand', (['(1)', '(4)'], {}), '(1, 4)\n', (10861, 10867), True, 'import numpy as np\n'), ((10895, 10915), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10909, 10915), True, 'import numpy as np\n'), ((10943, 10963), 'numpy.random.rand', 'np.random.rand', (['(5)', '(4)'], {}), '(5, 4)\n', (10957, 10963), True, 'import numpy as np\n'), ((11253, 11300), 'numpy.random.rand', 'np.random.rand', (['number_of_queries', 'vector_width'], {}), '(number_of_queries, vector_width)\n', (11267, 11300), True, 'import numpy as np\n'), ((11336, 11391), 'numpy.random.rand', 'np.random.rand', (['number_of_keys_and_values', 'vector_width'], {}), '(number_of_keys_and_values, vector_width)\n', (11350, 11391), True, 'import numpy as np\n'), ((11419, 11474), 'numpy.random.rand', 'np.random.rand', (['number_of_keys_and_values', 'vector_width'], {}), '(number_of_keys_and_values, vector_width)\n', (11433, 11474), True, 'import numpy as np\n'), ((4152, 4186), 'ptools.neuralmess.get_tf.tf.variable_scope', 'tf.variable_scope', (["('kernel_%d' % k)"], {}), "('kernel_%d' % k)\n", (4169, 4186), False, 'from ptools.neuralmess.get_tf import tf\n'), ((4461, 4670), 'ptools.neuralmess.get_tf.tf.layers.Conv1D', 'tf.layers.Conv1D', ([], {'filters': 'sub_filters', 'kernel_size': 'sub_kernel', 'dilation_rate': 'dilation', 'activation': 'None', 'use_bias': 'use_bias', 'kernel_initializer': 'initializer', 'padding': 'padding', 'data_format': '"""channels_last"""'}), "(filters=sub_filters, kernel_size=sub_kernel, dilation_rate\n =dilation, activation=None, use_bias=use_bias, kernel_initializer=\n initializer, padding=padding, data_format='channels_last')\n", (4477, 4670), False, 'from ptools.neuralmess.get_tf import tf\n'), ((5252, 5266), 'ptools.neuralmess.get_tf.tf.sigmoid', 'tf.sigmoid', (['s2'], {}), '(s2)\n', (5262, 5266), False, 'from ptools.neuralmess.get_tf import tf\n'), ((6215, 6249), 'ptools.neuralmess.get_tf.tf.variable_scope', 'tf.variable_scope', (["('kernel_%d' % k)"], {}), "('kernel_%d' % k)\n", (6232, 6249), False, 'from ptools.neuralmess.get_tf import tf\n'), ((6530, 6736), 'ptools.neuralmess.get_tf.tf.layers.Conv2D', 'tf.layers.Conv2D', ([], {'filters': 'subFilters', 'kernel_size': 'subKernel', 'dilation_rate': 'dilation', 'activation': 'None', 'use_bias': 'useBias', 'kernel_initializer': 'initializer', 'padding': '"""valid"""', 'data_format': '"""channels_last"""'}), "(filters=subFilters, kernel_size=subKernel, dilation_rate=\n dilation, activation=None, use_bias=useBias, kernel_initializer=\n initializer, padding='valid', data_format='channels_last')\n", (6546, 6736), False, 'from ptools.neuralmess.get_tf import tf\n'), ((7378, 7392), 'ptools.neuralmess.get_tf.tf.sigmoid', 'tf.sigmoid', (['s2'], {}), '(s2)\n', (7388, 7392), 
False, 'from ptools.neuralmess.get_tf import tf\n'), ((993, 1028), 'ptools.neuralmess.get_tf.tf.constant', 'tf.constant', (['[[0, 0], [0, oW - iW]]'], {}), '([[0, 0], [0, oW - iW]])\n', (1004, 1028), False, 'from ptools.neuralmess.get_tf import tf\n'), ((11576, 11599), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (11590, 11599), True, 'import numpy as np\n'), ((1359, 1385), 'ptools.neuralmess.get_tf.tf.constant_initializer', 'tf.constant_initializer', (['(0)'], {}), '(0)\n', (1382, 1385), False, 'from ptools.neuralmess.get_tf import tf\n'), ((1459, 1478), 'ptools.neuralmess.get_tf.tf.sigmoid', 'tf.sigmoid', (['convRCW'], {}), '(convRCW)\n', (1469, 1478), False, 'from ptools.neuralmess.get_tf import tf\n'), ((1427, 1446), 'ptools.neuralmess.get_tf.tf.sigmoid', 'tf.sigmoid', (['convRCW'], {}), '(convRCW)\n', (1437, 1446), False, 'from ptools.neuralmess.get_tf import tf\n')]
|
#!/usr/bin/python
#coding = utf-8
import numpy as np
from RiskQuantLib.SecurityList.BondList.bondList import bondList
from RiskQuantLib.Security.Bond.bondIndexUnderlyingBond import bondIndexUnderlyingBond
from RiskQuantLib.Set.SecurityList.BondList.bondIndexUnderlyingBondList import setBondIndexUnderlyingBondList
class bondIndexUnderlyingBondList(bondList,setBondIndexUnderlyingBondList):
elementClass = bondIndexUnderlyingBond
def __init__(self):
super(bondIndexUnderlyingBondList,self).__init__()
self.listType = 'Bond Index Underlying Bond List'
def addBond(self, codeString, nameString, weightNum = np.nan, securityTypeString = 'Bond Index Underlying Bond'):
underlyingBond = bondIndexUnderlyingBond(codeString,nameString,securityTypeString)
underlyingBond.setWeight(weightNum)
tmpList = self.all + [underlyingBond]
self.setAll(tmpList)
def addBondSeries(self, bondCodeSeries, bondNameSeries, bondWeightSeries = np.nan, securityTypeString = 'Bond Index Underlying Bond'):
bondSeries = [bondIndexUnderlyingBond(i,j,securityTypeString) for i,j in zip(bondCodeSeries,bondNameSeries)]
if not (type(bondWeightSeries)==type(np.nan) and np.isnan(bondWeightSeries)):
[i.setWeight(j) for i,j in zip(bondSeries,bondWeightSeries)]
tmpList = self.all + bondSeries
self.setAll(tmpList)
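# Usage sketch (illustrative codes/names/weights; assumes the parent bondList
# provides .all / .setAll as elsewhere in RiskQuantLib):
# bl = bondIndexUnderlyingBondList()
# bl.addBondSeries(['B001', 'B002'], ['Bond A', 'Bond B'], bondWeightSeries=[0.6, 0.4])
# bl.addBond('B003', 'Bond C', weightNum=0.1)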
|
[
"RiskQuantLib.Security.Bond.bondIndexUnderlyingBond.bondIndexUnderlyingBond",
"numpy.isnan"
] |
[((720, 787), 'RiskQuantLib.Security.Bond.bondIndexUnderlyingBond.bondIndexUnderlyingBond', 'bondIndexUnderlyingBond', (['codeString', 'nameString', 'securityTypeString'], {}), '(codeString, nameString, securityTypeString)\n', (743, 787), False, 'from RiskQuantLib.Security.Bond.bondIndexUnderlyingBond import bondIndexUnderlyingBond\n'), ((1067, 1116), 'RiskQuantLib.Security.Bond.bondIndexUnderlyingBond.bondIndexUnderlyingBond', 'bondIndexUnderlyingBond', (['i', 'j', 'securityTypeString'], {}), '(i, j, securityTypeString)\n', (1090, 1116), False, 'from RiskQuantLib.Security.Bond.bondIndexUnderlyingBond import bondIndexUnderlyingBond\n'), ((1219, 1245), 'numpy.isnan', 'np.isnan', (['bondWeightSeries'], {}), '(bondWeightSeries)\n', (1227, 1245), True, 'import numpy as np\n')]
|
#Author: <NAME>. Licence: MIT. Objective: Create representations of texts
import os
import random
import sys # Import other directory
import timeit # Measure time
import numpy
import scipy
import torch
from afinn import Afinn
from scipy.sparse import hstack
from sklearn.datasets import dump_svmlight_file # save format svmlight
from sklearn.datasets import load_svmlight_file, load_svmlight_files
from sklearn.decomposition import NMF # topic modeling
from sklearn.decomposition import (TruncatedSVD)
from sklearn.feature_extraction.text import TfidfVectorizer # representation tfidf
from sklearn.preprocessing import (MaxAbsScaler, MinMaxScaler, Normalizer,PowerTransformer, QuantileTransformer, StandardScaler)
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import claudio_funcoes_sub as cv # utility functions by the author
random.seed(42); torch.manual_seed(42); numpy.random.seed(seed=42) # reproducibility of the solution
def assign_GPU(Tokenizer_output):
tokens_tensor = Tokenizer_output['input_ids'].to('cuda:0')
attention_mask = Tokenizer_output['attention_mask'].to('cuda:0')
output = {'input_ids' : tokens_tensor,
#'token_type_ids' : token_type_ids,
'attention_mask' : attention_mask}
return output
def vader(text):
"""Return score Vader"""
analyzer = SentimentIntensityAnalyzer()
dict_vader = analyzer.polarity_scores(text)
return [dict_vader['neg'], dict_vader['neu'], dict_vader['pos']]
#return [dict_vader['compound']]
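# Usage sketch (illustrative): per-document [neg, neu, pos] scores from VADER.
# vader("I love this product, it works great!")   # high 'pos', low 'neg'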
def representation_bert(x, pooling=None):
"""Create representation BERT"""
import numpy
from transformers import BertModel, BertTokenizer
if "16" in pooling: limit_token=16
elif "32" in pooling: limit_token=32
elif "64" in pooling: limit_token=64
elif "128" in pooling: limit_token=128
elif "256" in pooling: limit_token=256
elif "512" in pooling: limit_token=512
limit_token=512
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states = True)
model = model.to('cuda:0') # gpu
for index_doc in range(len(x)):
inputs = tokenizer(x[index_doc], return_tensors="pt", max_length=limit_token, truncation=True)
inputs = assign_GPU(inputs)
outputs = model(**inputs)
if 'bert_concat' in pooling or 'bert_sum' in pooling or 'bert_last_avg' in pooling or 'bert_cls' in pooling:
hidden_states = outputs[2]
token_embeddings = torch.stack(hidden_states, dim=0)
            token_embeddings = torch.squeeze(token_embeddings, dim=1) # remove the first (batch) dimension of the initial embedding
            token_embeddings = token_embeddings.permute(1,0,2) # reorder so that each row is a different token
vets = []
for token in token_embeddings:
if 'bert_concat' == pooling:
                    vets.append( torch.cat((token[-1], token[-2], token[-3], token[-4]), dim=0).cpu().detach().numpy() ) # concatenate the last 4 hidden layers
elif 'bert_sum' == pooling:
vets.append( torch.sum(token[-4:], dim=0).cpu().detach().numpy() )
elif 'bert_last_avg' == pooling:
vets.append( torch.mean(token[-4:], dim=0).cpu().detach().numpy() )
elif 'bert_cls' == pooling:
                    x[index_doc] = token[-1].cpu().detach().numpy() # the first token is [CLS]; keep its last hidden layer
break
if 'bert_cls' != pooling:
x[index_doc] = numpy.mean( vets, axis=0)
else:
tokens = outputs[0].cpu().detach().numpy()[0]
if 'bert_avg' in pooling:
x[index_doc] = numpy.mean(tokens, axis=0) #average
elif 'bert_max' in pooling: x[index_doc] = numpy.amax(tokens, axis=0)
return x
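# Usage sketch (illustrative): mean-pooled BERT features for a toy corpus.
# Note the function moves tensors to 'cuda:0', so a GPU build of torch is assumed.
# docs = ["the movie was great", "terrible service"]
# feats = representation_bert(list(docs), pooling='bert_avg')  # each doc -> 768-dim vector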
def process_transform(dataset, x_all=None):
for trans in sys.argv[6].split(','):
if x_all == None:
x_all = cv.file_to_corpus('dataset/' +dataset +'/orig/texts.txt')
y = cv.file_to_corpus('dataset/' +dataset +'/orig/score.txt')
y = [float(y) for y in y]
elif 'bert' in trans:
x_all = [cv.preprocessor(x) for x in x_all]
x = representation_bert(x_all, trans)
elif trans == 'vader':
x_all = [cv.preprocessor(x) for x in x_all]
x = [vader(x) for x in x_all]
elif trans == 'affin':
afinn = Afinn(emoticons=True)
x = [[afinn.score(x)] for x in x_all]
try:
os.mkdir("dataset/representations/"+dataset +'_f_' +trans) # Create directory
except OSError:
print('directory exist')
print(dataset +'_f_' +trans)
dump_svmlight_file(x, y, "dataset/representations/" +dataset +'_f_' +trans +'/feature')
cv.save_dict_file('times/' +name_dataset +"_0", {'time_representation': (timeit.default_timer() - ini)})
if __name__ == "__main__":
ini = timeit.default_timer() # Time process
name_dataset = sys.argv[1] # name dataset
ids=sys.argv[2] # file ids
datas=sys.argv[3] # file texts
labels=sys.argv[4] # file label
index = int(sys.argv[5]) # index fold
rs = sys.argv[6] # representations
name_dataset_real = ids.split("/")[1]
#process_transform(name_dataset_real) # generate bert
try:
os.mkdir("dataset/representations/"+name_dataset) # Create directory
except OSError:
pass#; print('directory exist')
x_train, y_train, x_test, y_test = cv.ids_train_test(ids, datas, labels, index)
x_train = [cv.preprocessor(x) for x in x_train]
x_test = [cv.preprocessor(x) for x in x_test]
    y_train = [float(y) for y in y_train] # cast to float so the classifier can use the labels
y_test = [float(y) for y in y_test]
if ',' in rs or 'bert' in rs:
x_train = None
x_test = None
for r in rs.split(','):
scaler_boolean = False
soma_um = False
if '_min_max' in r:
scaler_boolean = True
scaler = MinMaxScaler()
r = r.split('_min_max')[0]
if '_scaler' in r:
scaler_boolean = True
if 'tfidf' in r:
scaler = StandardScaler(with_mean=False)
else :
scaler = StandardScaler()
r = r.split('_scaler')[0]
if '_maxabs' in r:
scaler_boolean = True
scaler = MaxAbsScaler()
r = r.split('_maxabs')[0]
if '_quantile' in r:
scaler_boolean = True
scaler = QuantileTransformer(random_state=42)
r = r.split('_quantile')[0]
if '_power' in r:
scaler_boolean = True
scaler = PowerTransformer()
r = r.split('_power')[0]
if '_normalizer' in r:
scaler_boolean = True
if '_normalizer_l1' in r:
scaler = Normalizer(norm='l1')
r = r.split('_normalizer_l1')[0]
elif '_normalizer_l2' in r:
scaler = Normalizer(norm='l2')
r = r.split('_normalizer_l2')[0]
elif '_normalizer_max' in r:
scaler = Normalizer(norm='max')
r = r.split('_normalizer_max')[0]
if 'word_tfidf' in r or 'char_tfidf' in r or 'graph' in r:# in r or 'roberta_min_max' in r or 'metafeature' in r:
f_x_train, nao_usa1, f_x_test, nao_usa2 = load_svmlight_files([open('dataset/representations/' +name_dataset_real +'_' +r +'/train'+str(index), 'rb'), open('dataset/representations/' +name_dataset_real +'_' +r +'/test'+str(index), 'rb')])
else:
f_x, nao_usa = load_svmlight_file(open("dataset/representations/" +name_dataset_real +'_f_' +r +'/feature', 'rb'))
f_x_train, f_x_test = cv.ids_train_test_representation(ids, f_x, index, ' ')
if scaler_boolean == True:
f_x_train = scaler.fit_transform(f_x_train)
f_x_test = scaler.transform(f_x_test)
if x_train is None:
x_train = f_x_train
x_test = f_x_test
else:
x_train = hstack([ x_train, scipy.sparse.csr_matrix(f_x_train) ])
x_test = hstack([ x_test, scipy.sparse.csr_matrix(f_x_test) ])
if ',' in rs or 'min_max' in rs or 'scaler' in rs or '_maxabs' in rs or '_porwer' in rs or '_normalizer' in rs:
cv.save_dict_file('times/' +name_dataset +"_" +str(index), {'time_representation': (timeit.default_timer() - ini)})
dump_svmlight_file(x_train, y_train, "dataset/representations/" + name_dataset +'/train'+str(index))
dump_svmlight_file(x_test, y_test, "dataset/representations/" + name_dataset +'/test'+str(index))
print("Time End: %f" % (timeit.default_timer() - ini))
    else: # used for representations that depend on the fold
r = rs
if 'word_tfidf_bigram' in r:
word_tfidf = TfidfVectorizer(ngram_range=(1,2))
x_train = word_tfidf.fit_transform(x_train)
x_test = word_tfidf.transform(x_test)
elif 'word_tfidf' in r :
from sklearn.decomposition import PCA
word_tfidf = TfidfVectorizer()#tokenizer=word_tokenize )
x_train = word_tfidf.fit_transform(x_train)
x_test = word_tfidf.transform(x_test)
if 'pca' in r or 'svd' in r:
lenght_reduction = int(sys.argv[7])
if 'pca' in r:
transformador = PCA(n_components=lenght_reduction, random_state=42)
elif 'svd' in r:
print('SVD')
transformador = TruncatedSVD(n_components=lenght_reduction, random_state=42)
x_train = transformador.fit_transform(x_train)
#print(f"reduction: {x_train.shape}")
x_test = transformador.transform(x_test)
try:
os.mkdir( f"dataset/representations/{name_dataset}_{lenght_reduction}") # Create directory
except:
pass
dump_svmlight_file(x_train, y_train, f"dataset/representations/{name_dataset}_{lenght_reduction}/train{str(index)}")
dump_svmlight_file(x_test, y_test, f"dataset/representations/{name_dataset}_{lenght_reduction}/test{str(index)}")
elif r == 'char_tfidf':
char_tfidf = TfidfVectorizer(analyzer='char_wb', ngram_range=(2,6))
x_train = char_tfidf.fit_transform(x_train)
x_test = char_tfidf.transform(x_test)
elif r =='vader':
x_train = [vader(x) for x in x_train]
x_test = [vader(x) for x in x_test]
elif r == 'affin':
afinn = Afinn(emoticons=True)
x_train = [[afinn.score(x)] for x in x_train]
x_test = [[afinn.score(x)] for x in x_test]
print("dataset/representations/" + name_dataset +'/train'+str(index))
try:
os.mkdir( f"dataset/representations/{name_dataset}") # Create directory
except:
pass
dump_svmlight_file(x_train, y_train, f"dataset/representations/{name_dataset}/train{str(index)}")
dump_svmlight_file(x_test, y_test, f"dataset/representations/{name_dataset}/test{str(index)}")
cv.save_dict_file('times/' +name_dataset +"_" +str(index), {'time_representation': (timeit.default_timer() - ini)})
print("Time End: %f" % (timeit.default_timer() - ini))
|
[
"os.mkdir",
"numpy.random.seed",
"sklearn.preprocessing.StandardScaler",
"sklearn.feature_extraction.text.TfidfVectorizer",
"sklearn.preprocessing.MinMaxScaler",
"torch.cat",
"sklearn.preprocessing.MaxAbsScaler",
"numpy.mean",
"claudio_funcoes_sub.file_to_corpus",
"claudio_funcoes_sub.preprocessor",
"sklearn.decomposition.TruncatedSVD",
"claudio_funcoes_sub.ids_train_test",
"torch.squeeze",
"random.seed",
"transformers.BertModel.from_pretrained",
"torch.mean",
"sklearn.preprocessing.PowerTransformer",
"torch.manual_seed",
"sklearn.preprocessing.QuantileTransformer",
"claudio_funcoes_sub.ids_train_test_representation",
"scipy.sparse.csr_matrix",
"transformers.BertTokenizer.from_pretrained",
"afinn.Afinn",
"torch.sum",
"sklearn.datasets.dump_svmlight_file",
"torch.stack",
"timeit.default_timer",
"numpy.amax",
"vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"sklearn.decomposition.PCA",
"sklearn.preprocessing.Normalizer"
] |
[((853, 868), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (864, 868), False, 'import random\n'), ((870, 891), 'torch.manual_seed', 'torch.manual_seed', (['(42)'], {}), '(42)\n', (887, 891), False, 'import torch\n'), ((893, 919), 'numpy.random.seed', 'numpy.random.seed', ([], {'seed': '(42)'}), '(seed=42)\n', (910, 919), False, 'import numpy\n'), ((1333, 1361), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (1359, 1361), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((1974, 2024), 'transformers.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (2003, 2024), False, 'from transformers import BertModel, BertTokenizer\n'), ((2037, 2110), 'transformers.BertModel.from_pretrained', 'BertModel.from_pretrained', (['"""bert-base-uncased"""'], {'output_hidden_states': '(True)'}), "('bert-base-uncased', output_hidden_states=True)\n", (2062, 2110), False, 'from transformers import BertModel, BertTokenizer\n'), ((5149, 5171), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5169, 5171), False, 'import timeit\n'), ((5716, 5760), 'claudio_funcoes_sub.ids_train_test', 'cv.ids_train_test', (['ids', 'datas', 'labels', 'index'], {}), '(ids, datas, labels, index)\n', (5733, 5760), True, 'import claudio_funcoes_sub as cv\n'), ((4910, 5005), 'sklearn.datasets.dump_svmlight_file', 'dump_svmlight_file', (['x', 'y', "('dataset/representations/' + dataset + '_f_' + trans + '/feature')"], {}), "(x, y, 'dataset/representations/' + dataset + '_f_' +\n trans + '/feature')\n", (4928, 5005), False, 'from sklearn.datasets import dump_svmlight_file\n'), ((5539, 5590), 'os.mkdir', 'os.mkdir', (["('dataset/representations/' + name_dataset)"], {}), "('dataset/representations/' + name_dataset)\n", (5547, 5590), False, 'import os\n'), ((5776, 5794), 'claudio_funcoes_sub.preprocessor', 'cv.preprocessor', (['x'], {}), '(x)\n', (5791, 5794), True, 'import claudio_funcoes_sub as cv\n'), ((5827, 5845), 'claudio_funcoes_sub.preprocessor', 'cv.preprocessor', (['x'], {}), '(x)\n', (5842, 5845), True, 'import claudio_funcoes_sub as cv\n'), ((2571, 2604), 'torch.stack', 'torch.stack', (['hidden_states'], {'dim': '(0)'}), '(hidden_states, dim=0)\n', (2582, 2604), False, 'import torch\n'), ((2636, 2674), 'torch.squeeze', 'torch.squeeze', (['token_embeddings'], {'dim': '(1)'}), '(token_embeddings, dim=1)\n', (2649, 2674), False, 'import torch\n'), ((4105, 4164), 'claudio_funcoes_sub.file_to_corpus', 'cv.file_to_corpus', (["('dataset/' + dataset + '/orig/texts.txt')"], {}), "('dataset/' + dataset + '/orig/texts.txt')\n", (4122, 4164), True, 'import claudio_funcoes_sub as cv\n'), ((4179, 4238), 'claudio_funcoes_sub.file_to_corpus', 'cv.file_to_corpus', (["('dataset/' + dataset + '/orig/score.txt')"], {}), "('dataset/' + dataset + '/orig/score.txt')\n", (4196, 4238), True, 'import claudio_funcoes_sub as cv\n'), ((4713, 4775), 'os.mkdir', 'os.mkdir', (["('dataset/representations/' + dataset + '_f_' + trans)"], {}), "('dataset/representations/' + dataset + '_f_' + trans)\n", (4721, 4775), False, 'import os\n'), ((9520, 9555), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'ngram_range': '(1, 2)'}), '(ngram_range=(1, 2))\n', (9535, 9555), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((11558, 11609), 'os.mkdir', 'os.mkdir', (['f"""dataset/representations/{name_dataset}"""'], {}), 
"(f'dataset/representations/{name_dataset}')\n", (11566, 11609), False, 'import os\n'), ((3642, 3666), 'numpy.mean', 'numpy.mean', (['vets'], {'axis': '(0)'}), '(vets, axis=0)\n', (3652, 3666), False, 'import numpy\n'), ((3834, 3860), 'numpy.mean', 'numpy.mean', (['tokens'], {'axis': '(0)'}), '(tokens, axis=0)\n', (3844, 3860), False, 'import numpy\n'), ((6270, 6284), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), '()\n', (6282, 6284), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((6738, 6752), 'sklearn.preprocessing.MaxAbsScaler', 'MaxAbsScaler', ([], {}), '()\n', (6750, 6752), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((6891, 6927), 'sklearn.preprocessing.QuantileTransformer', 'QuantileTransformer', ([], {'random_state': '(42)'}), '(random_state=42)\n', (6910, 6927), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((7065, 7083), 'sklearn.preprocessing.PowerTransformer', 'PowerTransformer', ([], {}), '()\n', (7081, 7083), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((8284, 8338), 'claudio_funcoes_sub.ids_train_test_representation', 'cv.ids_train_test_representation', (['ids', 'f_x', 'index', '""" """'], {}), "(ids, f_x, index, ' ')\n", (8316, 8338), True, 'import claudio_funcoes_sub as cv\n'), ((9774, 9791), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {}), '()\n', (9789, 9791), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3925, 3951), 'numpy.amax', 'numpy.amax', (['tokens'], {'axis': '(0)'}), '(tokens, axis=0)\n', (3935, 3951), False, 'import numpy\n'), ((4334, 4352), 'claudio_funcoes_sub.preprocessor', 'cv.preprocessor', (['x'], {}), '(x)\n', (4349, 4352), True, 'import claudio_funcoes_sub as cv\n'), ((5079, 5101), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (5099, 5101), False, 'import timeit\n'), ((6460, 6491), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {'with_mean': '(False)'}), '(with_mean=False)\n', (6474, 6491), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((6569, 6585), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6583, 6585), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((7295, 7316), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l1"""'}), "(norm='l1')\n", (7305, 7316), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((9349, 9371), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9369, 9371), False, 'import timeit\n'), ((10984, 11039), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'analyzer': '"""char_wb"""', 'ngram_range': '(2, 6)'}), "(analyzer='char_wb', ngram_range=(2, 6))\n", (10999, 11039), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((11964, 11986), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (11984, 11986), False, 'import timeit\n'), ((12029, 
12051), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (12049, 12051), False, 'import timeit\n'), ((4483, 4501), 'claudio_funcoes_sub.preprocessor', 'cv.preprocessor', (['x'], {}), '(x)\n', (4498, 4501), True, 'import claudio_funcoes_sub as cv\n'), ((4611, 4632), 'afinn.Afinn', 'Afinn', ([], {'emoticons': '(True)'}), '(emoticons=True)\n', (4616, 4632), False, 'from afinn import Afinn\n'), ((7456, 7477), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""l2"""'}), "(norm='l2')\n", (7466, 7477), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((8719, 8753), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['f_x_train'], {}), '(f_x_train)\n', (8742, 8753), False, 'import scipy\n'), ((8799, 8832), 'scipy.sparse.csr_matrix', 'scipy.sparse.csr_matrix', (['f_x_test'], {}), '(f_x_test)\n', (8822, 8832), False, 'import scipy\n'), ((9061, 9083), 'timeit.default_timer', 'timeit.default_timer', ([], {}), '()\n', (9081, 9083), False, 'import timeit\n'), ((10093, 10144), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'lenght_reduction', 'random_state': '(42)'}), '(n_components=lenght_reduction, random_state=42)\n', (10096, 10144), False, 'from sklearn.decomposition import PCA\n'), ((10523, 10593), 'os.mkdir', 'os.mkdir', (['f"""dataset/representations/{name_dataset}_{lenght_reduction}"""'], {}), "(f'dataset/representations/{name_dataset}_{lenght_reduction}')\n", (10531, 10593), False, 'import os\n'), ((7618, 7640), 'sklearn.preprocessing.Normalizer', 'Normalizer', ([], {'norm': '"""max"""'}), "(norm='max')\n", (7628, 7640), False, 'from sklearn.preprocessing import MaxAbsScaler, MinMaxScaler, Normalizer, PowerTransformer, QuantileTransformer, StandardScaler\n'), ((10247, 10307), 'sklearn.decomposition.TruncatedSVD', 'TruncatedSVD', ([], {'n_components': 'lenght_reduction', 'random_state': '(42)'}), '(n_components=lenght_reduction, random_state=42)\n', (10259, 10307), False, 'from sklearn.decomposition import TruncatedSVD\n'), ((11318, 11339), 'afinn.Afinn', 'Afinn', ([], {'emoticons': '(True)'}), '(emoticons=True)\n', (11323, 11339), False, 'from afinn import Afinn\n'), ((2987, 3049), 'torch.cat', 'torch.cat', (['(token[-1], token[-2], token[-3], token[-4])'], {'dim': '(0)'}), '((token[-1], token[-2], token[-3], token[-4]), dim=0)\n', (2996, 3049), False, 'import torch\n'), ((3187, 3215), 'torch.sum', 'torch.sum', (['token[-4:]'], {'dim': '(0)'}), '(token[-4:], dim=0)\n', (3196, 3215), False, 'import torch\n'), ((3323, 3352), 'torch.mean', 'torch.mean', (['token[-4:]'], {'dim': '(0)'}), '(token[-4:], dim=0)\n', (3333, 3352), False, 'import torch\n')]
|
# -*- coding: utf-8 -*-
"""A set of utility functions to support outlier detection.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause
from __future__ import division
from __future__ import print_function
import os
import numpy as np
import pandas as pd
from numpy import percentile
import numbers
import sklearn
from sklearn.metrics import precision_score
from sklearn.preprocessing import StandardScaler
from sklearn.utils import column_or_1d
from sklearn.utils import check_array
from sklearn.utils import check_consistent_length
from sklearn.utils import check_random_state
from sklearn.utils.random import sample_without_replacement
MAX_INT = np.iinfo(np.int32).max
MIN_INT = -1 * MAX_INT
def make_dirs_if_not_exists(save_dir):
# make saving directory if needed
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
def read_csv_to_df(file_loc, header_lower=True, usecols=None, dtype=None,
low_memory=True, encoding=None):
"""Read in csv files with necessary processing
Parameters
----------
file_loc
header_lower
low_memory
Returns
-------
"""
if dtype != None:
df = pd.read_csv(file_loc, usecols=usecols, dtype=dtype,
low_memory=low_memory, encoding=encoding)
else:
df = pd.read_csv(file_loc, usecols=usecols, low_memory=low_memory,
encoding=encoding)
if header_lower:
df.columns = df.columns.str.lower()
return df
def read_excel_to_df(file_loc, header_lower=True, usecols=None, dtype=None,
low_memory=True, encoding=None):
"""Read in excel files with necessary processing
    Parameters
    ----------
    file_loc : str
        Location of the excel file to read.
    header_lower : bool, optional (default=True)
        Whether to convert the column names to lower case.
    usecols : list, optional (default=None)
        Subset of the columns to read.
    dtype : dict, optional (default=None)
        Column data types to enforce while reading.
    low_memory : bool, optional (default=True)
        Internally process the file in chunks.
    encoding : str, optional (default=None)
        Encoding to use when reading the file.

    Returns
    -------
    df : pandas DataFrame
        The loaded data frame.
"""
    if dtype is not None:
df = pd.read_excel(file_loc, usecols=usecols, dtype=dtype,
low_memory=low_memory, encoding=encoding)
else:
df = pd.read_excel(file_loc, usecols=usecols, low_memory=low_memory,
encoding=encoding)
if header_lower:
df.columns = df.columns.str.lower()
return df
def check_parameter(param, low=MIN_INT, high=MAX_INT, param_name='',
include_left=False, include_right=False):
"""Check if an input is within the defined range.
Parameters
----------
param : int, float
The input parameter to check.
low : int, float
The lower bound of the range.
high : int, float
The higher bound of the range.
param_name : str, optional (default='')
The name of the parameter.
include_left : bool, optional (default=False)
Whether includes the lower bound (lower bound <=).
include_right : bool, optional (default=False)
Whether includes the higher bound (<= higher bound).
Returns
-------
within_range : bool or raise errors
Whether the parameter is within the range of (low, high)
"""
# param, low and high should all be numerical
    if not isinstance(param, (numbers.Integral, np.integer, float)):
        raise TypeError('{param_name} is set to {param}. Not numerical'.format(
            param=param, param_name=param_name))
    if not isinstance(low, (numbers.Integral, np.integer, float)):
        raise TypeError('low is set to {low}. Not numerical'.format(low=low))
    if not isinstance(high, (numbers.Integral, np.integer, float)):
        raise TypeError('high is set to {high}. Not numerical'.format(
            high=high))
# at least one of the bounds should be specified
if low is MIN_INT and high is MAX_INT:
        raise ValueError('Neither the low nor the high bound is specified')
# if wrong bound values are used
if low > high:
raise ValueError(
'Lower bound > Higher bound')
# value check under different bound conditions
if (include_left and include_right) and (param < low or param > high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of [{low}, {high}].'.format(
param=param, low=low, high=high, param_name=param_name))
elif (include_left and not include_right) and (
param < low or param >= high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of [{low}, {high}).'.format(
param=param, low=low, high=high, param_name=param_name))
elif (not include_left and include_right) and (
param <= low or param > high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of ({low}, {high}].'.format(
param=param, low=low, high=high, param_name=param_name))
elif (not include_left and not include_right) and (
param <= low or param >= high):
raise ValueError(
'{param_name} is set to {param}. '
'Not in the range of ({low}, {high}).'.format(
param=param, low=low, high=high, param_name=param_name))
else:
return True
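# Illustrative usage sketch (added for clarity; the parameter name
# 'contamination' below is just a hypothetical example):
#
#   >>> check_parameter(0.5, low=0, high=1, param_name='contamination',
#   ...                 include_left=True, include_right=True)
#   True
#   >>> check_parameter(1.5, low=0, high=1, param_name='contamination')
#   ValueError: contamination is set to 1.5. Not in the range of (0, 1).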
|
[
"os.makedirs",
"os.path.isdir",
"pandas.read_csv",
"numpy.iinfo",
"pandas.read_excel"
] |
[((658, 676), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (666, 676), True, 'import numpy as np\n'), ((793, 816), 'os.path.isdir', 'os.path.isdir', (['save_dir'], {}), '(save_dir)\n', (806, 816), False, 'import os\n'), ((826, 847), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (837, 847), False, 'import os\n'), ((1171, 1268), 'pandas.read_csv', 'pd.read_csv', (['file_loc'], {'usecols': 'usecols', 'dtype': 'dtype', 'low_memory': 'low_memory', 'encoding': 'encoding'}), '(file_loc, usecols=usecols, dtype=dtype, low_memory=low_memory,\n encoding=encoding)\n', (1182, 1268), True, 'import pandas as pd\n'), ((1313, 1398), 'pandas.read_csv', 'pd.read_csv', (['file_loc'], {'usecols': 'usecols', 'low_memory': 'low_memory', 'encoding': 'encoding'}), '(file_loc, usecols=usecols, low_memory=low_memory, encoding=encoding\n )\n', (1324, 1398), True, 'import pandas as pd\n'), ((1829, 1928), 'pandas.read_excel', 'pd.read_excel', (['file_loc'], {'usecols': 'usecols', 'dtype': 'dtype', 'low_memory': 'low_memory', 'encoding': 'encoding'}), '(file_loc, usecols=usecols, dtype=dtype, low_memory=low_memory,\n encoding=encoding)\n', (1842, 1928), True, 'import pandas as pd\n'), ((1975, 2062), 'pandas.read_excel', 'pd.read_excel', (['file_loc'], {'usecols': 'usecols', 'low_memory': 'low_memory', 'encoding': 'encoding'}), '(file_loc, usecols=usecols, low_memory=low_memory, encoding=\n encoding)\n', (1988, 2062), True, 'import pandas as pd\n')]
|
#!/usr/bin/env python3
""" 音声情報処理 n本ノック !! """
# MIT License
# Copyright (C) 2020 by <NAME>
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Commentary:
# - Estimate the fundamental frequency of a speech segment with the MUSIC method
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal
from scipy.io import wavfile
import librosa
IN_WAVE_FILE = "voice_a.wav"  # recording of the vowel /a/
FRAME_LENGTH = 1024  # frame length (FFT size)
HOP_LENGTH = 80  # frame shift (hop) length
CUTOFF = 4000  # cutoff frequency (Hz)
# Load the audio file
fs, data = wavfile.read(IN_WAVE_FILE)
data = data.astype(np.float64)
# Split the signal into frames
frames = librosa.util.frame(data, frame_length=FRAME_LENGTH,
hop_length=HOP_LENGTH).T
# Frequency axis
freq_axis = np.linspace(0, fs, frames.shape[0])
# Treat the frequency components above the cutoff as the MUSIC noise components
ORDER = np.min(np.where(freq_axis > CUTOFF))
# Compute the sample covariance matrix
cov_frames = np.cov(frames, bias=True)
# Compute the eigenvalues and eigenvectors
# -> the eigenvalues are arranged in descending order, and the (column) eigenvectors are arranged correspondingly
eigval, eigvec = np.linalg.eig(cov_frames)
# Eigenvectors of the noise components
noise_eigvec = eigvec[:, 2 * ORDER + 1:]
# Compute the power spectrum from the noise eigenvectors
power_noise_eigvec = np.abs(np.fft.fft(noise_eigvec))
power_noise_eigvec = power_noise_eigvec ** 2
# Compute the MUSIC pseudospectrum
music_pseudo_spec = 1.0 / np.sum(power_noise_eigvec, axis=1)
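# Added note: this is the standard MUSIC pseudospectrum
#   P(f) = 1 / sum_k |FFT(v_k)(f)|^2,
# where v_k are the noise-subspace eigenvectors; peaks appear at frequencies
# that are (nearly) orthogonal to the noise subspace.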
# Estimate the fundamental frequency
# -> the frequency of the lowest-frequency peak of the pseudospectrum
fo = freq_axis[np.min(scipy.signal.argrelmax(music_pseudo_spec))]
print(f"Estimatied fundamental frequency = {fo:.2f} Hz")
# Plot the waveform
fig = plt.figure(figsize=(10, 6))
n_samples = len(data)
time = np.arange(n_samples) / fs
plt.plot(time, data)
plt.xlabel("Time (sec)")
plt.ylabel("Amplitude")
plt.title("Waveform (/a/)")
plt.show()
# Pseudospectrum computed by the MUSIC method
fig = plt.figure(figsize=(10, 6))
plt.plot(freq_axis, 20 * np.log10(music_pseudo_spec))
plt.xlim(0, fs/2)
plt.xlabel("Frequency (Hz)")
plt.ylabel("Power [dB]")
plt.title(
f"Pseudospectrum via MUSIC method\nFundamental Frequency = {fo:.2f} Hz")
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"librosa.util.frame",
"matplotlib.pyplot.show",
"numpy.sum",
"matplotlib.pyplot.plot",
"numpy.fft.fft",
"numpy.linalg.eig",
"scipy.io.wavfile.read",
"matplotlib.pyplot.figure",
"numpy.where",
"numpy.arange",
"scipy.signal.argrelmax",
"numpy.linspace",
"matplotlib.pyplot.ylabel",
"numpy.log10",
"numpy.cov",
"matplotlib.pyplot.xlabel"
] |
[((1511, 1537), 'scipy.io.wavfile.read', 'wavfile.read', (['IN_WAVE_FILE'], {}), '(IN_WAVE_FILE)\n', (1523, 1537), False, 'from scipy.io import wavfile\n'), ((1712, 1747), 'numpy.linspace', 'np.linspace', (['(0)', 'fs', 'frames.shape[0]'], {}), '(0, fs, frames.shape[0])\n', (1723, 1747), True, 'import numpy as np\n'), ((1849, 1874), 'numpy.cov', 'np.cov', (['frames'], {'bias': '(True)'}), '(frames, bias=True)\n', (1855, 1874), True, 'import numpy as np\n'), ((1944, 1969), 'numpy.linalg.eig', 'np.linalg.eig', (['cov_frames'], {}), '(cov_frames)\n', (1957, 1969), True, 'import numpy as np\n'), ((2406, 2433), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2416, 2433), True, 'import matplotlib.pyplot as plt\n'), ((2489, 2509), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'data'], {}), '(time, data)\n', (2497, 2509), True, 'import matplotlib.pyplot as plt\n'), ((2510, 2534), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time (sec)"""'], {}), "('Time (sec)')\n", (2520, 2534), True, 'import matplotlib.pyplot as plt\n'), ((2535, 2558), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude"""'], {}), "('Amplitude')\n", (2545, 2558), True, 'import matplotlib.pyplot as plt\n'), ((2559, 2586), 'matplotlib.pyplot.title', 'plt.title', (['"""Waveform (/a/)"""'], {}), "('Waveform (/a/)')\n", (2568, 2586), True, 'import matplotlib.pyplot as plt\n'), ((2587, 2597), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2595, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2629, 2656), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), '(figsize=(10, 6))\n', (2639, 2656), True, 'import matplotlib.pyplot as plt\n'), ((2711, 2730), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)', '(fs / 2)'], {}), '(0, fs / 2)\n', (2719, 2730), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2757), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency (Hz)"""'], {}), "('Frequency (Hz)')\n", (2739, 2757), True, 'import matplotlib.pyplot as plt\n'), ((2758, 2782), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Power [dB]"""'], {}), "('Power [dB]')\n", (2768, 2782), True, 'import matplotlib.pyplot as plt\n'), ((2783, 2873), 'matplotlib.pyplot.title', 'plt.title', (['f"""Pseudospectrum via MUSIC method\nFundamental Frequency = {fo:.2f} Hz"""'], {}), '(\n f"""Pseudospectrum via MUSIC method\nFundamental Frequency = {fo:.2f} Hz""")\n', (2792, 2873), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2879, 2881), True, 'import matplotlib.pyplot as plt\n'), ((1587, 1661), 'librosa.util.frame', 'librosa.util.frame', (['data'], {'frame_length': 'FRAME_LENGTH', 'hop_length': 'HOP_LENGTH'}), '(data, frame_length=FRAME_LENGTH, hop_length=HOP_LENGTH)\n', (1605, 1661), False, 'import librosa\n'), ((1792, 1820), 'numpy.where', 'np.where', (['(freq_axis > CUTOFF)'], {}), '(freq_axis > CUTOFF)\n', (1800, 1820), True, 'import numpy as np\n'), ((2084, 2108), 'numpy.fft.fft', 'np.fft.fft', (['noise_eigvec'], {}), '(noise_eigvec)\n', (2094, 2108), True, 'import numpy as np\n'), ((2202, 2236), 'numpy.sum', 'np.sum', (['power_noise_eigvec'], {'axis': '(1)'}), '(power_noise_eigvec, axis=1)\n', (2208, 2236), True, 'import numpy as np\n'), ((2463, 2483), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (2472, 2483), True, 'import numpy as np\n'), ((2291, 2332), 'scipy.signal.argrelmax', 'scipy.signal.argrelmax', (['music_pseudo_spec'], {}), '(music_pseudo_spec)\n', (2313, 2332), 
False, 'import scipy\n'), ((2682, 2709), 'numpy.log10', 'np.log10', (['music_pseudo_spec'], {}), '(music_pseudo_spec)\n', (2690, 2709), True, 'import numpy as np\n')]
|
import numpy as np
def calc_area(vertex):
vec_a = vertex[:,1] - vertex[:,0]
vec_b = vertex[:,2] - vertex[:,0]
normal = np.cross(vec_a, vec_b)
area = np.absolute(np.linalg.norm(normal, ord=2, axis=1))*0.5
return area
def uniform_sample_on_triangle(triangle):
while True:
rn = np.random.rand(2)
if np.sum(rn) <= 1.0:
break
return rn[0]*(triangle[1]-triangle[0]) + rn[1]*(triangle[2]-triangle[0]) + triangle[0]
# Convert a triangle mesh into a point cloud by area-weighted random sampling
def mesh2pcl(triangle_collection, numpoints):
area_collection = calc_area(triangle_collection)
total_area = np.sum(area_collection)
print("Triangle count: {}".format(triangle_collection.shape[0]))
#print("Total surface area: {}".format(total_area))
area_collection /= total_area
# sample k points
    # note that this will give an error if area_collection.shape[0] == 0 (i.e. an empty mesh)
sampled_triangles = np.random.choice(area_collection.shape[0], size=numpoints, p=area_collection)
# Sample one random uvs on each triangle
rand_uv = np.random.rand(numpoints, 2)
oob_idx = np.sum(rand_uv, axis=-1) > 1.0
rand_uv[oob_idx,:] = -rand_uv[oob_idx,:] + 1.0
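    # Added note: reflecting out-of-range (u, v) pairs through (0.5, 0.5) folds
    # the upper triangle of the unit square onto the lower one, so the
    # barycentric samples stay uniform on each triangle without rejection.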
sampled_triangle_collection = triangle_collection[sampled_triangles,:,:]
sampled_points = rand_uv[:,[0]] * (sampled_triangle_collection[:,1,:] - sampled_triangle_collection[:,0,:]) \
+ rand_uv[:,[1]] * (sampled_triangle_collection[:,2,:] - sampled_triangle_collection[:,0,:]) \
+ sampled_triangle_collection[:,0,:]
return sampled_points.astype(np.float32)
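# Illustrative usage sketch (added; the unit-square mesh below is a made-up example):
if __name__ == "__main__":
    # two triangles forming the unit square in the z = 0 plane, shape (2, 3, 3)
    unit_square = np.array([[[0., 0., 0.], [1., 0., 0.], [1., 1., 0.]],
                            [[0., 0., 0.], [1., 1., 0.], [0., 1., 0.]]])
    points = mesh2pcl(unit_square, numpoints=1000)
    print(points.shape)  # (1000, 3): points spread uniformly over the square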
|
[
"numpy.sum",
"numpy.cross",
"numpy.linalg.norm",
"numpy.random.choice",
"numpy.random.rand"
] |
[((132, 154), 'numpy.cross', 'np.cross', (['vec_a', 'vec_b'], {}), '(vec_a, vec_b)\n', (140, 154), True, 'import numpy as np\n'), ((590, 613), 'numpy.sum', 'np.sum', (['area_collection'], {}), '(area_collection)\n', (596, 613), True, 'import numpy as np\n'), ((933, 1010), 'numpy.random.choice', 'np.random.choice', (['area_collection.shape[0]'], {'size': 'numpoints', 'p': 'area_collection'}), '(area_collection.shape[0], size=numpoints, p=area_collection)\n', (949, 1010), True, 'import numpy as np\n'), ((1075, 1103), 'numpy.random.rand', 'np.random.rand', (['numpoints', '(2)'], {}), '(numpoints, 2)\n', (1089, 1103), True, 'import numpy as np\n'), ((309, 326), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (323, 326), True, 'import numpy as np\n'), ((1118, 1142), 'numpy.sum', 'np.sum', (['rand_uv'], {'axis': '(-1)'}), '(rand_uv, axis=-1)\n', (1124, 1142), True, 'import numpy as np\n'), ((178, 215), 'numpy.linalg.norm', 'np.linalg.norm', (['normal'], {'ord': '(2)', 'axis': '(1)'}), '(normal, ord=2, axis=1)\n', (192, 215), True, 'import numpy as np\n'), ((338, 348), 'numpy.sum', 'np.sum', (['rn'], {}), '(rn)\n', (344, 348), True, 'import numpy as np\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import cv2
import numpy as np
from PIL import Image
class testdataset:
def __init__(self, test_root_dir='.', resize=1920):
self.resize = resize
self.test_root_dir = test_root_dir
self.counter = 0
def __iter__(self):
self.imagedir = os.path.join(self.test_root_dir, 'ch4_test_images')
if not os.path.exists(self.imagedir):
raise ValueError("test dataset is not exist!")
self.img_names = [i for i in os.listdir(self.imagedir) if
os.path.splitext(i)[-1].lower() in ['.jpg', '.png', '.jpeg']]
self.image_paths = []
for img_name in self.img_names:
self.image_paths.append(os.path.join(self.imagedir, img_name))
return self
def __next__(self):
if self.counter >= len(self.image_paths):
raise StopIteration()
img_path = self.image_paths[self.counter]
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
long_size = max(img.shape[:2])
img_resized = np.zeros((long_size, long_size, 3), np.uint8)
img_resized[:img.shape[0], :img.shape[1], :] = img
img_resized = cv2.resize(img_resized, dsize=(self.resize, self.resize))
img_resized = Image.fromarray(img_resized)
img_resized = img_resized.convert('RGB')
img_resized = np.asarray(img_resized)
img_name = os.path.split(self.image_paths[self.counter])[-1]
self.counter += 1
return img, img_resized, img_name
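# Illustrative usage sketch (added; 'ICDAR2015' is a hypothetical root directory
# that must contain a 'ch4_test_images' sub-folder):
#
#   dataset = testdataset(test_root_dir='ICDAR2015', resize=1920)
#   for img, img_resized, img_name in dataset:
#       ...  # run the detector on img_resized and map results back onto img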
|
[
"cv2.cvtColor",
"numpy.asarray",
"numpy.zeros",
"os.path.exists",
"cv2.imread",
"os.path.splitext",
"PIL.Image.fromarray",
"os.path.split",
"os.path.join",
"os.listdir",
"cv2.resize"
] |
[((952, 1003), 'os.path.join', 'os.path.join', (['self.test_root_dir', '"""ch4_test_images"""'], {}), "(self.test_root_dir, 'ch4_test_images')\n", (964, 1003), False, 'import os\n'), ((1603, 1623), 'cv2.imread', 'cv2.imread', (['img_path'], {}), '(img_path)\n', (1613, 1623), False, 'import cv2\n'), ((1638, 1674), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (1650, 1674), False, 'import cv2\n'), ((1736, 1781), 'numpy.zeros', 'np.zeros', (['(long_size, long_size, 3)', 'np.uint8'], {}), '((long_size, long_size, 3), np.uint8)\n', (1744, 1781), True, 'import numpy as np\n'), ((1863, 1920), 'cv2.resize', 'cv2.resize', (['img_resized'], {'dsize': '(self.resize, self.resize)'}), '(img_resized, dsize=(self.resize, self.resize))\n', (1873, 1920), False, 'import cv2\n'), ((1943, 1971), 'PIL.Image.fromarray', 'Image.fromarray', (['img_resized'], {}), '(img_resized)\n', (1958, 1971), False, 'from PIL import Image\n'), ((2043, 2066), 'numpy.asarray', 'np.asarray', (['img_resized'], {}), '(img_resized)\n', (2053, 2066), True, 'import numpy as np\n'), ((1020, 1049), 'os.path.exists', 'os.path.exists', (['self.imagedir'], {}), '(self.imagedir)\n', (1034, 1049), False, 'import os\n'), ((2086, 2131), 'os.path.split', 'os.path.split', (['self.image_paths[self.counter]'], {}), '(self.image_paths[self.counter])\n', (2099, 2131), False, 'import os\n'), ((1147, 1172), 'os.listdir', 'os.listdir', (['self.imagedir'], {}), '(self.imagedir)\n', (1157, 1172), False, 'import os\n'), ((1371, 1408), 'os.path.join', 'os.path.join', (['self.imagedir', 'img_name'], {}), '(self.imagedir, img_name)\n', (1383, 1408), False, 'import os\n'), ((1202, 1221), 'os.path.splitext', 'os.path.splitext', (['i'], {}), '(i)\n', (1218, 1221), False, 'import os\n')]
|
from pynq import DefaultIP
from pynq import DefaultHierarchy
from pynq import allocate
import numpy as np
from rfsoc_qpsk.dma_timer import DmaTimer
class QPSKRx(DefaultHierarchy):
def __init__(self, description):
super().__init__(description)
def get_decimated(self):
return self.qpsk_rx_dec.get_frame(self.dma_rx_dec)
def get_coarse_synced(self):
return self.qpsk_rx_csync.get_frame(self.dma_rx_csync)
def get_rrced(self):
return self.qpsk_rx_rrc.get_frame(self.dma_rx_rrc)
def get_data(self):
return self.qpsk_rx_tsync.get_frame(self.dma_rx_tsync)
@staticmethod
def checkhierarchy(description):
if 'dma_rx_dec' in description['ip'] \
and 'qpsk_rx_dec' in description['ip'] \
and 'dma_rx_csync' in description['ip'] \
and 'qpsk_rx_csync' in description['ip'] \
and 'dma_rx_rrc' in description['ip'] \
and 'qpsk_rx_rrc' in description['ip'] \
and 'dma_rx_tsync' in description['ip'] \
and 'qpsk_rx_tsync' in description['ip']:
return True
return False
class DataInspector(DefaultIP):
def __init__(self, description, pkt_size, buf_dtype=np.int16, buf_words_per_pkt=2):
super().__init__(description)
# Init config register
self.reset = 1
self.enable = 1
self.pkt_size = pkt_size-1
self.auto_restart = 0
self.reset = 0
# Init buffer
        self.buf = allocate(shape=(pkt_size * buf_words_per_pkt, ), dtype=buf_dtype)
def _process_frame(self, frame):
# By default treat frame as interleaved IQ stream.
return frame[::2] + 1j * frame[1::2]
def get_frame(self, dma):
self.transfer = 1
dma.recvchannel.transfer(self.buf)
dma.recvchannel.wait()
self.transfer = 0
frame = self._process_frame(np.array(self.buf))
return frame
# Func to return an MMIO getter and setter based on a relative addr
def _create_mmio_property(addr):
def _get(self):
return self.read(addr)
def _set(self, value):
self.write(addr, value)
return property(_get, _set)
# LUT of property addresses for our data-driven properties
_data_inspector_props = [
("reset", 0 ),
("pkt_size", 4 ),
("enable", 8 ),
("auto_restart", 12),
("transfer", 16)
]
# Generate getters and setters based on _data_inspector_props
for (name, addr) in _data_inspector_props:
setattr(DataInspector, name, _create_mmio_property(addr))
class RxDecimator(DataInspector):
def __init__(self, description):
super().__init__(description, 128)
bindto = ['UoS:SysGen:axi_qpsk_rx_dec:1.1']
class RxCSync(DataInspector):
def __init__(self, description):
super().__init__(description, 128)
bindto = ['UoS:SysGen:axi_qpsk_rx_csync:1.1']
class RxRRC(DataInspector):
def __init__(self, description):
super().__init__(description, 512)
bindto = ['UoS:SysGen:axi_qpsk_rx_rrc:1.1']
class RxTSync(DataInspector):
def __init__(self, description):
super().__init__(description, 16)
# Set loop filter reset counter to 1 second
# (@ 16kHz)
self.sync_reset=0
bindto = ['UoS:SysGen:axi_qpsk_rx_tsync:1.1']
setattr(RxTSync, 'sync_reset', _create_mmio_property(20))
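# Added note: _create_mmio_property builds ordinary Python properties that
# forward to DefaultIP.read/write at a fixed register offset, so for example
#   inspector.pkt_size = 127   # is equivalent to inspector.write(4, 127)
#   value = inspector.enable   # is equivalent to inspector.read(8)
# with the offsets taken from the _data_inspector_props table above.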
|
[
"numpy.array",
"pynq.allocate"
] |
[((1567, 1630), 'pynq.allocate', 'allocate', ([], {'shape': '(pkt_size * buf_words_per_pkt,)', 'dtype': 'np.int16'}), '(shape=(pkt_size * buf_words_per_pkt,), dtype=np.int16)\n', (1575, 1630), False, 'from pynq import allocate\n'), ((1975, 1993), 'numpy.array', 'np.array', (['self.buf'], {}), '(self.buf)\n', (1983, 1993), True, 'import numpy as np\n')]
|
import os
import sys
# Adding project folder to import modules
root = os.getcwd().replace("\\", "/")
sys.path.append(root)
import mod.env.config as conf
from mod.env.config import ConfigNetwork
import pandas as pd
from copy import deepcopy
from collections import defaultdict
from pprint import pprint
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="ticks")
context = "paper"
fig_format = "pdf"
def movingaverage(data, w, start=0, start_den=2):
new_data = np.zeros(len(data))
for i in range(len(data)):
if i < start:
new_data[i] = sum(data[i : i + int(w / start_den)]) / int(
w / start_den
)
continue
if i + w < len(data):
new_data[i] = sum(data[i : i + w]) / w
else:
new_data[i] = sum(data[i - w : i]) / w
return new_data
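# Added note: movingaverage smooths a learning curve with a forward-looking
# window of w samples (sum(data[i:i + w]) / w) and falls back to a trailing
# window of the last w samples once the forward window would run past the end;
# if start > 0, the first `start` points use a shorter window of w / start_den samples.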
if __name__ == "__main__":
adhoc_compare = dict()
adhoc_compare_labels = dict()
colors = dict()
markers = dict()
linewidth = dict()
test_label = "hire500"
# test_label = "rebalance"
# test_label = "pavfav"
# test_label = "exploration"
# test_label = "flood"
# test_label = "unlimited"
# test_label = "policy"
# test_label = "b"
# adhoc_compare["p"] = [
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_1.00_B_2.40_10.00_0.00_0.00_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_4.80_1.00_B_2.40_10.00_0.00_2.40_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_0.00_1.00_B_2.40_10.00_5.00_0.00_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_1.00_B_2.40_15.00_0.00_0.00_0.00",
# ]
d = "0.01"
adhoc_compare["hire500"] = [
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[5]=(10102, 10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
f"SH_LIN_V=0000-0500[S{d}](R)_I=1_L[3]=(32202, 33303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[5]=(10102, 10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.10](R)_I=1_L[3]=(32202, 33303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[5]=(10102, 10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(32202, 33303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10202, 32303, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[4]=(10203, 1030-, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
# "SH_LIN_V=0000-0500[S0.01](R)_I=1_L[3]=(10303, 32-0-, 33-0-)_R=([1-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_P_B_2.40_10.00_5.00_2.40_P",
]
colors["hire500"] = ["k", "g", "r", "b", "k", "g", "r", "b", "k", "g", "r", "b"]
markers["hire500"] = [None, None, None, None, "o", "o", "o","o", "x","x","x","x"]
adhoc_compare_labels["hire500"] = [
f"{d} - (10102, 10203, 1030-, 32-0-, 33-0-)",
f"{d} - (10202, 32302, 33-0-)",
f"{d} - (10-02, 32-02, 33-0-)",
f"{d} - (10-0-, 32-0-, 33-0-)",
f"{d} - (10203, 1030-, 32-0-, 33-0-)",
f"{d} - (10303, 32-0-, 33-0-)",
f"{d} - (32202, 33303, 33-0-)",
"0.10 - (10102, 10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10202, 32302, 33-0-)",
"0.10 - (10-02, 32-02, 33-0-)",
"0.10 - (10-0-, 32-0-, 33-0-)",
"0.10 - (10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10303, 32-0-, 33-0-)",
"0.10 - (32202, 33303, 33-0-)",
"0.10 - (10102, 10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10202, 32302, 33-0-)",
"0.10 - (10-02, 32-02, 33-0-)",
"0.10 - (10-0-, 32-0-, 33-0-)",
"0.10 - (10203, 1030-, 32-0-, 33-0-)",
"0.10 - (10303, 32-0-, 33-0-)",
"0.10 - (32202, 33303, 33-0-)",
]
# adhoc_compare["hire500m"] = [
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-01, 32-02, 33-03)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-01, 32-02, 33-03)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[3]=(10-02, 32-03, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[4]=(10-02, 10-03, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
# "HI_LIN_V=0000-0500[S1.00][M](R)_I=1_L[4]=(10-02, 10-03, 32-0-, 33-0-)_R=([1-8][L(05)]T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
# ]
# adhoc_compare_labels["hire500m"] = [
# "500[M] - (10-01, 32-02, 33-03) - 1",
# "500[M] - (10-01, 32-02, 33-03) - 2",
# "500[M] - (10-02, 32-0-, 33-0-) - 1",
# "500[M] - (10-02, 32-0-, 33-0-) - 2",
# "500[M] - (10-02, 32-03, 33-0-) - 1",
# "500[M] - (10-02, 32-03, 33-0-) - 2",
# "500[M] - (10-02, 10-03, 32-0-, 33-0-) - 1",
# "500[M] - (10-02, 10-03, 32-0-, 33-0-) - 2",
# ]
adhoc_compare["b"] = [
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_0.00_B_2.40_10.00_0.00_0.00_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_4.80_0.00_B_2.40_10.00_0.00_2.40_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_0.00_0.00_B_2.40_10.00_5.00_0.00_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_0.00_B_2.40_10.00_5.00_2.40_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_7.20_0.00_B_2.40_10.00_5.00_4.80_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_9.60_0.00_B_2.40_10.00_5.00_7.20_1.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_0.00_B_2.40_15.00_0.00_0.00_1.00",
]
adhoc_compare_labels["b"] = [
r"10min (max. pk. delay)",
r"10min (max. pk. delay) + 1 $\times$ RP",
r"10min (max. pk. delay) + 5min (pen. tolerance)",
r"10min (max. pk. delay) + 5min (pen. tolerance) + 1 $\times$ RP",
r"10min (max. pk. delay) + 5min (pen. tolerance) + 2 $\times$ RP",
r"10min (max. pk. delay) + 5min (pen. tolerance) + 3 $\times$ RP",
r"15min (max. pk. delay) + 5min (pen. tolerance)",
]
adhoc_compare_labels["sensitivity_analysis"] = [
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_10.00_0.00_0.00_1.00_B_7.20_15.00_0.00_0.0,0_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_10.00_0.00_0.00_0.00_B_7.20_15.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_5.00_0.00_0.00_1.00_B_7.20_10.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_9.60_5.00_0.00_0.00_0.00_B_7.20_10.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_10.00_0.00_0.00_1.00_B_4.80_15.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_10.00_0.00_0.00_0.00_B_4.80_15.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_5.00_0.00_0.00_1.00_B_4.80_10.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_7.20_5.00_0.00_0.00_0.00_B_4.80_10.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_1.00_B_2.40_15.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_0.00_B_2.40_15.00_0.00_0.00_1.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_1.00_B_2.40_10.00_0.00_0.00_0.00",
"SEN_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_0.00_B_2.40_10.00_0.00_0.00_1.00",
]
adhoc_compare["a"] = [
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_1.00_B_2.40_10.00_0.00_0.00_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_4.80_1.00_B_2.40_10.00_0.00_2.40_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_0.00_1.00_B_2.40_10.00_5.00_0.00_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_4.80_1.00_B_2.40_10.00_5.00_2.40_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_7.20_1.00_B_2.40_10.00_5.00_4.80_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_5.00_5.00_9.60_1.00_B_2.40_10.00_5.00_7.20_0.00",
"SL_LIN_cars=0300-0000(R)_t=1_levels[3]=(10-0-, 32-0-, 33-0-)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10_A_4.80_10.00_0.00_0.00_1.00_B_2.40_15.00_0.00_0.00_0.00",
]
adhoc_compare_labels["a"] = [
"5",
"5+P",
"5+5",
"5+5+2P",
"5+5+3P",
"5+5+4P",
"10",
]
adhoc_compare["penalty"] = [
"baselineB10_disable_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-4, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"baselineB10_pen_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-4, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"baselineB10_pen_rej_pen_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-4, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["penalty"] = [
"10 min. pickup delay",
"10 min. pickup delay + 5 min. tolerance",
"10 min. pickup delay + 5 min. tolerance + rejection penalty",
]
# ################################################################ #
# Discount function ############################################## #
# ################################################################ #
adhoc_compare["penalize"] = [
"base_LIN_V=0300-0000(R)_I=1_L[3]=(10-0-, 32-0-, 33-0-)_R=([0-8][L(05)]_T=[05h,+30m+04h+60m]_0.10(S)_1.00_0.10_A_4.80_5.00_0.00_0.00_0.00_B_2.40_10.00_0.00_0.00_1.00",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([2-8][tabu=00])[L(05)]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([2-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([3-8][tabu=00])[L(05)]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([3-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["penalize"] = [
"Adjacent neighbors (30s)",
r"8 $\times$ RC1",
r"8 $\times$ RC1 [P]",
r"8 $\times$ RC5",
r"8 $\times$ RC5 [P]",
r"8 $\times$ RC10",
r"8 $\times$ RC10 [P]",
]
colors["penalize"] = ["k", "g", "g", "r", "r", "b", "b"]
markers["penalize"] = [None, None, "o", None, "o", None, "o"]
linewidth["penalize"] = [1, 1, 1, 1, 1, 1, 1]
# ################################################################ #
# Rebalance ###################################################### #
# ################################################################ #
adhoc_compare["rebalance"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 3-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"far_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4, 3-2][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["rebalance"] = [
r"8 $\times$ RC1",
r"8 $\times$ RC1 + 4 $\times$ RC5",
r"8 $\times$ RC1 + 4 $\times$ RC10",
r"8 $\times$ RC1 + 4 $\times$ RC5 + 2 $\times$ RC10",
]
linewidth["rebalance"] = [1, 1, 1, 1, 1, 1, 1]
markers["rebalance"] = [None, "o", "x", "D"]
colors["rebalance"] = [
"k",
"g",
"r",
"b",
"magenta",
"gold",
"gray",
"pink",
"#cab2d6",
]
# ################################################################ #
# Max. number of cars ############################################ #
# ################################################################ #
adhoc_compare["flood"] = [
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"far_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(02)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(10)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["flood"] = [
# "unlimited",
"2",
"5",
"10",
]
colors["flood"] = ["r", "k", "g", "r"]
linewidth["flood"] = [1, 1, 1, 1, 1, 1, 1]
markers["flood"] = ["x", None, "o"]
# ################################################################ #
# Max. number of cars (unlimited)################################# #
# ################################################################ #
adhoc_compare["unlimited"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["unlimited"] = [
r"8 $\times$ RC1",
r"8 $\times$ RC1 (unlimited)",
r"8 $\times$ RC1 + 4 $\times$ RC5",
r"8 $\times$ RC1 + 4 $\times$ RC5 (unlimited)",
]
colors["unlimited"] = ["k", "k", "r", "r"]
linewidth["unlimited"] = [1, 1, 1, 1, 1, 1, 1]
markers["unlimited"] = [None, "o", None, "o"]
adhoc_compare["policy"] = [
"myopic_[MY]_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "myopic_[RA]_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_hire_LIN_cars=0300-0200(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["policy"] = [
"Myopic",
# "Random rebalance",
"VFA (300 PAVs)",
"VFA (300 PAVs + 200 FAVs)",
]
# Rebalance
# adhoc_compare["flood"] = [
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# "only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
# ]
# adhoc_compare_labels["flood"] = [
# "8 x RC1",
# "8 x RC1 (unlimited)",
# "8 x RC1 + 4 x RC5",
# "8 x RC1 + 4 x RC5 (unlimited)",
# ]
# adhoc_compare_labels["avoidflood"] = [
# ]
adhoc_compare["exploration"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_[X]LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing0.25_[X]LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"far_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8, 2-4][thompson=06][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_[X]LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-16][thompson=08][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["exploration"] = [
"8 x RC1 + 4 x RC5",
# "16 x RC1",
# "16 x RC1 (annealing)",
# "16 x RC1 (annealing thompson 8)",
"8 x RC1 (annealing)",
"8 x RC1 (annealing 0.25)",
"8 x RC1 + 4 x RC5 (thompson 6)",
"16 x RC1 (thompson 6)",
]
adhoc_compare["pavfav"] = [
"only1_LIN_cars=0300-0000(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
"annealing_hire_LIN_cars=0300-0200(R)_t=1_levels[3]=(1-0, 3-300, 3-600)_rebal=([1-8][tabu=00])[L(05)][P]_[05h,+30m+04h+60m]_match=15_0.10(S)_1.00_0.10",
]
adhoc_compare_labels["pavfav"] = ["300 PAVs", "300 PAVs + 200 FAVs"]
# adhoc_compare_labels = [
# "Rebalance to closest nodes",
# "Rebalance to closest nodes + 1min RCs",
# "Rebalance to 1min RCs",
# "Rebalance to 1min RCs [P]",
# "Rebalance to 1min (8), 5min(4), 10min(2) RCs [P]",
# "Rebalance to 1min (8), 5min(4), 10min(2) RCs [P] + annealing",
# "Rebalance to 1min RCs [P] + annealing",
# "Rebalance to 1min RCs [P] + annealing (0.1)",
# # "Annealing",
# # "Annealing + Thompson (0.5)",
# # "Annealing + Thompson (0.2)",
# ]
colors["policy"] = ["k", "r", "g"]
markers["policy"] = [None, "x", "D"]
colors["pavfav"] = ["k", "r"]
colors["exploration"] = [
"k",
"g",
"r",
"b",
"magenta",
"gold",
"gray",
"pink",
"#cab2d6",
]
# linewidth["penalize"] = [2, 2, 1, 2, 1, 2, 1]
# linewidth["policy"] = [1, 1, 1, 1, 1, 1, 1]
linewidth["pavfav"] = [1, 1, 1, 1, 1, 1, 1]
markers["pavfav"] = [None, "o", "x"]
linewidth["exploration"] = [1, 1, 1, 1, 1, 1, 1]
# markers["exploration"] = [None, "o", "x"]
colors_default = [
"k",
"g",
"r",
"b",
"#fb9a99",
"#e31a1c",
"#fdbf6f",
"#ff7f00",
"#cab2d6",
]
legend_pos = dict()
legend_pos["policy"] = "center right"
SL = "Requests serviced"
OF = "Objective function"
TIME = "Time(s)"
XLABEL = "Iteration"
window = 50
ITERATIONS = 1000
markers_default = [None] * len(adhoc_compare[test_label])
# markers = [None, "o", "*", "x", "|", None]
shadow = False
dpi = 1200
d = dict()
d_plot = defaultdict(list)
for exp, sum_label in zip(
adhoc_compare[test_label], adhoc_compare_labels[test_label]
):
# folder = "O:/phd/output_paper/"
# folder = conf.FOLDER_OUTPUT
# path_all_stats = folder + exp + "/overall_stats.csv"
# config_exp = ConfigNetwork()
# Comparison is drawn from training
path_all_stats = conf.FOLDER_OUTPUT + exp + "/adp/train/overall_stats.csv"
print(sum_label, path_all_stats)
config_exp = ConfigNetwork()
try:
# config_exp.load(folder + exp + "/exp_settings.json")
config_exp.load(conf.FOLDER_OUTPUT + exp + "/exp_settings.json")
df = pd.read_csv(path_all_stats)
except Exception as e:
print(f"Cannot load file!Exception: \"{e}\"")
continue
print(sum_label)
# spatiotemporal_levels = exp[2].get_levels()
# neighbors = exp[2].get_reb_neighbors()
id_label = exp # spatiotemporal_levels + neighbors
d["reward_" + id_label] = df["Total reward"][:ITERATIONS]
d["service_rate_" + id_label] = df["Service rate"][:ITERATIONS]
d["time_" + id_label] = df["time"][:ITERATIONS]
d_plot[OF].append(
(id_label, sum_label, df["Total reward"][:ITERATIONS].values)
)
d_plot[SL].append(
(id_label, sum_label, df["Service rate"][:ITERATIONS].values)
)
# d_plot["Time(s)"].append(
# (id_label, sum_label, df["time"][:ITERATIONS])
# )
# print(f" - {id_label}")\
yticks = dict()
yticks_labels = dict()
yticks[OF] = np.linspace(0, 600, 8)
yticks[SL] = np.linspace(0.5, 0.7, 5)
# Policy
# yticks[OF] = np.linspace(13000, 19000, 13)
# yticks[SL] = np.linspace(0.5, 0.95, 10)
# yticks[OF] = np.linspace(13000, 19000, 7)
# yticks[SL] = np.linspace(0.5, 0.95, 8)
yticks_labels[SL] = [f"{s:3.0%}" for s in yticks[SL]]
yticks_labels[OF] = [f"{p:,.0f}" for p in yticks[OF]]
yticks[TIME] = np.linspace(0, 300, 5)
yticks_labels["Time(s)"] = np.linspace(0, 300, 5)
df_outcome = pd.DataFrame(d)
df_outcome = df_outcome[sorted(df_outcome.columns.values)]
df_outcome.to_csv("outcome_tuning.csv", index=False)
sns.set_context(context)
np.set_printoptions(precision=3)
fig, axs = plt.subplots(1, len(d_plot), figsize=(8 * len(d_plot), 6))
for i, cat_label_data in enumerate(d_plot.items()):
cat, label_data = cat_label_data
if shadow:
for j, label_data in enumerate(label_data):
label, sum_label, data = label_data
axs[i].plot(
data,
color=colors.get(test_label, colors_default)[j],
linewidth=linewidth.get(test_label, [2] * len(label_data))[
j
],
marker=markers.get(test_label, markers_default)[j],
alpha=0.25,
label="",
)
cat, label_data = cat_label_data
for j, ld in enumerate(label_data):
label, sum_label, data = ld
mavg = movingaverage(data, window)
axs[i].plot(
mavg,
color=colors.get(test_label, colors_default)[j],
linewidth=linewidth.get(test_label, [1] * len(label_data))[j],
marker=markers.get(test_label, markers_default)[j],
fillstyle="none",
markevery=25,
# linestyle=':',
label=sum_label,
)
# And add a special annotation for the group we are interested in
# axs[i].text(ITERATIONS+0.2, mavg[-1], sum_label, horizontalalignment='left', size='small', color='k')
# axs[i].set_title(vst)
axs[i].set_xlabel(XLABEL)
axs[i].set_ylabel(cat)
axs[i].set_xlim(0, len(data))
axs[i].set_yticks(yticks[cat])
axs[i].set_yticklabels(yticks_labels[cat])
plt.legend(
loc=legend_pos.get(test_label, "lower right"),
frameon=False,
bbox_to_anchor=(1, 0, 0, 1), # (0.5, -0.15),
ncol=1,
# title="Max. #cars/location"
)
# plt.show()
print(f'Saving "{test_label}.{fig_format}"...')
plt.savefig(f"{test_label}.{fig_format}", bbox_inches="tight", dpi=dpi)
|
[
"sys.path.append",
"pandas.DataFrame",
"numpy.set_printoptions",
"os.getcwd",
"pandas.read_csv",
"collections.defaultdict",
"numpy.linspace",
"seaborn.set",
"seaborn.set_context",
"matplotlib.pyplot.savefig",
"mod.env.config.ConfigNetwork"
] |
[((103, 124), 'sys.path.append', 'sys.path.append', (['root'], {}), '(root)\n', (118, 124), False, 'import sys\n'), ((379, 401), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (386, 401), True, 'import seaborn as sns\n'), ((27632, 27649), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (27643, 27649), False, 'from collections import defaultdict\n'), ((29282, 29304), 'numpy.linspace', 'np.linspace', (['(0)', '(600)', '(8)'], {}), '(0, 600, 8)\n', (29293, 29304), True, 'import numpy as np\n'), ((29322, 29346), 'numpy.linspace', 'np.linspace', (['(0.5)', '(0.7)', '(5)'], {}), '(0.5, 0.7, 5)\n', (29333, 29346), True, 'import numpy as np\n'), ((29686, 29708), 'numpy.linspace', 'np.linspace', (['(0)', '(300)', '(5)'], {}), '(0, 300, 5)\n', (29697, 29708), True, 'import numpy as np\n'), ((29740, 29762), 'numpy.linspace', 'np.linspace', (['(0)', '(300)', '(5)'], {}), '(0, 300, 5)\n', (29751, 29762), True, 'import numpy as np\n'), ((29780, 29795), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (29792, 29795), True, 'import pandas as pd\n'), ((29926, 29950), 'seaborn.set_context', 'sns.set_context', (['context'], {}), '(context)\n', (29941, 29950), True, 'import seaborn as sns\n'), ((29955, 29987), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(3)'}), '(precision=3)\n', (29974, 29987), True, 'import numpy as np\n'), ((31998, 32069), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{test_label}.{fig_format}"""'], {'bbox_inches': '"""tight"""', 'dpi': 'dpi'}), "(f'{test_label}.{fig_format}', bbox_inches='tight', dpi=dpi)\n", (32009, 32069), True, 'import matplotlib.pyplot as plt\n'), ((72, 83), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (81, 83), False, 'import os\n'), ((28129, 28144), 'mod.env.config.ConfigNetwork', 'ConfigNetwork', ([], {}), '()\n', (28142, 28144), False, 'from mod.env.config import ConfigNetwork\n'), ((28329, 28356), 'pandas.read_csv', 'pd.read_csv', (['path_all_stats'], {}), '(path_all_stats)\n', (28340, 28356), True, 'import pandas as pd\n')]
|
import pygame
import numpy as np
'''GUI for the Game of Mills.
Gets the Mapping of the field as an array.
Has a fixed conversion array as a 2D space on which the board is represented, which can be scaled as needed.
Scans the GUI for mouse clicks and transforms them, via indexing into the unscaled array, back into
the coordinates of the board, which can be returned to commit the player's move.
Graphical representation of both arrays (the scaled array has the same architecture, but different dimensions).
Ignores the additional layer of the board array that stores meta-data about the game, such as stages, counters and past moves.
Author: <NAME>, https://github.com/PPetermeier
Date: 28.02.2021
Based on the implementation of mills with alphazero from Simon Schnecko.
2,7 ------------- 2,0 ------------- 2,1
| | |
| 1,7 ------- 1,0 ------- 1,1 |
| | | | |
| | 0,7 - 0,0 - 0,1 | |
| | | | | |
2,6 - 1,6 - 0,6 0,2 - 1,2 - 2,2
| | | | | |
| | 0,5 - 0,4 - 0,3 | |
| | | | |
| 1,5 ------- 1,4 ------- 1,3 |
| | |
2,5 ------------- 2,4 ------------- 2,3
1,1 ------------- 1,7 ------------- 1,13
| | |
| 3,3 ------- 3,7 ------- 3,11 |
| | | | |
| | 5,5 - 5,7 - 5,9 | |
| | | | | |
7,1 - 7,3 - 7,5 7,9 - 7,11 - 7,13
| | | | | |
| | 9,5 - 9,7 - 9,9 | |
| | | | |
| 11,3------- 11,7-------11,11 |
| | |
13,1 -------------13,7-------------13,13
'''
##------------ Conversion tables between index & coordinate ------------
scale: int = 100
line = int(scale / 10)
def setscale(value):
    # rebind the module-level scale instead of a local variable
    global scale
    scale = value
def getscale():
return scale
def testboard():
testboard = np.zeros((3, 8))
return testboard
# --------------- Considerations on how an interface table could be generated automatically ----------
# def write_coordinates(board):
# coordinates = np.copy(board)
# dimension = 1
# for i in range(len(coordinates)):
# dimension + 4
# vdistance = 2, 0
# hdistance = 0, 2
# cdistance = 2, 0
# anchor1 = (dimension / 2) - 2
# anchor2 = dimension / 2
# anchor =np.array([anchor1, anchor2])
# anchor[0] = int(dimension / 2) - 2), int(dimension / 2)
# counter = anchor
# for i in range(len(coordinates)):
# for j in range(len(coordinates[i])):
# if i == 0 and j == 0:
# coordinates[i[j]] = int(anchor1), int(anchor2)
# break
# if i > 0 == True and j == 0:
# counter = anchor - int(i * cdistance)
# coordinates[i[j]] = counter
# if j == 1:
# counter = counter + hdistance
# coordinates[i[j]] = counter
# break
# if j < 4 == True:
# counter = counter + vdistance
# coordinates[i[j]] = counter
# break
# if j < 6 == True:
# counter = counter - hdistance
# coordinates[i[j]] = counter
# break
# else:
# counter = counter - vdistance
# vdistance[0] + 1, hdistance[1] + 1
# return coordinates
##-------------------------- Fixed interface between board and GUI ----------------------
coordinates = np.array([[(5, 7), (5, 9), (7, 9), (9, 9), (9, 7), (9, 5), (7, 5), (5, 5)],
[(3, 7), (3, 11), (7, 11), (11, 11), (11, 7), (11, 3), (7, 3), (3, 3)],
[(1, 7), (1, 13), (7, 13), (13, 13), (13, 7), (13, 1), (7, 1), (1, 1)]])
coordinates = np.roll(np.fliplr(coordinates), 7 , 1)
scaled_coordinates = coordinates * scale
def get_index(gui_position1, gui_position2): ## I suspect the custom-built board has its rows and columns swapped. If problems arise, simply swap rows & cols in the loop below!
gui_position1 = int(round(gui_position1/scale))
gui_position2 = int(round(gui_position2/scale))
search = (gui_position1, gui_position2)
rows = coordinates.shape[0]
cols = coordinates.shape[1]
failure = False
print(search)
for i in range(rows):
for j in range(cols):
if np.all(coordinates[i, j] == search):
return i, j
return failure
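# Added illustration: get_index inverts the pixel mapping, e.g. with the default
# scale of 100 a click near pixel (700, 500) is rounded to the grid point (7, 5)
# and looked up in `coordinates`; if the rounded point is not one of the stored
# grid points, the function returns False so the click can be rejected.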
def get_gui_position(index_position1, index_position2):
gui_position = scaled_coordinates[index_position1, index_position2]
return gui_position
##------------Color definitions-----------------------------
BLACK, GREEN, BLUE, GRAY, YELLOW, WHITE = (0, 0, 0), (0, 255, 0), (0, 0, 255), (127, 127, 127), (255, 255, 0), (
255, 255, 255)
##-------------- Drawing of the board ----------------
def drawbackground():
    pygame.draw.rect(screen, WHITE, (0, 0, 14 * scale, 14 * scale)) # background
def drawlines():
for i in range(3):
for j in range(8):
start, finish = scaled_coordinates[i][j], scaled_coordinates[i][((j + 1) % 8)]
pygame.draw.line(screen, BLACK, start, finish, line)
if j % 2 == 0 and i == 0:
start, finish = scaled_coordinates[i][j], scaled_coordinates[(i + 2)][j]
pygame.draw.line(screen, BLACK, start, finish, line)
pygame.display.update()
## Should be called after every move, unless the board has to be completely redrawn each time? If not, only the changes are displayed here
def updateboard(board):
for i in range(3):
for j in range(8):
            # valid = get_legal_moves(player)  # the integration of legal_moves is still missing here
# if board[i, j] == valid:
# pygame.draw.circle(surface=screen, color=GREEN, center=(get_gui_position(i, j)), radius=(scale / 2))
# pygame.display.update()
if board[i, j] == 1:
pygame.draw.circle(surface=screen, color=YELLOW, center=(get_gui_position(i, j)), radius=(scale / 2))
pygame.display.update()
if board[i, j] == -1:
pygame.draw.circle(surface=screen, color=BLUE, center=(get_gui_position(i, j)), radius=(scale / 2))
pygame.display.update()
if board[i, j] == 0:
pygame.draw.circle(surface=screen, color=GRAY, center=(get_gui_position(i, j)), radius=(scale / 2))
pygame.display.update()
## ------------------------- Initialization variables -------------------------------
board = testboard()
width: int = 14 * scale
height: int = 14 * scale
size = (width, height)
pygame.init()
screen = pygame.display.set_mode(size)
myfont = pygame.font.SysFont("monospace", 75)
# board = write_coordinates(board)
drawbackground()
drawlines()
updateboard(board)
pygame.display.update()
pygame.time.wait(30000)
## -------------- Event handling, taken from Exercise 3 and adapted --------------
# while board.pieces[3, 7] == 0: ##piece [3,7]: stores the game state
player_turn = True
player = 1
def set_piece(board, player, col, row):
place = get_index(col, row)
if place != False:
board[place] = player
else:
        Message = 'You clicked an invalid spot, try again on an empty field'
print(Message)
def scanning():
while player_turn:
for event in pygame.event.get():
if event.type == pygame.MOUSEBUTTONDOWN:
posx = event.pos[0]
posy = event.pos[1]
col = int(round(posx))
row = int(round(posy))
set_piece(board, player, col, row)
updateboard(board)
#position = [int(math.floor(posy / scale)), int(math.floor(posx / scale))]
# column = int(math.floor(posy / scale))
# row = int(math.floor(posx / scale))
# valid = self.game.getValidMoves(board, player)
# for i in range(valid):
# if position == valid(i):
# execute_move(position)
# updateboard(board)
# pygame.display.update()
# break
pygame.display.update()
scanning()
|
[
"pygame.draw.line",
"pygame.font.SysFont",
"pygame.draw.rect",
"pygame.display.set_mode",
"pygame.event.get",
"numpy.zeros",
"pygame.init",
"pygame.time.wait",
"numpy.fliplr",
"pygame.display.update",
"numpy.array",
"numpy.all"
] |
[((3601, 3829), 'numpy.array', 'np.array', (['[[(5, 7), (5, 9), (7, 9), (9, 9), (9, 7), (9, 5), (7, 5), (5, 5)], [(3, 7),\n (3, 11), (7, 11), (11, 11), (11, 7), (11, 3), (7, 3), (3, 3)], [(1, 7),\n (1, 13), (7, 13), (13, 13), (13, 7), (13, 1), (7, 1), (1, 1)]]'], {}), '([[(5, 7), (5, 9), (7, 9), (9, 9), (9, 7), (9, 5), (7, 5), (5, 5)],\n [(3, 7), (3, 11), (7, 11), (11, 11), (11, 7), (11, 3), (7, 3), (3, 3)],\n [(1, 7), (1, 13), (7, 13), (13, 13), (13, 7), (13, 1), (7, 1), (1, 1)]])\n', (3609, 3829), True, 'import numpy as np\n'), ((6819, 6832), 'pygame.init', 'pygame.init', ([], {}), '()\n', (6830, 6832), False, 'import pygame\n'), ((6842, 6871), 'pygame.display.set_mode', 'pygame.display.set_mode', (['size'], {}), '(size)\n', (6865, 6871), False, 'import pygame\n'), ((6881, 6917), 'pygame.font.SysFont', 'pygame.font.SysFont', (['"""monospace"""', '(75)'], {}), "('monospace', 75)\n", (6900, 6917), False, 'import pygame\n'), ((7001, 7024), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (7022, 7024), False, 'import pygame\n'), ((7025, 7048), 'pygame.time.wait', 'pygame.time.wait', (['(30000)'], {}), '(30000)\n', (7041, 7048), False, 'import pygame\n'), ((2030, 2046), 'numpy.zeros', 'np.zeros', (['(3, 8)'], {}), '((3, 8))\n', (2038, 2046), True, 'import numpy as np\n'), ((3892, 3914), 'numpy.fliplr', 'np.fliplr', (['coordinates'], {}), '(coordinates)\n', (3901, 3914), True, 'import numpy as np\n'), ((5012, 5075), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'WHITE', '(0, 0, 14 * scale, 14 * scale)'], {}), '(screen, WHITE, (0, 0, 14 * scale, 14 * scale))\n', (5028, 5075), False, 'import pygame\n'), ((5515, 5538), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (5536, 5538), False, 'import pygame\n'), ((7560, 7578), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (7576, 7578), False, 'import pygame\n'), ((8365, 8388), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (8386, 8388), False, 'import pygame\n'), ((4500, 4535), 'numpy.all', 'np.all', (['(coordinates[i, j] == search)'], {}), '(coordinates[i, j] == search)\n', (4506, 4535), True, 'import numpy as np\n'), ((5262, 5314), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLACK', 'start', 'finish', 'line'], {}), '(screen, BLACK, start, finish, line)\n', (5278, 5314), False, 'import pygame\n'), ((5458, 5510), 'pygame.draw.line', 'pygame.draw.line', (['screen', 'BLACK', 'start', 'finish', 'line'], {}), '(screen, BLACK, start, finish, line)\n', (5474, 5510), False, 'import pygame\n'), ((6235, 6258), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6256, 6258), False, 'import pygame\n'), ((6425, 6448), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6446, 6448), False, 'import pygame\n'), ((6614, 6637), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (6635, 6637), False, 'import pygame\n')]
|
#!/usr/bin/env python3
import os as os
import sys as sys
import logging as log
import io as io
import traceback as trb
import argparse as argp
import collections as col
import numpy as np
def parse_command_line():
"""
:return:
"""
parser = argp.ArgumentParser(prog="np_cov_to_regions.py", description=__doc__)
parser.add_argument(
"--debug",
"-d",
action="store_true",
default=False,
dest="debug",
help="Print status and error messages to STDOUT. Otherwise, only"
" errors/warnings will be reported to STDERR.",
)
parser.add_argument(
"--seq-info",
"-seq",
type=str,
required=True,
dest="seqinfo",
help="Single line from a FASTA index file (fai).",
)
parser.add_argument(
"--num-regions",
"-nr",
type=int,
required=True,
dest="numregions",
help="Number of regions to create",
)
parser.add_argument(
"--output",
"-o",
type=str,
default="stdout",
help="Output regions to file (or stdout by default)."
)
return parser.parse_args()
def read_reference_index(fpath, logger):
"""
:param fpath:
:param logger:
:return:
"""
logger.debug('Reading sequence info from path {}'.format(fpath))
with open(fpath, 'r') as fai:
seq_name, seq_length = fai.readline().split('\t')[:2]
logger.debug('Read sequence {} (length {}) info file'.format(seq_name, seq_length))
return seq_name, int(seq_length)
def prepare_data_structures(seqs, logger):
"""
DEPRECATED
:param seqs:
:param logger:
:return:
"""
seqs = sorted(seqs, key=lambda x: x[1], reverse=True)
# debug example: seqs = [('s1', 10), ('s2', 8)]
total_seq_length = sum([x[1] for x in seqs])
logger.debug('Maximal total sequence length: {} Gbp'.format(round(total_seq_length / 10**9, 2)))
pos_depth = np.zeros(total_seq_length, dtype=np.int16)
seq_boundaries = col.defaultdict(dict)
last_end = 0
for sn, sl in seqs:
offset = last_end - 1
seq_boundaries[sn]['offset'] = offset
seq_boundaries[sn]['length'] = sl
seq_boundaries[sn]['array_start'] = last_end
seq_boundaries[sn]['array_end'] = last_end + sl
last_end = last_end + sl
return seq_boundaries, pos_depth
def read_coverage_per_position(seq_name, seq_length, logger):
"""
:param seq_name:
:param seq_length:
:param logger:
:return:
"""
logger.debug('Processing coverage data for sequence {}'.format(seq_name))
pos_depth = np.zeros(seq_length, dtype=np.int64)
int64 = np.int64
for line in sys.stdin:
_, pos, depth = line.split('\t')
# -1: bedtools genomecov returns 1-based coords
pos_depth[int64(pos) - 1] = int64(depth)
logger.debug('Input stream ended...')
return pos_depth
def dump_unicov_regions(depth_per_region, pos_depth, seq_name, seq_len, output, logger):
"""
:param depth_per_region:
:param pos_depth:
:param seq_name:
:param seq_len:
:param output:
:param logger:
:return:
"""
logger.debug('Writing individual regions to file: {}'.format(output))
if 'stdout' not in output:
os.makedirs(os.path.dirname(os.path.abspath(output)), exist_ok=True)
else:
output = '/dev/stdout'
pos_depth = pos_depth.cumsum()
with open(output, 'w') as dump:
start = 1
while 1:
pos_depth -= depth_per_region
try:
current_end = np.nonzero(pos_depth > 0)[0][0]
except IndexError:
_ = dump.write('{}:{}-{}\n'.format(seq_name, start, seq_len))
break
else:
_ = dump.write('{}:{}-{}\n'.format(seq_name, start, current_end))
start = current_end
logger.debug('Regions with approx. uniform coverage generated')
return
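# Illustrative sketch (not part of the original script): a region boundary is found by
# subtracting the per-region depth budget from the cumulative depth and taking the first
# position where the remainder is still positive. For a toy depth profile
# [1, 1, 4, 1, 1, 4] and a budget of 6, that position is index 3 (cumulative 1, 2, 6, 7, 8, 12).
_toy_remainder = np.array([1, 1, 4, 1, 1, 4], dtype=np.int64).cumsum() - 6
assert np.nonzero(_toy_remainder > 0)[0][0] == 3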
def main(logger, cargs):
"""
:param logger:
:param cargs:
:return:
"""
logger.debug("Starting computations")
seq_name, seq_length = read_reference_index(cargs.seqinfo, logger)
pos_depth = read_coverage_per_position(seq_name, seq_length, logger)
total_depth = pos_depth.sum()
logger.debug('Total cumulative depth: {}'.format(total_depth))
depth_per_region = np.int64(round(total_depth / cargs.numregions, 0))
logger.debug('Approx. depth per region ({}): {}'.format(cargs.numregions, depth_per_region))
dump_unicov_regions(depth_per_region, pos_depth, seq_name, seq_length, cargs.output, logger)
return
if __name__ == "__main__":
logger = None
rc = 0
try:
log_msg_format = "%(asctime)s | %(levelname)s | %(message)s"
cargs = parse_command_line()
if cargs.debug:
log.basicConfig(stream=sys.stderr, level=log.DEBUG, format=log_msg_format)
else:
log.basicConfig(stream=sys.stderr, level=log.WARNING, format=log_msg_format)
logger = log.getLogger()
logger.debug("Logger initiated")
main(logger, cargs)
logger.debug("Run completed - exit")
log.shutdown()
except Exception as exc:
rc = 1
if logger is not None:
logger.error("Unrecoverable error: {}".format(str(exc)))
logger.debug("=== TRACEBACK ===\n\n")
buf = io.StringIO()
trb.print_exc(file=buf)
logger.error(buf.getvalue())
logger.debug("Exit\n")
log.shutdown()
else:
trb.print_exc()
finally:
sys.exit(rc)
|
[
"os.path.abspath",
"io.StringIO",
"traceback.print_exc",
"argparse.ArgumentParser",
"logging.basicConfig",
"numpy.zeros",
"collections.defaultdict",
"numpy.nonzero",
"logging.shutdown",
"sys.exit",
"logging.getLogger"
] |
[((260, 329), 'argparse.ArgumentParser', 'argp.ArgumentParser', ([], {'prog': '"""np_cov_to_regions.py"""', 'description': '__doc__'}), "(prog='np_cov_to_regions.py', description=__doc__)\n", (279, 329), True, 'import argparse as argp\n'), ((1982, 2024), 'numpy.zeros', 'np.zeros', (['total_seq_length'], {'dtype': 'np.int16'}), '(total_seq_length, dtype=np.int16)\n', (1990, 2024), True, 'import numpy as np\n'), ((2046, 2067), 'collections.defaultdict', 'col.defaultdict', (['dict'], {}), '(dict)\n', (2061, 2067), True, 'import collections as col\n'), ((2658, 2694), 'numpy.zeros', 'np.zeros', (['seq_length'], {'dtype': 'np.int64'}), '(seq_length, dtype=np.int64)\n', (2666, 2694), True, 'import numpy as np\n'), ((5070, 5085), 'logging.getLogger', 'log.getLogger', ([], {}), '()\n', (5083, 5085), True, 'import logging as log\n'), ((5208, 5222), 'logging.shutdown', 'log.shutdown', ([], {}), '()\n', (5220, 5222), True, 'import logging as log\n'), ((5651, 5663), 'sys.exit', 'sys.exit', (['rc'], {}), '(rc)\n', (5659, 5663), True, 'import sys as sys\n'), ((4875, 4949), 'logging.basicConfig', 'log.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'log.DEBUG', 'format': 'log_msg_format'}), '(stream=sys.stderr, level=log.DEBUG, format=log_msg_format)\n', (4890, 4949), True, 'import logging as log\n'), ((4976, 5052), 'logging.basicConfig', 'log.basicConfig', ([], {'stream': 'sys.stderr', 'level': 'log.WARNING', 'format': 'log_msg_format'}), '(stream=sys.stderr, level=log.WARNING, format=log_msg_format)\n', (4991, 5052), True, 'import logging as log\n'), ((3345, 3368), 'os.path.abspath', 'os.path.abspath', (['output'], {}), '(output)\n', (3360, 3368), True, 'import os as os\n'), ((5435, 5448), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5446, 5448), True, 'import io as io\n'), ((5461, 5484), 'traceback.print_exc', 'trb.print_exc', ([], {'file': 'buf'}), '(file=buf)\n', (5474, 5484), True, 'import traceback as trb\n'), ((5573, 5587), 'logging.shutdown', 'log.shutdown', ([], {}), '()\n', (5585, 5587), True, 'import logging as log\n'), ((5614, 5629), 'traceback.print_exc', 'trb.print_exc', ([], {}), '()\n', (5627, 5629), True, 'import traceback as trb\n'), ((3624, 3649), 'numpy.nonzero', 'np.nonzero', (['(pos_depth > 0)'], {}), '(pos_depth > 0)\n', (3634, 3649), True, 'import numpy as np\n')]
|
import numpy as np
from multiprocessing import Pool, cpu_count
import statsmodels.api as sm
from statsmodels.gam.api import GLMGam, BSplines
from scipy.stats import norm
from tqdm import tqdm
from itertools import product
import pandas as pd
from ananke.graphs import ADMG
from ananke.models import LinearGaussianSEM
from statsmodels.stats.proportion import proportion_confint
import os
import sys
try: sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
except NameError: print("Cannot load testing module")
from wrapper_resampler import ShiftedTester
np.random.seed(1)
# Help functions
def e(n=1): return np.random.normal(size=(n, 1)) # Gaussian noise
def u(n=1): return np.random.uniform(size=(n, 1)) # Uniform noise
def cb(*args): return np.concatenate(args, axis=1) # Col bind
inv = np.linalg.inv
p = norm.pdf
# Simulate data from a Gaussian SCM
def scm(n, causal_effect=0):
H = 1/2*e(n)**2 #2*e(n)
X1 = np.random.gamma(2, size=(n,1))
X2 = X1*H + e(n)
X3 = X2**2 + 1.5*e(n)
X4 = causal_effect*X1 + X3 + H + e(n)
return cb(H, X1, X2, X3, X4)
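# Note on the SCM above (added description, derived from the assignments): H (column 0)
# enters both X2 and X4, X3 is a noisy function of X2, and `causal_effect` sets the
# direct effect of X1 on X4; the test statistic T below regresses column 4 (X4) on
# column 1 (X1).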
def weight(X):
num = p(X[:, 3], loc=X[:,3].mean(), scale=1.0)
denom = p(X[:, 3], loc=X[:,2]**2, scale=2)
return num/denom
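# Hypothetical illustration (not part of the original script): weight() is a density
# ratio -- the target density N(mean of column 3, 1) divided by the conditional density
# N(column2**2, 2), both evaluated at column 3 -- so rows whose column-3 value is well
# explained by column2**2 are re-weighted towards the target marginal. Tiny fixed example:
_X_demo = np.array([[0.0, 1.0, 0.5, 1.0, 1.0], [0.0, 1.0, 0.5, 2.0, 4.0]])
_w_demo = weight(_X_demo)
assert _w_demo.shape == (2,) and np.all(_w_demo > 0)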
# Fitted weight
def weight_fit(X):
num = p(X[:, 3], loc=X[:,3].mean(), scale=1.0)
mod1 = GLMGam(X[:,3], smoother=BSplines(X[:,2], df = 10, degree=3)).fit()
denom = p(X[:,3], loc=mod1.fittedvalues, scale=np.sqrt(mod1.scale))
return num/denom
# Test function: Regress X4 ~ X1 and get p-value
def T(X): return (sm.OLS(X[:, 4], sm.tools.add_constant(X[:, 1])).fit().pvalues[1] < 0.05)*1
# Parameters for choice-of-m algorithm
tune_m_repeats = 25
cutoff = np.quantile(np.random.uniform(size=(1000, tune_m_repeats)).min(axis=1), 0.05)
# Loop parameters
causal_effects = [0, 0.3]#np.linspace(0, 5, num=21)
n_range = [int(10**(x/2)) for x in range(4, 10)]
tests = {"LinReg": T}
m_choices = ["heuristic", "sqrt"]
methods = ["resampling"]
combinations = list(product(n_range, causal_effects, m_choices, methods))
## Wrap as function to apply multiprocessing
def conduct_experiment(i=None):
out = []
for n, c_e, m_choice, method in combinations:
X = scm(n, causal_effect=c_e)
# Do not do anything if m < 5 or (m>n and not replacement)
try:
psi = ShiftedTester(weight_fit, tests["LinReg"], replacement="NO-REPL-reject", reject_retries=100)
m = psi.tune_m(X, j_x = [3], j_y=[2], gaussian=True, cond = [X[:,3].mean()], m_factor=1.3,
p_cutoff=cutoff, repeats=tune_m_repeats, replacement=False,
m_init=int(np.sqrt(n))) if m_choice == "heuristic" else None
out.append(psi.test(X, m=m))
except:
# Catch errors from test statistic
print(f"Error occurred {c_e}, {n}, {m_choice}, {method}")
out.append(np.nan)
return(out)
## Conduct multiple experiments with multiprocessing and export to R for plotting:
if __name__ == '__main__':
repeats = 1000
# Multiprocess
pool = Pool(cpu_count()-2)
res = np.array(
list(tqdm(pool.imap_unordered(conduct_experiment, range(repeats)), total=repeats)))
pool.close()
# Count non-nas, to be used for binomial confidence intervals
counts = (~np.isnan(res)).sum(axis=0)
nans = np.isnan(res).sum(axis=0)
res = np.nansum(res, axis=0)
df = pd.DataFrame(
[(x/(repeats-n), *v, *proportion_confint(x, repeats-n, method="binom_test"), n) for x, v, n in zip(res, combinations, nans)],
columns=["alpha", "n", "Causal_Effect", "m_choice", "method", "Lower", "Upper", "NoNans"])
# Export to R for ggplotting
df['alpha'] = df["alpha"].replace(np.NaN, "NA")
df.to_csv("experiment-dormant-nonparametric.csv")
|
[
"numpy.random.uniform",
"numpy.nansum",
"os.path.abspath",
"numpy.random.seed",
"wrapper_resampler.ShiftedTester",
"statsmodels.api.tools.add_constant",
"numpy.isnan",
"statsmodels.stats.proportion.proportion_confint",
"numpy.random.gamma",
"multiprocessing.cpu_count",
"statsmodels.gam.api.BSplines",
"numpy.random.normal",
"itertools.product",
"numpy.concatenate",
"numpy.sqrt"
] |
[((581, 598), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (595, 598), True, 'import numpy as np\n'), ((636, 665), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(n, 1)'}), '(size=(n, 1))\n', (652, 665), True, 'import numpy as np\n'), ((703, 733), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(n, 1)'}), '(size=(n, 1))\n', (720, 733), True, 'import numpy as np\n'), ((774, 802), 'numpy.concatenate', 'np.concatenate', (['args'], {'axis': '(1)'}), '(args, axis=1)\n', (788, 802), True, 'import numpy as np\n'), ((952, 983), 'numpy.random.gamma', 'np.random.gamma', (['(2)'], {'size': '(n, 1)'}), '(2, size=(n, 1))\n', (967, 983), True, 'import numpy as np\n'), ((2012, 2064), 'itertools.product', 'product', (['n_range', 'causal_effects', 'm_choices', 'methods'], {}), '(n_range, causal_effects, m_choices, methods)\n', (2019, 2064), False, 'from itertools import product\n'), ((3404, 3426), 'numpy.nansum', 'np.nansum', (['res'], {'axis': '(0)'}), '(res, axis=0)\n', (3413, 3426), True, 'import numpy as np\n'), ((1457, 1476), 'numpy.sqrt', 'np.sqrt', (['mod1.scale'], {}), '(mod1.scale)\n', (1464, 1476), True, 'import numpy as np\n'), ((1724, 1770), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(1000, tune_m_repeats)'}), '(size=(1000, tune_m_repeats))\n', (1741, 1770), True, 'import numpy as np\n'), ((2344, 2440), 'wrapper_resampler.ShiftedTester', 'ShiftedTester', (['weight_fit', "tests['LinReg']"], {'replacement': '"""NO-REPL-reject"""', 'reject_retries': '(100)'}), "(weight_fit, tests['LinReg'], replacement='NO-REPL-reject',\n reject_retries=100)\n", (2357, 2440), False, 'from wrapper_resampler import ShiftedTester\n'), ((3104, 3115), 'multiprocessing.cpu_count', 'cpu_count', ([], {}), '()\n', (3113, 3115), False, 'from multiprocessing import Pool, cpu_count\n'), ((3368, 3381), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (3376, 3381), True, 'import numpy as np\n'), ((453, 478), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (468, 478), False, 'import os\n'), ((3330, 3343), 'numpy.isnan', 'np.isnan', (['res'], {}), '(res)\n', (3338, 3343), True, 'import numpy as np\n'), ((1363, 1397), 'statsmodels.gam.api.BSplines', 'BSplines', (['X[:, 2]'], {'df': '(10)', 'degree': '(3)'}), '(X[:, 2], df=10, degree=3)\n', (1371, 1397), False, 'from statsmodels.gam.api import GLMGam, BSplines\n'), ((3482, 3537), 'statsmodels.stats.proportion.proportion_confint', 'proportion_confint', (['x', '(repeats - n)'], {'method': '"""binom_test"""'}), "(x, repeats - n, method='binom_test')\n", (3500, 3537), False, 'from statsmodels.stats.proportion import proportion_confint\n'), ((2665, 2675), 'numpy.sqrt', 'np.sqrt', (['n'], {}), '(n)\n', (2672, 2675), True, 'import numpy as np\n'), ((1583, 1613), 'statsmodels.api.tools.add_constant', 'sm.tools.add_constant', (['X[:, 1]'], {}), '(X[:, 1])\n', (1604, 1613), True, 'import statsmodels.api as sm\n')]
|
"""
This module contains classes used to define the standard behavior of the agent.
It relies on the controllers, the chosen training/test policy and the learning algorithm
to specify its behavior in the environment.
"""
import os
import numpy as np
import copy
import sys
import joblib
from warnings import warn
from .experiment import base_controllers as controllers
from .helper import tree
from deer.policies import EpsilonGreedyPolicy
class NeuralAgent(object):
"""The NeuralAgent class wraps a learning algorithm (such as a deep Q-network) for training and testing in a given environment.
Attach controllers to it in order to conduct an experiment (when to train the agent, when to test,...).
Parameters
-----------
environment : object from class Environment
The environment in which the agent interacts
learning_algo : object from class LearningAlgo
The learning algorithm associated to the agent
replay_memory_size : int
Size of the replay memory. Default : 1000000
replay_start_size : int
Number of observations (=number of time steps taken) in the replay memory before starting learning.
Default: minimum possible according to environment.inputDimensions().
batch_size : int
Number of tuples taken into account for each iteration of gradient descent. Default : 32
random_state : numpy random number generator
Default : random seed.
exp_priority : float
The exponent that determines how much prioritization is used, default is 0 (uniform priority).
One may check out Schaul et al. (2016) - Prioritized Experience Replay.
train_policy : object from class Policy
Policy followed when in training mode (mode -1)
test_policy : object from class Policy
Policy followed when in other modes than training (validation and test modes)
only_full_history : boolean
Whether we wish to train the neural network only on full histories or we wish to fill with zeroes the
observations before the beginning of the episode
"""
def __init__(self, environment, learning_algo, replay_memory_size=1000000, replay_start_size=None, batch_size=32, random_state=np.random.RandomState(), exp_priority=0, train_policy=None, test_policy=None, only_full_history=True):
inputDims = environment.inputDimensions()
if replay_start_size == None:
replay_start_size = max(inputDims[i][0] for i in range(len(inputDims)))
elif replay_start_size < max(inputDims[i][0] for i in range(len(inputDims))) :
raise AgentError("Replay_start_size should be greater than the biggest history of a state.")
self._controllers = []
self._environment = environment
self._learning_algo = learning_algo
self._replay_memory_size = replay_memory_size
self._replay_start_size = replay_start_size
self._batch_size = batch_size
self._random_state = random_state
self._exp_priority = exp_priority
self._only_full_history = only_full_history
self._dataset = DataSet(environment, max_size=replay_memory_size, random_state=random_state, use_priority=self._exp_priority, only_full_history=self._only_full_history)
self._tmp_dataset = None # Will be created by startTesting() when necessary
self._mode = -1
self._totalModeNbrEpisode = 0
self._total_mode_reward = 0
self._training_loss_averages = []
self._Vs_on_last_episode = []
self._in_episode = False
self._selected_action = -1
self._state = []
for i in range(len(inputDims)):
self._state.append(np.zeros(inputDims[i], dtype=float))
if (train_policy==None):
self._train_policy = EpsilonGreedyPolicy(learning_algo, environment.nActions(), random_state, 0.1)
else:
self._train_policy = train_policy
if (test_policy==None):
self._test_policy = EpsilonGreedyPolicy(learning_algo, environment.nActions(), random_state, 0.)
else:
self._test_policy = test_policy
self.gathering_data=True # Whether the agent is gathering data or not
self.sticky_action=1 # Number of times the agent is forced to take the same action as part of one actual time step
def setControllersActive(self, toDisable, active):
""" Activate controller
"""
for i in toDisable:
self._controllers[i].setActive(active)
def setLearningRate(self, lr):
""" Set the learning rate for the gradient descent
"""
self._learning_algo.setLearningRate(lr)
def learningRate(self):
""" Get the learning rate
"""
return self._learning_algo.learningRate()
def setDiscountFactor(self, df):
""" Set the discount factor
"""
self._learning_algo.setDiscountFactor(df)
def discountFactor(self):
""" Get the discount factor
"""
return self._learning_algo.discountFactor()
def overrideNextAction(self, action):
""" Possibility to override the chosen action. This possibility should be used on the signal OnActionChosen.
"""
self._selected_action = action
def avgBellmanResidual(self):
""" Returns the average training loss on the epoch
"""
if (len(self._training_loss_averages) == 0):
return -1
return np.average(self._training_loss_averages)
def avgEpisodeVValue(self):
""" Returns the average V value on the episode (on time steps where a non-random action has been taken)
"""
if (len(self._Vs_on_last_episode) == 0):
return -1
if(np.trim_zeros(self._Vs_on_last_episode)!=[]):
return np.average(np.trim_zeros(self._Vs_on_last_episode))
else:
return 0
def totalRewardOverLastTest(self):
""" Returns the average sum of rewards per episode and the number of episode
"""
return self._total_mode_reward/self._totalModeNbrEpisode, self._totalModeNbrEpisode
def attach(self, controller):
if (isinstance(controller, controllers.Controller)):
self._controllers.append(controller)
else:
raise TypeError("The object you try to attach is not a Controller.")
def detach(self, controllerIdx):
return self._controllers.pop(controllerIdx)
def mode(self):
return self._mode
def startMode(self, mode, epochLength):
if self._in_episode:
raise AgentError("Trying to start mode while current episode is not yet finished. This method can be "
"called only *between* episodes for testing and validation.")
elif mode == -1:
raise AgentError("Mode -1 is reserved and means 'training mode'; use resumeTrainingMode() instead.")
else:
self._mode = mode
self._total_mode_reward = 0.
del self._tmp_dataset
self._tmp_dataset = DataSet(self._environment, self._random_state, max_size=self._replay_memory_size, only_full_history=self._only_full_history)
def resumeTrainingMode(self):
self._mode = -1
def summarizeTestPerformance(self):
if self._mode == -1:
raise AgentError("Cannot summarize test performance outside test environment.")
self._environment.summarizePerformance(self._tmp_dataset, self._learning_algo, train_data_set=self._dataset)
def train(self):
"""
This function selects a random batch of data (with self._dataset.randomBatch) and performs a
Q-learning iteration (with self._learning_algo.train).
"""
# We make sure that the number of elements in the replay memory
# is strictly superior to self._replay_start_size before taking
# a random batch and perform training
if self._dataset.n_elems <= self._replay_start_size:
return
try:
if hasattr(self._learning_algo, 'nstep'):
observations, actions, rewards, terminals, rndValidIndices = self._dataset.randomBatch_nstep(self._batch_size, self._learning_algo.nstep, self._exp_priority)
loss, loss_ind = self._learning_algo.train(observations, actions, rewards, terminals)
else:
states, actions, rewards, next_states, terminals, rndValidIndices = self._dataset.randomBatch(self._batch_size, self._exp_priority)
loss, loss_ind = self._learning_algo.train(states, actions, rewards, next_states, terminals)
self._training_loss_averages.append(loss)
if (self._exp_priority):
self._dataset.updatePriorities(pow(loss_ind,self._exp_priority)+0.0001, rndValidIndices[1])
except SliceError as e:
warn("Training not done - " + str(e), AgentWarning)
def dumpNetwork(self, fname, nEpoch=-1):
""" Dump the network
Parameters
-----------
fname : string
Name of the file where the network will be dumped
nEpoch : int
Epoch number (Optional)
"""
try:
os.mkdir("nnets")
except Exception:
pass
basename = "nnets/" + fname
for f in os.listdir("nnets/"):
if fname in f:
os.remove("nnets/" + f)
all_params = self._learning_algo.getAllParams()
if (nEpoch>=0):
joblib.dump(all_params, basename + ".epoch={}".format(nEpoch))
else:
joblib.dump(all_params, basename, compress=True)
def setNetwork(self, fname, nEpoch=-1):
""" Set values into the network
Parameters
-----------
fname : string
Name of the file where the values are
nEpoch : int
Epoch number (Optional)
"""
basename = "nnets/" + fname
if (nEpoch>=0):
all_params = joblib.load(basename + ".epoch={}".format(nEpoch))
else:
all_params = joblib.load(basename)
self._learning_algo.setAllParams(all_params)
def run(self, n_epochs, epoch_length):
"""
This function encapsulates the inference and the learning.
If the agent is in train mode (mode = -1):
It starts by calling the controllers method "onStart",
Then it runs a given number of epochs where an epoch is made up of one or many episodes (called with
agent._runEpisode) and where an epoch ends up after the number of steps reaches the argument "epoch_length".
It ends up by calling the controllers method "end".
If the agent is on non train mode (mode > -1):
This function runs a number of epochs in non train mode (mode > -1), thus without controllers.
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""
if(self._mode==-1):
self._run_train(n_epochs, epoch_length)
else:
self._run_non_train(n_epochs, epoch_length)
def _run_train(self, n_epochs, epoch_length):
"""
This function encapsulates the whole process of the learning.
It starts by calling the controllers method "onStart",
Then it runs a given number of epochs where an epoch is made up of one or many episodes (called with
agent._runEpisode) and where an epoch ends up after the number of steps reaches the argument "epoch_length".
It ends up by calling the controllers method "end".
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""
for c in self._controllers: c.onStart(self)
i = 0
while i < n_epochs:
nbr_steps_left=epoch_length
self._training_loss_averages = []
while nbr_steps_left > 0: # run new episodes until the number of steps left for the epoch has reached 0
nbr_steps_left = self._runEpisode(nbr_steps_left)
i += 1
for c in self._controllers: c.onEpochEnd(self)
self._environment.end()
for c in self._controllers: c.onEnd(self)
def _run_non_train(self, n_epochs, epoch_length):
"""
This function runs a number of epochs in non train mode (id > -1).
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""
for c in self._controllers: c.onStart(self)
i = 0
while i < n_epochs:
nbr_steps_left=epoch_length
self._totalModeNbrEpisode=0
while nbr_steps_left > 0:
self._totalModeNbrEpisode += 1
nbr_steps_left = self._runEpisode(nbr_steps_left)
i += 1
for c in self._controllers: c.onEpochEnd(self)
self._environment.end()
for c in self._controllers: c.onEnd(self)
def _runEpisode(self, maxSteps):
"""
This function runs an episode of learning. An episode ends up when the environment method "inTerminalState"
returns True (or when the number of steps reaches the argument "maxSteps")
Parameters
-----------
maxSteps : int
maximum number of steps before automatically ending the episode
"""
self._in_episode = True
initState = self._environment.reset(self._mode)
inputDims = self._environment.inputDimensions()
for i in range(len(inputDims)):
if inputDims[i][0] > 1:
self._state[i][1:] = initState[i][1:]
self._Vs_on_last_episode = []
is_terminal=False
reward=0
while maxSteps > 0:
maxSteps -= 1
if(self.gathering_data==True or self._mode!=-1):
obs = self._environment.observe()
for i in range(len(obs)):
self._state[i][0:-1] = self._state[i][1:]
self._state[i][-1] = obs[i]
V, action, reward = self._step()
self._Vs_on_last_episode.append(V)
if self._mode != -1:
self._total_mode_reward += reward
is_terminal = self._environment.inTerminalState() # If the transition ends up in a terminal state, mark transition as terminal
# Note that the new obs will not be stored, as it is unnecessary.
if(maxSteps>0):
self._addSample(obs, action, reward, is_terminal)
else:
self._addSample(obs, action, reward, True) # If the episode ends because max number of steps is reached, mark the transition as terminal
for c in self._controllers: c.onActionTaken(self)
if is_terminal:
break
self._in_episode = False
for c in self._controllers: c.onEpisodeEnd(self, is_terminal, reward)
return maxSteps
def _step(self):
"""
This method is called at each time step and performs one action in the environment.
Returns
-------
V : float
Estimated value function of current state.
action : int
The id of the action selected by the agent.
reward : float
Reward obtained for the transition
"""
action, V = self._chooseAction()
reward=0
for i in range(self.sticky_action):
reward += self._environment.act(action)
return V, action, reward
def _addSample(self, ponctualObs, action, reward, is_terminal):
if self._mode != -1:
self._tmp_dataset.addSample(ponctualObs, action, reward, is_terminal, priority=1)
else:
self._dataset.addSample(ponctualObs, action, reward, is_terminal, priority=1)
def _chooseAction(self):
if self._mode != -1:
# Act according to the test policy if not in training mode
action, V = self._test_policy.action(self._state, mode=self._mode, dataset=self._dataset)
else:
if self._dataset.n_elems > self._replay_start_size:
# follow the train policy
action, V = self._train_policy.action(self._state, mode=None, dataset=self._dataset) #is self._state the only way to store/pass the state?
else:
# Still gathering initial data: choose dummy action
action, V = self._train_policy.randomAction()
for c in self._controllers: c.onActionChosen(self, action)
return action, V
class AgentError(RuntimeError):
"""Exception raised for errors when calling the various Agent methods at wrong times.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class AgentWarning(RuntimeWarning):
"""Warning issued of the various Agent methods.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
class DataSet(object):
"""A replay memory consisting of circular buffers for observations, actions, rewards and terminals."""
def __init__(self, env, random_state=None, max_size=1000000, use_priority=False, only_full_history=True):
"""Initializer.
Parameters
-----------
inputDims : list of tuples
Each tuple relates to one of the observations where the first value is the history size considered for this
observation and the rest describes the shape of each punctual observation (e.g., scalar, vector or matrix).
See base_classes.Environment.inputDimensions() documentation for more info.
random_state : Numpy random number generator
If None, a new one is created with default numpy seed.
max_size : float
The replay memory maximum size. Default : 1000000
"""
self._batch_dimensions = env.inputDimensions()
self._max_history_size = np.max([self._batch_dimensions[i][0] for i in range (len(self._batch_dimensions))])
self._size = max_size
self._use_priority = use_priority
self._only_full_history = only_full_history
if ( isinstance(env.nActions(),int) ):
self._actions = CircularBuffer(max_size, dtype="int8")
else:
self._actions = CircularBuffer(max_size, dtype='object')
self._rewards = CircularBuffer(max_size)
self._terminals = CircularBuffer(max_size, dtype="bool")
if (self._use_priority):
self._prioritiy_tree = tree.SumTree(max_size)
self._translation_array = np.zeros(max_size)
self._observations = np.zeros(len(self._batch_dimensions), dtype='object')
# Initialize the observations container if necessary
for i in range(len(self._batch_dimensions)):
self._observations[i] = CircularBuffer(max_size, elemShape=self._batch_dimensions[i][1:], dtype=env.observationType(i))
if (random_state == None):
self._random_state = np.random.RandomState()
else:
self._random_state = random_state
self.n_elems = 0
self.sticky_action=1 # Number of times the agent is forced to take the same action as part of one actual time step
def actions(self):
"""Get all actions currently in the replay memory, ordered by time where they were taken."""
return self._actions.getSlice(0)
def rewards(self):
"""Get all rewards currently in the replay memory, ordered by time where they were received."""
return self._rewards.getSlice(0)
def terminals(self):
"""Get all terminals currently in the replay memory, ordered by time where they were observed.
terminals[i] is True if actions()[i] lead to a terminal state (i.e. corresponded to a terminal
transition), and False otherwise.
"""
return self._terminals.getSlice(0)
def observations(self):
"""Get all observations currently in the replay memory, ordered by time where they were observed.
"""
ret = np.zeros_like(self._observations)
for input in range(len(self._observations)):
ret[input] = self._observations[input].getSlice(0)
return ret
def updatePriorities(self, priorities, rndValidIndices):
"""
"""
for i in range( len(rndValidIndices) ):
self._prioritiy_tree.update(rndValidIndices[i], priorities[i])
def randomBatch(self, batch_size, use_priority):
"""Returns a batch of states, actions, rewards, terminal status, and next_states for a number batch_size of randomly
chosen transitions. Note that if terminal[i] == True, then next_states[s][i] == np.zeros_like(states[s][i]) for
each s.
Parameters
-----------
batch_size : int
Number of transitions to return.
use_priority : Boolean
Whether to use prioritized replay or not
Returns
-------
states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
States are taken randomly in the data with the only constraint that they are complete regarding the history size
for each observation.
actions : numpy array of integers [batch_size]
actions[i] is the action taken after having observed states[:][i].
rewards : numpy array of floats [batch_size]
rewards[i] is the reward obtained for taking actions[i-1].
next_states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
terminals : numpy array of booleans [batch_size]
terminals[i] is True if the transition leads to a terminal state and False otherwise
Throws
-------
SliceError
If a batch of this batch_size could not be built based on current data set (not enough data or all
trajectories are too short).
"""
if (self._max_history_size + self.sticky_action - 1 >= self.n_elems):
raise SliceError(
"Not enough elements in the dataset to create a "
"complete state. {} elements in dataset; requires {}"
.format(self.n_elems, self._max_history_size))
if (self._use_priority):
#FIXME : take into account the case where self._only_full_history is false
rndValidIndices, rndValidIndices_tree = self._randomPrioritizedBatch(batch_size)
if (rndValidIndices.size == 0):
raise SliceError("Could not find a state with full histories")
else:
rndValidIndices = np.zeros(batch_size, dtype='int32')
if (self._only_full_history):
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(self._max_history_size+self.sticky_action-1)
else:
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(minimum_without_terminal=self.sticky_action)
actions = self._actions.getSliceBySeq(rndValidIndices)
rewards = self._rewards.getSliceBySeq(rndValidIndices)
terminals = self._terminals.getSliceBySeq(rndValidIndices)
states = np.zeros(len(self._batch_dimensions), dtype='object')
next_states = np.zeros_like(states)
# We calculate the first terminal index backward in time and set it
# at maximum to the value self._max_history_size+self.sticky_action-1
first_terminals=[]
for rndValidIndex in rndValidIndices:
first_terminal=1
while first_terminal<self._max_history_size+self.sticky_action-1:
if (self._terminals[rndValidIndex-first_terminal]==True or first_terminal>rndValidIndex):
break
first_terminal+=1
first_terminals.append(first_terminal)
for input in range(len(self._batch_dimensions)):
states[input] = np.zeros((batch_size,) + self._batch_dimensions[input], dtype=self._observations[input].dtype)
next_states[input] = np.zeros_like(states[input])
for i in range(batch_size):
slice=self._observations[input].getSlice(rndValidIndices[i]-self.sticky_action+2-min(self._batch_dimensions[input][0],first_terminals[i]+self.sticky_action-1), rndValidIndices[i]+1)
if (len(slice)==len(states[input][i])):
states[input][i] = slice
else:
for j in range(len(slice)):
states[input][i][-j-1]=slice[-j-1]
# If transition leads to terminal, we don't care about next state
if rndValidIndices[i] >= self.n_elems - 1 or terminals[i]:
next_states[input][i] = np.zeros_like(states[input][i])
else:
slice=self._observations[input].getSlice(rndValidIndices[i]+2-min(self._batch_dimensions[input][0],first_terminals[i]+1), rndValidIndices[i]+2)
if (len(slice)==len(states[input][i])):
next_states[input][i] = slice
else:
for j in range(len(slice)):
next_states[input][i][-j-1]=slice[-j-1]
#next_states[input][i] = self._observations[input].getSlice(rndValidIndices[i]+2-min(self._batch_dimensions[input][0],first_terminal), rndValidIndices[i]+2)
if (self._use_priority):
return states, actions, rewards, next_states, terminals, [rndValidIndices, rndValidIndices_tree]
else:
return states, actions, rewards, next_states, terminals, rndValidIndices
def randomBatch_nstep(self, batch_size, nstep, use_priority):
"""Return corresponding states, actions, rewards, terminal status, and next_states for a number batch_size of randomly
chosen transitions. Note that if terminal[i] == True, then next_states[s][i] == np.zeros_like(states[s][i]) for
each s.
Parameters
-----------
batch_size : int
Number of transitions to return.
nstep : int
Number of transitions to be considered for each element
use_priority : Boolean
Whether to use prioritized replay or not
Returns
-------
states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * (history size+nstep-1) * size of punctual observation (which is 2D,1D or scalar)]).
States are taken randomly in the data with the only constraint that they are complete regarding the history size
for each observation.
actions : numpy array of integers [batch_size, nstep]
actions[i] is the action taken after having observed states[:][i].
rewards : numpy array of floats [batch_size, nstep]
rewards[i] is the reward obtained for taking actions[i-1].
next_states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * (history size+nstep-1) * size of punctual observation (which is 2D,1D or scalar)]).
terminals : numpy array of booleans [batch_size, nstep]
terminals[i] is True if the transition leads to a terminal state and False otherwise
Throws
-------
SliceError
If a batch of this size could not be built based on current data set (not enough data or all
trajectories are too short).
"""
if (self._max_history_size + self.sticky_action - 1 >= self.n_elems):
raise SliceError(
"Not enough elements in the dataset to create a "
"complete state. {} elements in dataset; requires {}"
.format(self.n_elems, self._max_history_size))
if (self._use_priority):
#FIXME : take into account the case where self._only_full_history is false
rndValidIndices, rndValidIndices_tree = self._randomPrioritizedBatch(batch_size)
if (rndValidIndices.size == 0):
raise SliceError("Could not find a state with full histories")
else:
rndValidIndices = np.zeros(batch_size, dtype='int32')
if (self._only_full_history):
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(self._max_history_size+self.sticky_action*nstep-1)
else:
for i in range(batch_size): # TODO: multithread this loop?
rndValidIndices[i] = self._randomValidStateIndex(minimum_without_terminal=self.sticky_action*nstep)
actions=np.zeros((batch_size,(nstep)*self.sticky_action), dtype=int)
rewards=np.zeros((batch_size,(nstep)*self.sticky_action))
terminals=np.zeros((batch_size,(nstep)*self.sticky_action))
for i in range(batch_size):
actions[i] = self._actions.getSlice(rndValidIndices[i]-self.sticky_action*nstep+1,rndValidIndices[i]+self.sticky_action)
rewards[i] = self._rewards.getSlice(rndValidIndices[i]-self.sticky_action*nstep+1,rndValidIndices[i]+self.sticky_action)
terminals[i] = self._terminals.getSlice(rndValidIndices[i]-self.sticky_action*nstep+1,rndValidIndices[i]+self.sticky_action)
observations = np.zeros(len(self._batch_dimensions), dtype='object')
# We calculate the first terminal index backward in time and set it
# at maximum to the value self._max_history_size+self.sticky_action-1
first_terminals=[]
for rndValidIndex in rndValidIndices:
first_terminal=1
while first_terminal<self._max_history_size+self.sticky_action*nstep-1:
if (self._terminals[rndValidIndex-first_terminal]==True or first_terminal>rndValidIndex):
break
first_terminal+=1
first_terminals.append(first_terminal)
batch_dimensions=copy.deepcopy(self._batch_dimensions)
for input in range(len(self._batch_dimensions)):
batch_dimensions[input]=tuple( x + y for x, y in zip(self._batch_dimensions[input],(self.sticky_action*(nstep+1)-1,0,0)) )
observations[input] = np.zeros((batch_size,) + batch_dimensions[input], dtype=self._observations[input].dtype)
for i in range(batch_size):
slice=self._observations[input].getSlice(rndValidIndices[i]-self.sticky_action*nstep+2-min(self._batch_dimensions[input][0],first_terminals[i]-self.sticky_action*nstep+1), rndValidIndices[i]+self.sticky_action+1)
if (len(slice)==len(observations[input][i])):
observations[input][i] = slice
else:
for j in range(len(slice)):
observations[input][i][-j-1]=slice[-j-1]
# If transition leads to terminal, we don't care about next state
if terminals[i][-1]:#rndValidIndices[i] >= self.n_elems - 1 or terminals[i]:
observations[input][rndValidIndices[i]:rndValidIndices[i]+self.sticky_action+1] = 0
if (self._use_priority):
return observations, actions, rewards, terminals, [rndValidIndices, rndValidIndices_tree]
else:
return observations, actions, rewards, terminals, rndValidIndices
def _randomValidStateIndex(self, minimum_without_terminal):
""" Returns the index corresponding to a timestep that is valid
"""
index_lowerBound = minimum_without_terminal - 1
# We try out an index in the acceptable range of the replay memory
index = self._random_state.randint(index_lowerBound, self.n_elems-1)
# Check if slice is valid wrt terminals
# The selected index may correspond to a terminal transition but not
# the previous minimum_without_terminal-1 transition
firstTry = index
startWrapped = False
while True:
i = index-1
processed = 0
for _ in range(minimum_without_terminal-1):
if (i < 0 or self._terminals[i]):
break;
i -= 1
processed += 1
if (processed < minimum_without_terminal - 1):
# if we stopped prematurely, shift slice to the left and try again
index = i
if (index < index_lowerBound):
startWrapped = True
index = self.n_elems - 1
if (startWrapped and index <= firstTry):
raise SliceError("Could not find a state with full histories")
else:
# else index was ok according to terminals
return index
def _randomPrioritizedBatch(self, batch_size):
indices_tree = self._prioritiy_tree.getBatch(batch_size, self._random_state, self)
indices_replay_mem=np.zeros(indices_tree.size,dtype='int32')
for i in range(len(indices_tree)):
indices_replay_mem[i]= int(self._translation_array[indices_tree[i]] \
- self._actions.getLowerBound())
return indices_replay_mem, indices_tree
def addSample(self, obs, action, reward, is_terminal, priority):
"""Store the punctual observations, action, reward, is_terminal and priority in the dataset.
Parameters
-----------
obs : ndarray
An ndarray(dtype='object') where obs[s] corresponds to the punctual observation s before the
agent took action [action].
action : int
The action taken after having observed [obs].
reward : float
The reward associated to taking this [action].
is_terminal : bool
Tells whether [action] lead to a terminal state (i.e. corresponded to a terminal transition).
priority : float
The priority to be associated with the sample
"""
# Store observations
for i in range(len(self._batch_dimensions)):
self._observations[i].append(obs[i])
# Update tree and translation table
if (self._use_priority):
index = self._actions.getIndex()
if (index >= self._size):
ub = self._actions.getUpperBound()
true_size = self._actions.getTrueSize()
tree_ind = index%self._size
if (ub == true_size):
size_extension = true_size - self._size
# New index
index = self._size - 1
tree_ind = -1
# Shift translation array
self._translation_array -= size_extension + 1
tree_ind = np.where(self._translation_array==tree_ind)[0][0]
else:
tree_ind = index
self._prioritiy_tree.update(tree_ind)
self._translation_array[tree_ind] = index
# Store rest of sample
self._actions.append(action)
self._rewards.append(reward)
self._terminals.append(is_terminal)
if (self.n_elems < self._size):
self.n_elems += 1
class CircularBuffer(object):
def __init__(self, size, elemShape=(), extension=0.1, dtype="float32"):
self._size = size
self._data = np.zeros((int(size+extension*size),) + elemShape, dtype=dtype)
self._trueSize = self._data.shape[0]
self._lb = 0
self._ub = size
self._cur = 0
self.dtype = dtype
def append(self, obj):
if self._cur > self._size: #> instead of >=
self._lb += 1
self._ub += 1
if self._ub >= self._trueSize:
# Rolling array without copying whole array (for memory constraints)
# basic command: self._data[0:self._size-1] = self._data[self._lb:] OR NEW self._data[0:self._size] = self._data[self._lb-1:]
n_splits=10
for i in range(n_splits):
self._data[i*(self._size)//n_splits:(i+1)*(self._size)//n_splits] = self._data[(self._lb-1)+i*(self._size)//n_splits:(self._lb-1)+(i+1)*(self._size)//n_splits]
self._lb = 0
self._ub = self._size
self._cur = self._size #OLD self._size - 1
self._data[self._cur] = obj
self._cur += 1
def __getitem__(self, i):
return self._data[self._lb + i]
def getSliceBySeq(self, seq):
return self._data[seq + self._lb]
def getSlice(self, start, end=sys.maxsize):
if end == sys.maxsize:
return self._data[self._lb+start:self._cur]
else:
return self._data[self._lb+start:self._lb+end]
def getLowerBound(self):
return self._lb
def getUpperBound(self):
return self._ub
def getIndex(self):
return self._cur
def getTrueSize(self):
return self._trueSize
class SliceError(LookupError):
"""Exception raised for errors when getting slices from CircularBuffers.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
if __name__ == "__main__":
pass
|
[
"os.mkdir",
"copy.deepcopy",
"numpy.zeros_like",
"numpy.average",
"os.remove",
"numpy.trim_zeros",
"numpy.zeros",
"joblib.dump",
"numpy.random.RandomState",
"numpy.where",
"joblib.load",
"os.listdir"
] |
[((2233, 2256), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (2254, 2256), True, 'import numpy as np\n'), ((5500, 5540), 'numpy.average', 'np.average', (['self._training_loss_averages'], {}), '(self._training_loss_averages)\n', (5510, 5540), True, 'import numpy as np\n'), ((9394, 9414), 'os.listdir', 'os.listdir', (['"""nnets/"""'], {}), "('nnets/')\n", (9404, 9414), False, 'import os\n'), ((20931, 20964), 'numpy.zeros_like', 'np.zeros_like', (['self._observations'], {}), '(self._observations)\n', (20944, 20964), True, 'import numpy as np\n'), ((24603, 24624), 'numpy.zeros_like', 'np.zeros_like', (['states'], {}), '(states)\n', (24616, 24624), True, 'import numpy as np\n'), ((30172, 30233), 'numpy.zeros', 'np.zeros', (['(batch_size, nstep * self.sticky_action)'], {'dtype': 'int'}), '((batch_size, nstep * self.sticky_action), dtype=int)\n', (30180, 30233), True, 'import numpy as np\n'), ((30249, 30299), 'numpy.zeros', 'np.zeros', (['(batch_size, nstep * self.sticky_action)'], {}), '((batch_size, nstep * self.sticky_action))\n', (30257, 30299), True, 'import numpy as np\n'), ((30317, 30367), 'numpy.zeros', 'np.zeros', (['(batch_size, nstep * self.sticky_action)'], {}), '((batch_size, nstep * self.sticky_action))\n', (30325, 30367), True, 'import numpy as np\n'), ((31489, 31526), 'copy.deepcopy', 'copy.deepcopy', (['self._batch_dimensions'], {}), '(self._batch_dimensions)\n', (31502, 31526), False, 'import copy\n'), ((34454, 34496), 'numpy.zeros', 'np.zeros', (['indices_tree.size'], {'dtype': '"""int32"""'}), "(indices_tree.size, dtype='int32')\n", (34462, 34496), True, 'import numpy as np\n'), ((5780, 5819), 'numpy.trim_zeros', 'np.trim_zeros', (['self._Vs_on_last_episode'], {}), '(self._Vs_on_last_episode)\n', (5793, 5819), True, 'import numpy as np\n'), ((9279, 9296), 'os.mkdir', 'os.mkdir', (['"""nnets"""'], {}), "('nnets')\n", (9287, 9296), False, 'import os\n'), ((9666, 9714), 'joblib.dump', 'joblib.dump', (['all_params', 'basename'], {'compress': '(True)'}), '(all_params, basename, compress=True)\n', (9677, 9714), False, 'import joblib\n'), ((10167, 10188), 'joblib.load', 'joblib.load', (['basename'], {}), '(basename)\n', (10178, 10188), False, 'import joblib\n'), ((19433, 19451), 'numpy.zeros', 'np.zeros', (['max_size'], {}), '(max_size)\n', (19441, 19451), True, 'import numpy as np\n'), ((19851, 19874), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (19872, 19874), True, 'import numpy as np\n'), ((23816, 23851), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': '"""int32"""'}), "(batch_size, dtype='int32')\n", (23824, 23851), True, 'import numpy as np\n'), ((25276, 25375), 'numpy.zeros', 'np.zeros', (['((batch_size,) + self._batch_dimensions[input])'], {'dtype': 'self._observations[input].dtype'}), '((batch_size,) + self._batch_dimensions[input], dtype=self.\n _observations[input].dtype)\n', (25284, 25375), True, 'import numpy as np\n'), ((25404, 25432), 'numpy.zeros_like', 'np.zeros_like', (['states[input]'], {}), '(states[input])\n', (25417, 25432), True, 'import numpy as np\n'), ((29652, 29687), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': '"""int32"""'}), "(batch_size, dtype='int32')\n", (29660, 29687), True, 'import numpy as np\n'), ((31753, 31846), 'numpy.zeros', 'np.zeros', (['((batch_size,) + batch_dimensions[input])'], {'dtype': 'self._observations[input].dtype'}), '((batch_size,) + batch_dimensions[input], dtype=self._observations[\n input].dtype)\n', (31761, 31846), True, 'import numpy as np\n'), ((3716, 3751), 
'numpy.zeros', 'np.zeros', (['inputDims[i]'], {'dtype': 'float'}), '(inputDims[i], dtype=float)\n', (3724, 3751), True, 'import numpy as np\n'), ((5856, 5895), 'numpy.trim_zeros', 'np.trim_zeros', (['self._Vs_on_last_episode'], {}), '(self._Vs_on_last_episode)\n', (5869, 5895), True, 'import numpy as np\n'), ((9459, 9482), 'os.remove', 'os.remove', (["('nnets/' + f)"], {}), "('nnets/' + f)\n", (9468, 9482), False, 'import os\n'), ((26103, 26134), 'numpy.zeros_like', 'np.zeros_like', (['states[input][i]'], {}), '(states[input][i])\n', (26116, 26134), True, 'import numpy as np\n'), ((36301, 36346), 'numpy.where', 'np.where', (['(self._translation_array == tree_ind)'], {}), '(self._translation_array == tree_ind)\n', (36309, 36346), True, 'import numpy as np\n')]
|
import numpy as np
import pandas
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
from pylab import rcParams
def sendfunc(rows1):
import seaborn as sb
sb.set_style('dark')
l = list(rows1)
data = []
for i in l:
data.append(list(i))
length=len(data[0])
columnNames=[]
for i in range(0,length):
columnNames.append(data[0][i])
df = pandas.DataFrame(dict(graph=columnNames))
#for i in range(0,len(data)):
#print(df.iloc[i]['graph'])
for i in range(1,len(data)):
column = [float(c) for c in data[i]]
if(i==1):
df['d']=column
elif i==2:
df['e']=column
elif(i==3):
df['f']=column
elif(i==4):
df['g']=column
ind = np.arange(len(df))
width = 0.2
sb.set_style('dark')
fig, sb = plt.subplots()
sb.text(df.d, 0, " " + str('CPU (Avg)'), color='black', fontsize=15)
sb.text(df.e, width, " " + str('Connection (Max)'), color='black', fontsize=15)
sb.text(df.f, width + width, " " + str('Tables'), color='black', fontsize=15)
sb.text(df.g, width + width + width, " " + str('Storage Capacity'), color='black', fontsize=15)
sb.barh(ind, df.d, width, color='#C7F0A0', label='CPU (Avg)')
sb.barh(ind + width, df.e, width, color='#FFF4B2', label='Connection (Max)')
sb.barh(ind + width + width, df.f, width, color='#FFACBC', label='Tables')
sb.barh(ind + width + width + width, df.g, width, color='#B1C1D8', label='Storage Capacity')
sb.set(yticks=ind, yticklabels=df.graph, ylim=[2*width - 1, len(df)])
plt.xticks(np.arange(0,110,10))
plt.grid(axis='x')
plt.xlabel('Total Capacity %', fontsize=15)
sb.tick_params(axis= 'x', which='major', labelsize=15)
plt.rcParams['ytick.labelsize']=12
#plt.rcParams['axes.labelcolor']='cyan'
plt.gca().axes.get_yaxis().set_visible(False)
plt.gcf().set_size_inches(20, 10)
fig.savefig('graph_images/clusterstats.png',transparent=False, bbox_inches='tight', pad_inches=0)
|
[
"seaborn.set_style",
"seaborn.barh",
"seaborn.tick_params",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.grid"
] |
[((190, 210), 'seaborn.set_style', 'sb.set_style', (['"""dark"""'], {}), "('dark')\n", (202, 210), True, 'import seaborn as sb\n'), ((877, 897), 'seaborn.set_style', 'sb.set_style', (['"""dark"""'], {}), "('dark')\n", (889, 897), True, 'import seaborn as sb\n'), ((913, 927), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (925, 927), True, 'import matplotlib.pyplot as plt\n'), ((1276, 1337), 'seaborn.barh', 'sb.barh', (['ind', 'df.d', 'width'], {'color': '"""#C7F0A0"""', 'label': '"""CPU (Avg)"""'}), "(ind, df.d, width, color='#C7F0A0', label='CPU (Avg)')\n", (1283, 1337), True, 'import seaborn as sb\n'), ((1343, 1419), 'seaborn.barh', 'sb.barh', (['(ind + width)', 'df.e', 'width'], {'color': '"""#FFF4B2"""', 'label': '"""Connection (Max)"""'}), "(ind + width, df.e, width, color='#FFF4B2', label='Connection (Max)')\n", (1350, 1419), True, 'import seaborn as sb\n'), ((1425, 1499), 'seaborn.barh', 'sb.barh', (['(ind + width + width)', 'df.f', 'width'], {'color': '"""#FFACBC"""', 'label': '"""Tables"""'}), "(ind + width + width, df.f, width, color='#FFACBC', label='Tables')\n", (1432, 1499), True, 'import seaborn as sb\n'), ((1505, 1602), 'seaborn.barh', 'sb.barh', (['(ind + width + width + width)', 'df.g', 'width'], {'color': '"""#B1C1D8"""', 'label': '"""Storage Capacity"""'}), "(ind + width + width + width, df.g, width, color='#B1C1D8', label=\n 'Storage Capacity')\n", (1512, 1602), True, 'import seaborn as sb\n'), ((1719, 1737), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'axis': '"""x"""'}), "(axis='x')\n", (1727, 1737), True, 'import matplotlib.pyplot as plt\n'), ((1743, 1786), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Total Capacity %"""'], {'fontsize': '(15)'}), "('Total Capacity %', fontsize=15)\n", (1753, 1786), True, 'import matplotlib.pyplot as plt\n'), ((1792, 1845), 'seaborn.tick_params', 'sb.tick_params', ([], {'axis': '"""x"""', 'which': '"""major"""', 'labelsize': '(15)'}), "(axis='x', which='major', labelsize=15)\n", (1806, 1845), True, 'import seaborn as sb\n'), ((1693, 1714), 'numpy.arange', 'np.arange', (['(0)', '(110)', '(10)'], {}), '(0, 110, 10)\n', (1702, 1714), True, 'import numpy as np\n'), ((1990, 1999), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1997, 1999), True, 'import matplotlib.pyplot as plt\n'), ((1937, 1946), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1944, 1946), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 6 21:38:42 2019
"""
import numpy as np
from scipy import linalg
# try to keep it in block
##################### basic functions ################################################
def mass_action_law (ln_X, ln_K, A):
'''
    all inputs are numpy arrays!!!
    No activity corrections are applied.
    ln[C_i] = ln_K_i + Sum(a_ij*ln_X_j)
    ln_C = A*ln_X + ln_K
    parameters:
        - ln_X --> vector of primary variables
        - A --> stoichiometric matrix [columns=X, rows = C_i]
        - ln_K --> vector of equilibrium constants (natural log)
'''
ln_C = ln_K+np.matmul(A,ln_X)
return ln_C
def u_componentvector(A,C):
'''
    - A --> stoichiometric matrix [columns=X, rows = C_i]
- C --> vector of concentrations
'''
u = np.matmul(A.transpose(),C)
return u
def surface_charge_edgelayer_flm(C,psi_L0,psi_L1):
'''
A generic way to calculate the surface charge for layers on the edges in the flm i.e. the O layer
and the d layer. Using the flm theory.
- C --> the capacitance (i.e. C1 or C3 in the flm)
- psi_L0 --> the electrostatic potential in the reference layer (i.e. psi_O or psi_d in the flm model)
- psi_L1 --> the electrostatic potential away from the reference layer (i.e. the psi_C or psi_A in the flm model)
    Note: the user must be sure of the units; in general the electrostatic potential is in volts and
    the capacitance is in farads.
'''
sigma = C*(psi_L0-psi_L1)
return sigma
def surface_charge_between_layer_flm(C_left, C_right, psi_mid, psi_left, psi_right):
'''
    A generic way to calculate the surface charge for the in-between layers in the flm, i.e. the C layer
and the A layer. Using the flm theory.
- C_left --> The capacitance between the psi_mid and the psi_left (i.e. C1,C2 or C3)
- C_right --> The capacitance between the psi_mid and the psi_right
- psi_mid --> the electrostatic potential of the middle (i.e. the layer reference electrostatic potential. So, psi_C or psi_A in the flm model)
- psi_left --> the electrostatic potential on the left (i.e. psi_0 or psi_C in the flm model)
- psi_right --> the electrostatic potential on the right (i.e. psi_A or psi_d in the flm model)
    Note: the user must be sure of the units; in general the electrostatic potential is in volts and
    the capacitance is in farads.
'''
sigma = C_left*(psi_mid-psi_left) + C_right*(psi_mid-psi_right)
return sigma
def surface_charge_diffusive_monovalentelectrolyte (R, T, epsilon, epsilon_0, ionic_strength, F, psi_d):
'''
If previously the units were important, here the coherence between units is even more important
    sigma_d = -(8*1000*R*T*epsilon_0*epsilon*I)^(1/2) * sinh((F*psi_d)/(2*R*T))
'''
partA = np.sqrt(8*1000*R*T*epsilon*epsilon_0*ionic_strength)
inner_B = (F*psi_d)/(2*R*T)
partB = np.sinh(inner_B)
sigma_d = partA*partB
return sigma_d
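# Hedged illustrative sketch of the diffuse-layer charge above: the numbers
# below (T = 298.15 K, relative permittivity 78.5, I = 0.1 mol/L, psi_d = 0.05 V)
# are assumptions chosen only to exercise the function, not values taken from
# any particular problem.
def _example_sigma_d():
    R = 8.314472                  # J/(K*mol)
    F = 96485.3328959             # C/mol
    epsilon_0 = 8.854187871e-12   # F/m
    # ~0.04 C/m^2 for these assumed inputs
    return surface_charge_diffusive_monovalentelectrolyte(R, 298.15, 78.5,
                                                          epsilon_0, 0.1, F, 0.05)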
def charge_2_mol (charge, s, a, F):
'''
    The surface charge is multiplied by the specific surface area (or area) and the solid concentration (or grams), depending on what is desired.
    The units should be coherent and agree with the whole problem.
- s is the solid concentration (or grams)
- a is the specific surface area (or area)
- F is the Faraday constant
'''
Tmol = (charge*s*a)/F
return Tmol
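# Hedged illustrative sketch: converting an assumed surface charge density of
# 0.1 C/m^2 on a solid with s = 1 g/L and a = 10 m^2/g into mol/L. The numbers
# are arbitrary assumptions used only to show the unit conversion.
def _example_charge_2_mol():
    F = 96485.3328959   # C/mol
    return charge_2_mol(0.1, 1.0, 10.0, F)   # ~1.0e-5 mol/L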
def boltzman_2_psi(X, R, T, F):
'''
- X is the boltzman factor
- R is the universal gas constant
- T is the temperature
- F is the Faraday constant
As usual every constant should be coherent
'''
partA = (-R*T)/F
partB = np.log(X)
psi= partA*partB
return psi
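# Hedged consistency check (assumed T = 298.15 K): a Boltzmann factor X = 1
# must map to an electrostatic potential of 0 V.
def _example_boltzman_2_psi():
    R, T, F = 8.314472, 298.15, 96485.3328959
    return boltzman_2_psi(1.0, R, T, F)   # expected: 0.0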
def calculate_ionicstrength(Z,C):
'''
    Inputs are supposed to be numpy vectors.
    Z is the vector of charges.
'''
# Multiplication must be pointwise for the vector
# multiply function of numpy. Multiplies pointwise according to the documentation and own experience.
I = np.matmul(np.multiply(Z,Z),C)
I = I/2
return I
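# Hedged illustrative sketch: for a monovalent 1:1 electrolyte at an assumed
# 0.1 mol/L (e.g. NaCl), the ionic strength should equal the concentration.
def _example_ionic_strength():
    Z = np.array([1, -1])       # charges of the cation and anion
    C = np.array([0.1, 0.1])    # concentrations in mol/L
    return calculate_ionicstrength(Z, C)   # expected: 0.1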
####################### functions of basic functions ###############################
'relative to residual function'
def calculate_T (X, C, idx_Aq,pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, epsilon_0, C_vector, R, T, F,Z):
X_0 = X[pos_eb_0]
X_C = X[pos_eb_c]
X_A = X[pos_eb_a]
X_D = X[pos_eb_d]
'Now the psi'
psi_0 = boltzman_2_psi(X_0, R, temp, F)
psi_C = boltzman_2_psi(X_C, R, temp, F)
psi_A = boltzman_2_psi(X_A, R, temp, F)
psi_D = boltzman_2_psi(X_D, R, temp, F)
'Now the charge'
#previous to charge
Caq = C[idx_Aq]
ionic_strength = calculate_ionicstrength(Z, Caq)
# charge
sigma_0 = surface_charge_edgelayer_flm(C_vector[0],psi_0,psi_C)
sigma_C = surface_charge_between_layer_flm(C_vector[0], C_vector[1], psi_C, psi_0, psi_A)
sigma_A = surface_charge_between_layer_flm(C_vector[1], C_vector[2], psi_A, psi_C, psi_D)
sigma_d_flm = surface_charge_edgelayer_flm(C_vector[2],psi_D,psi_A)
sigma_d_pb = surface_charge_diffusive_monovalentelectrolyte (R, temp, epsilon, epsilon_0, ionic_strength, F, psi_D)
'Change value in the electrostatic positions'
T[pos_eb_0] = charge_2_mol(sigma_0, s, a, F)
T[pos_eb_c] = charge_2_mol(sigma_C, s, a, F)
T[pos_eb_a] = charge_2_mol(sigma_A, s, a, F)
T[pos_eb_d] = charge_2_mol(sigma_d_pb, s, a, F) + charge_2_mol(sigma_d_flm, s, a, F)
return T
def calculate_residual_function(T,ln_X, ln_K, A, idx_Aq, pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, epsilon_0, C_vector, R, F,Z, idx_fix_species = None):
ln_C = mass_action_law (ln_X, ln_K, A)
C = np.exp(ln_C)
u = u_componentvector(A,C)
X = np.exp(ln_X)
T = calculate_T (X, C, idx_Aq,pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, epsilon_0, C_vector, R, T, F, Z)
Y = u-T
if idx_fix_species != None:
Y[idx_fix_species]=0
return Y,T
'relative to Jacobian'
def calculate_J_classicalPart(ln_X, ln_K, A):
ln_C = mass_action_law (ln_X, ln_K, A)
C = np.exp(ln_C)
n = len(ln_X)
Z = np.zeros((n,n))
for i in range(0, n):
for j in range(0, n):
Z[i,j]= np.matmul(np.multiply(A[:,i], A[:,j]), C)
return Z, C
def calculate_electrostatic_part (J, s, a, R, T, C_vector, Caq, Z, F, pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, epsilon, epsilon_0, psi_d):
# plane 0
J[pos_eb_0,pos_eb_0] = J[pos_eb_0, pos_eb_0] + ((C_vector[0]*R*T*s*a)/(F*F))
J[pos_eb_0, pos_eb_c] = J[pos_eb_0, pos_eb_c] - ((C_vector[0]*R*T*s*a)/(F*F))
# plane C
J[pos_eb_c, pos_eb_0] = J[pos_eb_c, pos_eb_0] - ((C_vector[0]*R*T*s*a)/(F*F))
J[pos_eb_c, pos_eb_c] = J[pos_eb_c, pos_eb_c] + (((C_vector[0]+C_vector[1])*R*T*s*a)/(F*F))
J[pos_eb_c, pos_eb_a] = J[pos_eb_c, pos_eb_a] - ((C_vector[1]*R*T*s*a)/(F*F))
# plane A
J[pos_eb_a, pos_eb_c] = J[pos_eb_a, pos_eb_c] - ((C_vector[1]*R*T*s*a)/(F*F))
J[pos_eb_a, pos_eb_a] = J[pos_eb_a, pos_eb_a] + (((C_vector[1]+C_vector[2])*R*T*s*a)/(F*F))
J[pos_eb_a, pos_eb_d] = J[pos_eb_a, pos_eb_d] - ((C_vector[2]*R*T*s*a)/(F*F))
#plane D
J[pos_eb_d, pos_eb_a] = J[pos_eb_d, pos_eb_a] - ((R*T*s*a*C_vector[2])/(F*F))
J[pos_eb_d, pos_eb_d] = calculate_derivative_Td (C_vector[2], R, T, F, Caq, Z, epsilon, epsilon_0, psi_d,s,a)
return J
def calculate_derivative_Td (C, R, T, F, Caq, Z, epsilon, epsilon_0, psi_d,s,a):
ionic_strength = calculate_ionicstrength(Z, Caq)
#
DT_Dpsid = -np.sqrt(8*1000*R*T*epsilon*epsilon_0*ionic_strength)*np.cosh((F*psi_d)/(2*R*T))*(F/(2*R*T)) - C
Dpsid_DlnXpsid = (-R*T)/F
j_d = DT_Dpsid*Dpsid_DlnXpsid*((s*a)/F)
return j_d
def calculate_jacobian_function(ln_X, ln_K, A, idx_Aq, pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, epsilon_0, C_vector, R, F,Z,idx_fix_species = None):
length_X=len(ln_X)
#
[J,C] = calculate_J_classicalPart(ln_X, ln_K, A)
Caq = C[idx_Aq]
X = np.exp(ln_X)
psi_d = boltzman_2_psi(X[pos_eb_d], R, temp, F)
J = calculate_electrostatic_part (J, s, a, R, temp, C_vector, Caq, Z, F, pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, epsilon, epsilon_0, psi_d)
# finally just return Z
if idx_fix_species != None:
for d in idx_fix_species:
v=np.zeros(length_X)
v[d]=1
J[d,:] = v
return J
###################### SOLVING ####################################################
def four_layer_one_surface_speciation ( T, lnX_guess, A, Z, ln_k, idx_Aq,pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, C_vector, idx_fix_species = None, tolerance = 1e-6, max_iterations = 100, debug_flm = None):
'''
- T --> The vector of Total values (The electrostatic values will be recalculated, so it does not matter what has been introduced)
    - lnX_guess --> The vector of primary variables; it might be preconditioned in the future.
    - A --> stoichiometric and component matrix (i.e. transpose). Number of rows = number of species, Number of columns = number of primary variables
    - ln_k --> A vector of ln(equilibrium constant). Primary aqueous and sorption species have ln_k = 0
    - idx_Aq --> An index vector with the positions of the different aqueous species. It must coincide with the rows of "A".
    - Z --> The vector of charges of the different ions. The order is determined by the rows of "A" for aqueous species, i.e. it is linked to idx_Aq.
- pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d --> This is basically the position of the boltzman factor for the different planes
- s --> concentration of suspended solid.
- a --> is the specific surface area
- epsilon --> relative permittivity
- C_vector --> [C1, C2, C3]
- temp --> Temperature of the chemical system in Kelvins.
- debug_flm --> the class is given, only if important information about a problem is desired.
'''
# Instantiation of parameters that are constant
F = 96485.3328959 # C/mol
R = 8.314472 # J/(K*mol)
    epsilon_0 = 8.854187871e-12 # Farads = F/m - permittivity of vacuum
if idx_fix_species != None:
lnX_guess [idx_fix_species] = np.log(T [idx_fix_species])
ln_X = lnX_guess
#X = np.exp(ln_X)
# instantiation variables for loop
counter_iterations = 0
abs_err = tolerance + 1
while abs_err>tolerance and counter_iterations < max_iterations:
# Calculate Residual function
[Y,T] = calculate_residual_function(T,ln_X, ln_k, A, idx_Aq, pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, epsilon_0, C_vector, R, F,Z,idx_fix_species)
# Calculate Jacobian Residual function
J = calculate_jacobian_function(ln_X, ln_k, A, idx_Aq, pos_eb_0, pos_eb_c, pos_eb_a, pos_eb_d, temp, s, a, epsilon, epsilon_0, C_vector, R, F,Z, idx_fix_species)
#print(J)
# Here the precondition techniques can be implemented
# solve
delta_ln_X = linalg.solve(J,-Y)
#print(delta_ln_X)
#update X
#X = X*np.exp(delta_ln_X)
ln_X = ln_X + delta_ln_X
ln_C = mass_action_law (ln_X, ln_k, A)
C = np.exp(ln_C)
u = u_componentvector(A,C)
# Vector_error =
# error
d = u-T
if idx_fix_species != None:
d[idx_fix_species] =0
abs_err = max(abs(d))
        # Relaxation factor borrowed from <NAME> to avoid negative values
#max_1 = 1
#max_2 =np.amax(-2*np.multiply(delta_ln_X, 1/ln_X))
#Max_f = np.amax([max_1, max_2])
#Del_mul = 1/Max_f
#ln_X = Del_mul*delta_ln_X
#ln_X = ln_X+delta_ln_X
counter_iterations += 1
if counter_iterations >= max_iterations or np.isnan(abs_err):
raise ValueError('Max number of iterations surpassed.')
# things to do if goes well
X = np.exp(ln_X)
ln_C = mass_action_law (ln_X, ln_k, A)
C = np.exp(ln_C)
if debug_flm is not None:
return X, C, debug_flm
else:
return X, C
############################## DEBUG CLASS ############################################################
class Debug_flm:
def __init__(self):
self.array_Jacobians = []
self.array_residuals = []
self.array_tolerance = []
self.n_iterations = 0
def append_Jacobian (self,a):
self.array_Jacobians.append(a)
def append_Residual (self,a):
self.array_Jacobians.append(a)
def append_tolerance (self,a):
self.array_Jacobians.append(a)
def inc_iteration(self):
self.n_iterations += 1
|
[
"scipy.linalg.solve",
"numpy.multiply",
"numpy.log",
"numpy.zeros",
"numpy.isnan",
"numpy.exp",
"numpy.matmul",
"numpy.cosh",
"numpy.sinh",
"numpy.sqrt"
] |
[((2849, 2913), 'numpy.sqrt', 'np.sqrt', (['(8 * 1000 * R * T * epsilon * epsilon_0 * ionic_strength)'], {}), '(8 * 1000 * R * T * epsilon * epsilon_0 * ionic_strength)\n', (2856, 2913), True, 'import numpy as np\n'), ((2946, 2962), 'numpy.sinh', 'np.sinh', (['inner_B'], {}), '(inner_B)\n', (2953, 2962), True, 'import numpy as np\n'), ((3722, 3731), 'numpy.log', 'np.log', (['X'], {}), '(X)\n', (3728, 3731), True, 'import numpy as np\n'), ((5755, 5767), 'numpy.exp', 'np.exp', (['ln_C'], {}), '(ln_C)\n', (5761, 5767), True, 'import numpy as np\n'), ((5807, 5819), 'numpy.exp', 'np.exp', (['ln_X'], {}), '(ln_X)\n', (5813, 5819), True, 'import numpy as np\n'), ((6160, 6172), 'numpy.exp', 'np.exp', (['ln_C'], {}), '(ln_C)\n', (6166, 6172), True, 'import numpy as np\n'), ((6199, 6215), 'numpy.zeros', 'np.zeros', (['(n, n)'], {}), '((n, n))\n', (6207, 6215), True, 'import numpy as np\n'), ((8074, 8086), 'numpy.exp', 'np.exp', (['ln_X'], {}), '(ln_X)\n', (8080, 8086), True, 'import numpy as np\n'), ((12121, 12133), 'numpy.exp', 'np.exp', (['ln_X'], {}), '(ln_X)\n', (12127, 12133), True, 'import numpy as np\n'), ((12185, 12197), 'numpy.exp', 'np.exp', (['ln_C'], {}), '(ln_C)\n', (12191, 12197), True, 'import numpy as np\n'), ((617, 635), 'numpy.matmul', 'np.matmul', (['A', 'ln_X'], {}), '(A, ln_X)\n', (626, 635), True, 'import numpy as np\n'), ((4082, 4099), 'numpy.multiply', 'np.multiply', (['Z', 'Z'], {}), '(Z, Z)\n', (4093, 4099), True, 'import numpy as np\n'), ((10427, 10453), 'numpy.log', 'np.log', (['T[idx_fix_species]'], {}), '(T[idx_fix_species])\n', (10433, 10453), True, 'import numpy as np\n'), ((11210, 11229), 'scipy.linalg.solve', 'linalg.solve', (['J', '(-Y)'], {}), '(J, -Y)\n', (11222, 11229), False, 'from scipy import linalg\n'), ((11400, 11412), 'numpy.exp', 'np.exp', (['ln_C'], {}), '(ln_C)\n', (11406, 11412), True, 'import numpy as np\n'), ((11993, 12010), 'numpy.isnan', 'np.isnan', (['abs_err'], {}), '(abs_err)\n', (12001, 12010), True, 'import numpy as np\n'), ((8392, 8410), 'numpy.zeros', 'np.zeros', (['length_X'], {}), '(length_X)\n', (8400, 8410), True, 'import numpy as np\n'), ((6301, 6330), 'numpy.multiply', 'np.multiply', (['A[:, i]', 'A[:, j]'], {}), '(A[:, i], A[:, j])\n', (6312, 6330), True, 'import numpy as np\n'), ((7661, 7693), 'numpy.cosh', 'np.cosh', (['(F * psi_d / (2 * R * T))'], {}), '(F * psi_d / (2 * R * T))\n', (7668, 7693), True, 'import numpy as np\n'), ((7608, 7672), 'numpy.sqrt', 'np.sqrt', (['(8 * 1000 * R * T * epsilon * epsilon_0 * ionic_strength)'], {}), '(8 * 1000 * R * T * epsilon * epsilon_0 * ionic_strength)\n', (7615, 7672), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import sys
import shutil
from pathlib import Path
from multiprocessing import Pool
import numpy as np
import spectral_cube
from astropy import convolution
sys.path.append('/lustre/aoc/users/bsvoboda/temp/nestfit')
import nestfit as nf
from nestfit.main import get_irdc_priors
from . import (PDir, read_cube, TARGETS, VELOS)
TARGETS_NOMOS = [t for t in TARGETS if t != 'G285_mosaic']
def get_cubestack(target, rms=0.33):
im_11_cube = read_cube(PDir.vla_map_name(target, 'nh3_11',
modif='jfeather', ext='image.fits'))
pb_11_cube = read_cube(PDir.vla_map_name(target, 'nh3_11',
modif='joint', ext='pb.fits'))
im_22_cube = read_cube(PDir.vla_map_name(target, 'nh3_22',
modif='jfeather', ext='image.fits'))
pb_22_cube = read_cube(PDir.vla_map_name(target, 'nh3_22',
modif='joint', ext='pb.fits'))
nm_11 = nf.NoiseMap.from_pbimg(rms, pb_11_cube._data)
nm_22 = nf.NoiseMap.from_pbimg(rms, pb_22_cube._data)
cubes = (
nf.DataCube(im_11_cube, noise_map=nm_11, trans_id=1),
nf.DataCube(im_22_cube, noise_map=nm_22, trans_id=2),
)
stack = nf.CubeStack(cubes)
    # the first channel in some of the datasets is all NaNs
stack.cubes[0].data[:,:,0] = 0
stack.cubes[1].data[:,:,0] = 0
return stack
def get_runner(stack, utrans, ncomp=1):
spec_data, has_nans = stack.get_spec_data(190, 190)
assert not has_nans
runner = nf.AmmoniaRunner.from_data(spec_data, utrans, ncomp=ncomp)
return runner
def get_bins(vsys):
bin_minmax = [
(vsys-4.0, vsys+4.0), # vcen
( 7.0, 30.0), # trot
( 2.8, 12.0), # tex
(12.5, 16.5), # ncol
( 0.0, 2.0), # sigm
( 0.0, 1.0), # orth
]
bins = np.array([
np.linspace(lo, hi, 200)
for (lo, hi) in bin_minmax
])
return bins
def if_exists_delete_store(name):
filen = f'{name}.store'
if Path(filen).exists():
print(f'-- Deleting {filen}')
shutil.rmtree(filen)
def run_nested(target, store_prefix, nproc=8):
store_name = f'data/run/{store_prefix}_{target}'
if_exists_delete_store(store_name)
utrans = get_irdc_priors(vsys=VELOS[target])
runner_cls = nf.AmmoniaRunner
stack = get_cubestack(target)
fitter = nf.CubeFitter(stack, utrans, runner_cls, ncomp_max=2,
nlive_snr_fact=5)
fitter.fit_cube(store_name=store_name, nproc=nproc)
def run_nested_all(store_prefix, nproc=8):
for target in TARGETS_NOMOS:
run_nested(target, store_prefix, nproc=nproc)
def postprocess_run(target, store_prefix):
evid_kernel = convolution.Gaussian2DKernel(1.5) # std-dev in pixels
s2 = np.sqrt(2) / 2
k_arr = np.array([
[s2**2, s2**1, s2**2],
[s2**1, s2**0, s2**1],
[s2**2, s2**1, s2**2],
])
post_kernel = convolution.CustomKernel(k_arr)
utrans = get_irdc_priors(vsys=VELOS[target])
par_bins = get_bins(VELOS[target])
store_name = f'data/run/{store_prefix}_{target}'
store = nf.HdfStore(store_name)
stack = get_cubestack(target)
runner = get_runner(stack, utrans, ncomp=1)
# begin post-processing steps
nf.aggregate_run_attributes(store)
nf.convolve_evidence(store, evid_kernel)
nf.aggregate_run_products(store)
nf.aggregate_run_pdfs(store, par_bins=par_bins)
nf.convolve_post_pdfs(store, post_kernel, evid_weight=False)
nf.quantize_conv_marginals(store)
nf.deblend_hf_intensity(store, stack, runner)
store.close()
def parallel_postprocess(store_prefix, nproc=12):
args = zip(
TARGETS_NOMOS,
[store_prefix] * len(TARGETS_NOMOS),
)
with Pool(nproc) as pool:
pool.starmap(postprocess_run, args)
if __name__ == '__main__':
prefix = 'nested'
args = sys.argv[1:]
assert len(args) > 0
assert args[0] in ('--run-nested', '--post-proc')
flag = args[0]
if flag == '--run-nested':
run_nested_all(prefix, nproc=16)
elif flag == '--post-proc':
parallel_postprocess(prefix, nproc=12)
|
[
"astropy.convolution.Gaussian2DKernel",
"nestfit.aggregate_run_attributes",
"astropy.convolution.CustomKernel",
"nestfit.CubeStack",
"nestfit.CubeFitter",
"pathlib.Path",
"shutil.rmtree",
"nestfit.convolve_evidence",
"nestfit.main.get_irdc_priors",
"sys.path.append",
"nestfit.HdfStore",
"nestfit.convolve_post_pdfs",
"numpy.linspace",
"nestfit.aggregate_run_pdfs",
"nestfit.deblend_hf_intensity",
"multiprocessing.Pool",
"nestfit.NoiseMap.from_pbimg",
"nestfit.AmmoniaRunner.from_data",
"nestfit.aggregate_run_products",
"numpy.array",
"nestfit.DataCube",
"nestfit.quantize_conv_marginals",
"numpy.sqrt"
] |
[((181, 239), 'sys.path.append', 'sys.path.append', (['"""/lustre/aoc/users/bsvoboda/temp/nestfit"""'], {}), "('/lustre/aoc/users/bsvoboda/temp/nestfit')\n", (196, 239), False, 'import sys\n'), ((899, 944), 'nestfit.NoiseMap.from_pbimg', 'nf.NoiseMap.from_pbimg', (['rms', 'pb_11_cube._data'], {}), '(rms, pb_11_cube._data)\n', (921, 944), True, 'import nestfit as nf\n'), ((957, 1002), 'nestfit.NoiseMap.from_pbimg', 'nf.NoiseMap.from_pbimg', (['rms', 'pb_22_cube._data'], {}), '(rms, pb_22_cube._data)\n', (979, 1002), True, 'import nestfit as nf\n'), ((1167, 1186), 'nestfit.CubeStack', 'nf.CubeStack', (['cubes'], {}), '(cubes)\n', (1179, 1186), True, 'import nestfit as nf\n'), ((1462, 1520), 'nestfit.AmmoniaRunner.from_data', 'nf.AmmoniaRunner.from_data', (['spec_data', 'utrans'], {'ncomp': 'ncomp'}), '(spec_data, utrans, ncomp=ncomp)\n', (1488, 1520), True, 'import nestfit as nf\n'), ((2232, 2267), 'nestfit.main.get_irdc_priors', 'get_irdc_priors', ([], {'vsys': 'VELOS[target]'}), '(vsys=VELOS[target])\n', (2247, 2267), False, 'from nestfit.main import get_irdc_priors\n'), ((2349, 2420), 'nestfit.CubeFitter', 'nf.CubeFitter', (['stack', 'utrans', 'runner_cls'], {'ncomp_max': '(2)', 'nlive_snr_fact': '(5)'}), '(stack, utrans, runner_cls, ncomp_max=2, nlive_snr_fact=5)\n', (2362, 2420), True, 'import nestfit as nf\n'), ((2684, 2717), 'astropy.convolution.Gaussian2DKernel', 'convolution.Gaussian2DKernel', (['(1.5)'], {}), '(1.5)\n', (2712, 2717), False, 'from astropy import convolution\n'), ((2775, 2877), 'numpy.array', 'np.array', (['[[s2 ** 2, s2 ** 1, s2 ** 2], [s2 ** 1, s2 ** 0, s2 ** 1], [s2 ** 2, s2 ** \n 1, s2 ** 2]]'], {}), '([[s2 ** 2, s2 ** 1, s2 ** 2], [s2 ** 1, s2 ** 0, s2 ** 1], [s2 ** \n 2, s2 ** 1, s2 ** 2]])\n', (2783, 2877), True, 'import numpy as np\n'), ((2916, 2947), 'astropy.convolution.CustomKernel', 'convolution.CustomKernel', (['k_arr'], {}), '(k_arr)\n', (2940, 2947), False, 'from astropy import convolution\n'), ((2961, 2996), 'nestfit.main.get_irdc_priors', 'get_irdc_priors', ([], {'vsys': 'VELOS[target]'}), '(vsys=VELOS[target])\n', (2976, 2996), False, 'from nestfit.main import get_irdc_priors\n'), ((3101, 3124), 'nestfit.HdfStore', 'nf.HdfStore', (['store_name'], {}), '(store_name)\n', (3112, 3124), True, 'import nestfit as nf\n'), ((3245, 3279), 'nestfit.aggregate_run_attributes', 'nf.aggregate_run_attributes', (['store'], {}), '(store)\n', (3272, 3279), True, 'import nestfit as nf\n'), ((3284, 3324), 'nestfit.convolve_evidence', 'nf.convolve_evidence', (['store', 'evid_kernel'], {}), '(store, evid_kernel)\n', (3304, 3324), True, 'import nestfit as nf\n'), ((3329, 3361), 'nestfit.aggregate_run_products', 'nf.aggregate_run_products', (['store'], {}), '(store)\n', (3354, 3361), True, 'import nestfit as nf\n'), ((3366, 3413), 'nestfit.aggregate_run_pdfs', 'nf.aggregate_run_pdfs', (['store'], {'par_bins': 'par_bins'}), '(store, par_bins=par_bins)\n', (3387, 3413), True, 'import nestfit as nf\n'), ((3418, 3478), 'nestfit.convolve_post_pdfs', 'nf.convolve_post_pdfs', (['store', 'post_kernel'], {'evid_weight': '(False)'}), '(store, post_kernel, evid_weight=False)\n', (3439, 3478), True, 'import nestfit as nf\n'), ((3483, 3516), 'nestfit.quantize_conv_marginals', 'nf.quantize_conv_marginals', (['store'], {}), '(store)\n', (3509, 3516), True, 'import nestfit as nf\n'), ((3521, 3566), 'nestfit.deblend_hf_intensity', 'nf.deblend_hf_intensity', (['store', 'stack', 'runner'], {}), '(store, stack, runner)\n', (3544, 3566), True, 'import nestfit as nf\n'), ((1029, 1081), 
'nestfit.DataCube', 'nf.DataCube', (['im_11_cube'], {'noise_map': 'nm_11', 'trans_id': '(1)'}), '(im_11_cube, noise_map=nm_11, trans_id=1)\n', (1040, 1081), True, 'import nestfit as nf\n'), ((1095, 1147), 'nestfit.DataCube', 'nf.DataCube', (['im_22_cube'], {'noise_map': 'nm_22', 'trans_id': '(2)'}), '(im_22_cube, noise_map=nm_22, trans_id=2)\n', (1106, 1147), True, 'import nestfit as nf\n'), ((2057, 2077), 'shutil.rmtree', 'shutil.rmtree', (['filen'], {}), '(filen)\n', (2070, 2077), False, 'import shutil\n'), ((2748, 2758), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (2755, 2758), True, 'import numpy as np\n'), ((3744, 3755), 'multiprocessing.Pool', 'Pool', (['nproc'], {}), '(nproc)\n', (3748, 3755), False, 'from multiprocessing import Pool\n'), ((1831, 1855), 'numpy.linspace', 'np.linspace', (['lo', 'hi', '(200)'], {}), '(lo, hi, 200)\n', (1842, 1855), True, 'import numpy as np\n'), ((1989, 2000), 'pathlib.Path', 'Path', (['filen'], {}), '(filen)\n', (1993, 2000), False, 'from pathlib import Path\n')]
|
from entente.landmarks.symmetrize_landmarks import (
symmetrize_landmarks_using_plane,
symmetrize_landmarks_using_topology,
)
import numpy as np
from polliwog import Plane
import pytest
from vg.compat import v1 as vg
from ..test_symmetry import create_seat_and_arm_mesh
def test_symmetrize_landmarks_using_plane():
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
symmetrized = symmetrize_landmarks_using_plane(Plane.yz, original)
np.testing.assert_allclose(symmetrized, original, atol=1)
mirrored = np.copy(original)
mirrored[:, 0] = -mirrored[:, 0]
np.testing.assert_allclose(np.flipud(symmetrized), mirrored, atol=1)
distances_to_original = vg.euclidean_distance(symmetrized, original)
distances_to_mirrored = vg.euclidean_distance(np.flipud(symmetrized), mirrored)
np.testing.assert_allclose(distances_to_original, distances_to_mirrored, atol=1e-1)
def test_symmetrize_landmarks_using_plane_non_plane():
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
with pytest.raises(ValueError, match=r"plane_of_symmetry should be a Plane"):
symmetrize_landmarks_using_plane("not_a_plane", original)
def test_symmetrize_landmarks_using_topology():
mesh = create_seat_and_arm_mesh()
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
symmetrized = symmetrize_landmarks_using_topology(
mesh, Plane.yz, original, atol=1e-1
)
np.testing.assert_allclose(symmetrized, original, atol=1)
mirrored = np.copy(original)
mirrored[:, 0] = -mirrored[:, 0]
np.testing.assert_allclose(np.flipud(symmetrized), mirrored, atol=1)
distances_to_original = vg.euclidean_distance(symmetrized, original)
distances_to_mirrored = vg.euclidean_distance(np.flipud(symmetrized), mirrored)
np.testing.assert_allclose(distances_to_original, distances_to_mirrored, atol=1e-1)
def test_symmetrize_landmarks_using_topology_asymmetrical():
mesh = create_seat_and_arm_mesh().translated(np.array([50.0, 0.0, 0.0]))
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
with pytest.raises(
ValueError, match=r"Some landmarks are near triangles which are not mirrored"
):
symmetrize_landmarks_using_topology(mesh, Plane.yz, original, atol=1e-1)
def test_symmetrize_landmarks_using_topology_non_plane():
mesh = create_seat_and_arm_mesh().translated(np.array([50.0, 0.0, 0.0]))
original = np.array([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])
with pytest.raises(ValueError, match=r"plane_of_symmetry should be a Plane"):
symmetrize_landmarks_using_topology(mesh, "not_a_plane", original, atol=1e-1)
|
[
"numpy.copy",
"numpy.flipud",
"pytest.raises",
"vg.compat.v1.euclidean_distance",
"numpy.array",
"numpy.testing.assert_allclose",
"entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_plane",
"entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_topology"
] |
[((340, 410), 'numpy.array', 'np.array', (['[[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]]'], {}), '([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])\n', (348, 410), True, 'import numpy as np\n'), ((429, 481), 'entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_plane', 'symmetrize_landmarks_using_plane', (['Plane.yz', 'original'], {}), '(Plane.yz, original)\n', (461, 481), False, 'from entente.landmarks.symmetrize_landmarks import symmetrize_landmarks_using_plane, symmetrize_landmarks_using_topology\n'), ((487, 544), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['symmetrized', 'original'], {'atol': '(1)'}), '(symmetrized, original, atol=1)\n', (513, 544), True, 'import numpy as np\n'), ((561, 578), 'numpy.copy', 'np.copy', (['original'], {}), '(original)\n', (568, 578), True, 'import numpy as np\n'), ((719, 763), 'vg.compat.v1.euclidean_distance', 'vg.euclidean_distance', (['symmetrized', 'original'], {}), '(symmetrized, original)\n', (740, 763), True, 'from vg.compat import v1 as vg\n'), ((852, 938), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['distances_to_original', 'distances_to_mirrored'], {'atol': '(0.1)'}), '(distances_to_original, distances_to_mirrored,\n atol=0.1)\n', (878, 938), True, 'import numpy as np\n'), ((1008, 1078), 'numpy.array', 'np.array', (['[[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]]'], {}), '([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])\n', (1016, 1078), True, 'import numpy as np\n'), ((1330, 1400), 'numpy.array', 'np.array', (['[[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]]'], {}), '([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])\n', (1338, 1400), True, 'import numpy as np\n'), ((1419, 1490), 'entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_topology', 'symmetrize_landmarks_using_topology', (['mesh', 'Plane.yz', 'original'], {'atol': '(0.1)'}), '(mesh, Plane.yz, original, atol=0.1)\n', (1454, 1490), False, 'from entente.landmarks.symmetrize_landmarks import symmetrize_landmarks_using_plane, symmetrize_landmarks_using_topology\n'), ((1511, 1568), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['symmetrized', 'original'], {'atol': '(1)'}), '(symmetrized, original, atol=1)\n', (1537, 1568), True, 'import numpy as np\n'), ((1585, 1602), 'numpy.copy', 'np.copy', (['original'], {}), '(original)\n', (1592, 1602), True, 'import numpy as np\n'), ((1743, 1787), 'vg.compat.v1.euclidean_distance', 'vg.euclidean_distance', (['symmetrized', 'original'], {}), '(symmetrized, original)\n', (1764, 1787), True, 'from vg.compat import v1 as vg\n'), ((1876, 1962), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['distances_to_original', 'distances_to_mirrored'], {'atol': '(0.1)'}), '(distances_to_original, distances_to_mirrored,\n atol=0.1)\n', (1902, 1962), True, 'import numpy as np\n'), ((2115, 2185), 'numpy.array', 'np.array', (['[[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]]'], {}), '([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])\n', (2123, 2185), True, 'import numpy as np\n'), ((2536, 2606), 'numpy.array', 'np.array', (['[[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]]'], {}), '([[-18.5657, 54.7161, -19.5649], [20.0896, 54.919, -19.5738]])\n', (2544, 2606), True, 'import numpy as np\n'), ((648, 670), 'numpy.flipud', 'np.flipud', (['symmetrized'], {}), '(symmetrized)\n', (657, 670), True, 'import numpy as np\n'), ((814, 836), 'numpy.flipud', 
'np.flipud', (['symmetrized'], {}), '(symmetrized)\n', (823, 836), True, 'import numpy as np\n'), ((1088, 1158), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""plane_of_symmetry should be a Plane"""'}), "(ValueError, match='plane_of_symmetry should be a Plane')\n", (1101, 1158), False, 'import pytest\n'), ((1169, 1226), 'entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_plane', 'symmetrize_landmarks_using_plane', (['"""not_a_plane"""', 'original'], {}), "('not_a_plane', original)\n", (1201, 1226), False, 'from entente.landmarks.symmetrize_landmarks import symmetrize_landmarks_using_plane, symmetrize_landmarks_using_topology\n'), ((1672, 1694), 'numpy.flipud', 'np.flipud', (['symmetrized'], {}), '(symmetrized)\n', (1681, 1694), True, 'import numpy as np\n'), ((1838, 1860), 'numpy.flipud', 'np.flipud', (['symmetrized'], {}), '(symmetrized)\n', (1847, 1860), True, 'import numpy as np\n'), ((2072, 2098), 'numpy.array', 'np.array', (['[50.0, 0.0, 0.0]'], {}), '([50.0, 0.0, 0.0])\n', (2080, 2098), True, 'import numpy as np\n'), ((2195, 2291), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Some landmarks are near triangles which are not mirrored"""'}), "(ValueError, match=\n 'Some landmarks are near triangles which are not mirrored')\n", (2208, 2291), False, 'import pytest\n'), ((2311, 2382), 'entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_topology', 'symmetrize_landmarks_using_topology', (['mesh', 'Plane.yz', 'original'], {'atol': '(0.1)'}), '(mesh, Plane.yz, original, atol=0.1)\n', (2346, 2382), False, 'from entente.landmarks.symmetrize_landmarks import symmetrize_landmarks_using_plane, symmetrize_landmarks_using_topology\n'), ((2493, 2519), 'numpy.array', 'np.array', (['[50.0, 0.0, 0.0]'], {}), '([50.0, 0.0, 0.0])\n', (2501, 2519), True, 'import numpy as np\n'), ((2616, 2686), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""plane_of_symmetry should be a Plane"""'}), "(ValueError, match='plane_of_symmetry should be a Plane')\n", (2629, 2686), False, 'import pytest\n'), ((2697, 2773), 'entente.landmarks.symmetrize_landmarks.symmetrize_landmarks_using_topology', 'symmetrize_landmarks_using_topology', (['mesh', '"""not_a_plane"""', 'original'], {'atol': '(0.1)'}), "(mesh, 'not_a_plane', original, atol=0.1)\n", (2732, 2773), False, 'from entente.landmarks.symmetrize_landmarks import symmetrize_landmarks_using_plane, symmetrize_landmarks_using_topology\n')]
|
"""
Show some diagnostic plots for an LNGS wav. Usage:
plotwav.py [filename]
If not specified, the file read is
darksidehd/nuvhd_lf_3x_tile57_77K_64V_6VoV_1.wav. The plots are:
* A histogram of all data;
* A temporal plot of some events;
* The temporal distribution of the trigger rising edge.
At most 1000 events are read from the wav.
"""
import sys
import numpy as np
from matplotlib import pyplot as plt
import readwav
import fighelp
if len(sys.argv) == 1:
filename = 'darksidehd/nuvhd_lf_3x_tile57_77K_64V_6VoV_1.wav'
else:
filename = sys.argv[1]
data = readwav.readwav(filename, mmap=False, maxevents=1000)
signal = data[:, 0, :].reshape(-1)
trigger = data[:, 1, :].reshape(-1)
print('computing global histogram...')
fig = fighelp.figwithsize([11.8, 4.8], resetfigcount=True)
ax = fig.subplots(1, 1)
ax.set_title('Histogram of all data')
ax.set_xlabel('ADC value')
ax.set_ylabel('occurrences')
ax.plot(np.bincount(signal, minlength=1024), drawstyle='steps', label='signal')
ax.plot(np.bincount(trigger, minlength=1024), drawstyle='steps', label='trigger')
ax.set_yscale('symlog')
ax.set_ylim(-1, ax.get_ylim()[1])
ax.grid()
ax.legend(loc='best')
fighelp.saveaspng(fig)
fig.show()
fig = fighelp.figwithsize([8.21, 5.09])
ax = fig.subplots(1, 1)
start = 0
s = slice(start, start + 125000)
ax.plot(signal[s], ',', color='red', label='signal')
ax.plot(trigger[s], ',', color='blue', label='trigger')
ax.set_ylim(-1, 2**10 + 1)
ax.legend(loc='best')
ax.set_title('Original signal')
ax.set_xlabel(f'Sample number (starting from {start})')
ax.set_ylabel('ADC value')
fighelp.saveaspng(fig)
fig.show()
fig = fighelp.figwithsize()
ax = fig.subplots(1, 1)
trig = readwav.first_nonzero(data[:, 1, :] < 600)
ax.hist(trig, bins='auto', histtype='step')
ax.set_title('Distribution of trigger rising edge')
ax.set_xlabel('Sample number @ 1 GSa/s')
ax.set_ylabel('Counts per bin')
fighelp.saveaspng(fig)
fig.show()
|
[
"readwav.readwav",
"fighelp.saveaspng",
"fighelp.figwithsize",
"readwav.first_nonzero",
"numpy.bincount"
] |
[((588, 641), 'readwav.readwav', 'readwav.readwav', (['filename'], {'mmap': '(False)', 'maxevents': '(1000)'}), '(filename, mmap=False, maxevents=1000)\n', (603, 641), False, 'import readwav\n'), ((761, 813), 'fighelp.figwithsize', 'fighelp.figwithsize', (['[11.8, 4.8]'], {'resetfigcount': '(True)'}), '([11.8, 4.8], resetfigcount=True)\n', (780, 813), False, 'import fighelp\n'), ((1187, 1209), 'fighelp.saveaspng', 'fighelp.saveaspng', (['fig'], {}), '(fig)\n', (1204, 1209), False, 'import fighelp\n'), ((1228, 1261), 'fighelp.figwithsize', 'fighelp.figwithsize', (['[8.21, 5.09]'], {}), '([8.21, 5.09])\n', (1247, 1261), False, 'import fighelp\n'), ((1606, 1628), 'fighelp.saveaspng', 'fighelp.saveaspng', (['fig'], {}), '(fig)\n', (1623, 1628), False, 'import fighelp\n'), ((1647, 1668), 'fighelp.figwithsize', 'fighelp.figwithsize', ([], {}), '()\n', (1666, 1668), False, 'import fighelp\n'), ((1702, 1744), 'readwav.first_nonzero', 'readwav.first_nonzero', (['(data[:, 1, :] < 600)'], {}), '(data[:, 1, :] < 600)\n', (1723, 1744), False, 'import readwav\n'), ((1917, 1939), 'fighelp.saveaspng', 'fighelp.saveaspng', (['fig'], {}), '(fig)\n', (1934, 1939), False, 'import fighelp\n'), ((941, 976), 'numpy.bincount', 'np.bincount', (['signal'], {'minlength': '(1024)'}), '(signal, minlength=1024)\n', (952, 976), True, 'import numpy as np\n'), ((1021, 1057), 'numpy.bincount', 'np.bincount', (['trigger'], {'minlength': '(1024)'}), '(trigger, minlength=1024)\n', (1032, 1057), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 28 17:27:37 2019
@author: <NAME>
"""
import copy
import numpy as np
import populationevolution as popev
import populationevolution.raggedtoregular as r2r
import SweepApproximationFunctions as SAF
def compareNeq_Ntrue(mu_min, delta_f, M, P_mu, K, t):
Neq = SAF.findNeq(mu_min, delta_f, M, P_mu, K)
N_start = Neq.astype('int64')
rounding_error = K - np.sum(N_start)
N_start[0,0] = N_start[0,0] + rounding_error
pop = popev.Population(0, mu_min, N_start, delta_f, M, 0, 0, P_mu, K)
mu_list = pop.mutation_list
delta_Ns = [diffNeq_Npop(mu_list, Neq, pop.mutation_list,
pop.population_distribution)]
for i in range(t):
pop.update()
delta_Ns.append(diffNeq_Npop(mu_list, Neq, pop.mutation_list,
pop.population_distribution))
delta_Ns = r2r.ragged_to_regular(delta_Ns)
new_shape = (delta_Ns.shape[1], delta_Ns.shape[2])
Neq_rs = np.zeros(new_shape, dtype=Neq.dtype)
Neq_rs[:Neq.shape[0],:Neq.shape[1]] = Neq
mu_list_rs = np.minimum(np.geomspace(mu_min,
mu_min*M**(new_shape[1]-1),
new_shape[1]),1)
f_list = np.linspace(0, -delta_f*new_shape[0], new_shape[0],
endpoint=False)
return mu_list_rs, f_list, Neq_rs, delta_Ns
def diffNeq_Npop(mu_list, Neq, pop_mut, pop_dist):
fs, mus = np.maximum(pop_dist.shape, Neq.shape)
M = mu_list[1]/mu_list[0]
Neq_padded = np.zeros((fs,mus),dtype=Neq.dtype)
Neq_padded[:Neq.shape[0],:Neq.shape[1]]=Neq
mu_list_padded = np.minimum(mu_list[0]*M**np.arange(mus),1)
pop_dist_padded = np.zeros((fs,mus),dtype=pop_dist.dtype)
pop_dist_padded[:pop_dist.shape[0],:pop_dist.shape[1]]=pop_dist
pop_mut_padded = np.minimum(pop_mut[0]*M**np.arange(mus),1)
if not np.allclose(pop_mut_padded, mu_list_padded):
print('eq_mu_list:', mu_list_padded)
print('pop_mu_list:', pop_mut_padded)
raise RuntimeError('the mutation rates of Neq and the population being'
'tested do not match.')
return pop_dist_padded - Neq_padded
def mean_probability_error(delta_Ns, K):
'''
    Calculate, for each time point, the absolute value of the time-averaged
    deviation of the population from the approximate equilibrium, summed over
    classes and normalized by 2*K so that completely non-overlapping
    distributions give a value of 1.'''
# The first axis of the array delta_Ns is the time axis so taking
# the cumulative sum and dividing by the array ts give us delta_N
# averaged over time for increasing values of time. The second two axes
# are the fitness and mutation rate axes so taking the absolute value,
# summing over those and dividing by 2*K gives us a normalized measure
# of the deviation of N from the approximate equilibrium. We divide by 2
# because that way if there is no overlap in the distributions the measure
# will equal 1.
ts = np.arange(1,delta_Ns.shape[0]+1).reshape((-1,1,1))
return np.sum(np.abs(np.cumsum(delta_Ns,axis=0)/ts), axis=(1,2))/(2*K)
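# Hedged toy example of the normalization described above (assumed K = 100):
# with a single time step in which the whole population sits in a class where
# the equilibrium expects nobody (and vice versa), the distributions do not
# overlap at all and the measure should be exactly 1.
def _example_mean_probability_error():
    K = 100
    delta_Ns = np.array([[[K, -K]]])   # shape (time, fitness, mutation rate)
    return mean_probability_error(delta_Ns, K)   # expected: array([1.])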
def delta_over_std(Neq, delta_N):
K = np.sum(Neq)
std = np.sqrt(Neq*(1-Neq/K))
return delta_N/std
def invader_Neq(mu_min, delta_f, M, P_mu, K, inv_f_step, inv_mu_step):
s = SAF.effective_fitness_difference(0, delta_f*inv_f_step, mu_min,
mu_min*float(M)**inv_mu_step)
if inv_f_step < 0:
raise ValueError('Invading mutants must have a fitness equal to or'
' above the maximum fitness in the invaded'
' population.')
elif inv_f_step == 0:
if inv_mu_step > -1:
raise ValueError('Invading mutants at the same fitness as the'
' invaded population must have a lower mutation'
' rate.')
else:
if SAF.fixation_probability(K, s) == 0:
raise ValueError('Invading mutants must have an effective increase'
' in fitness or be effectively neutral. Try '
'increasing the fitness or decreasing the'
' mutation rate of the invader.')
Neq = SAF.findNeq(mu_min, delta_f, M, P_mu, K)
N_start = Neq.astype('int64')
rounding_error = K - np.sum(N_start)
N_start[0,0] = N_start[0,0] + rounding_error - 1
vertical_pad = (inv_f_step, 0)
if inv_mu_step < 0:
horizontal_pad = (np.abs(inv_mu_step),0)
elif inv_mu_step >= N_start.shape[1]:
horizontal_pad = (0, inv_mu_step - N_start.shape[1])
else:
horizontal_pad = (0,0)
N_start = np.pad(N_start, (vertical_pad, horizontal_pad),mode='constant')
N_start[0,np.maximum(0,inv_mu_step)] = 1
return N_start
def invasion(mu_min, delta_f, M, P_mu, K, inv_f_step, inv_mu_step,
max_steps=10**6):
N_start = invader_Neq(mu_min, delta_f, M, P_mu, K, inv_f_step, inv_mu_step)
mu_inv = mu_min*float(M)**inv_mu_step
f_inv = delta_f*inv_f_step
if inv_mu_step < 0:
mu_min2 = mu_inv
else:
mu_min2 = mu_min
pop = popev.Population(f_inv, mu_min2, N_start, delta_f, M, 0, 0, P_mu,
K)
threshold = .5*SAF.findNeq(mu_inv, delta_f, M, P_mu, K)[0,0]
for i in range(1,max_steps):
pop.update()
if pop(f_inv, mu_inv) == 0:
return False, i
if pop(f_inv, mu_inv) >= threshold:
return True, i
raise RuntimeError('The invading mutant failed to either fix or'
' go extinct before the maximum number of allowable'
' function evaluations was reached.')
def estimate_fix_prob(mu_min, delta_f, M, P_mu, K, inv_f_step, inv_mu_step):
s = SAF.effective_fitness_difference(0, delta_f*inv_f_step, mu_min,
mu_min*float(M)**inv_mu_step)
exp_fix_prob = SAF.fixation_probability(K, s)
if exp_fix_prob < 10**-5:
raise RuntimeError('Empirically estimating the fixation probability'
' for such an unlikely event is a waste of time.')
test_count = int(np.maximum(100*(1-exp_fix_prob)/exp_fix_prob,100))
fixations = 0
extinction_times = []
fixation_times = []
for i in range(test_count):
invader_survival, time = invasion(mu_min, delta_f, M, P_mu, K,
inv_f_step, inv_mu_step)
if invader_survival:
fixations = fixations + 1
fixation_times.append(time)
else:
extinction_times.append(time)
return test_count, fixations, fixation_times, extinction_times
def value_array_to_waiting_times(fmumodes):
'''
Change an array of the mode of the fitness and mutation rate to a pair of
arrays. The array of the mode should have a shape of (t, 2).
The first array is the value of the mode fitness and mutation rate
in the order they occur, the second is the time spent at each mode before
the change to the next value.
    Rapid fluctuations in the mode during a transition are smoothed out by
    looking a little ahead in the mode sequence after making this first pair
    of arrays and merging the corresponding waiting times.
'''
mode, tau = run_length_encoding(fmumodes)
return merge_flucs(mode, tau)
# shamelessly stolen from stackoverflow answer here
# https://stackoverflow.com/questions/1066758/
# find-length-of-sequences-of-identical-values-in-a-numpy-array-run-length-encodi
# by <NAME>. Then modified to work on arrays of shape (t,2) and
# rearranged to fit my old choice of argument and output orders
def run_length_encoding(inarray):
ia = np.asarray(inarray)
n = len(ia)
if n == 0:
return (None, None)
elif n == 1:
return (ia[0], 1)
else:
x = ia[1:] != ia[:-1]
y = np.logical_or(x[:,0], x[:,1])
i = np.append(np.where(y), n-1)
z = np.diff(np.append(-1, i))
return (ia[i], z)
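# Hedged toy example of the encoding above (assumed values): the mode stays at
# (f=0.0, mu=0.01) for two generations and then at (f=0.1, mu=0.01) for three,
# so the encoder should return those two modes with run lengths [2, 3].
def _example_run_length_encoding():
    fmumodes = np.array([[0.0, 0.01],
                          [0.0, 0.01],
                          [0.1, 0.01],
                          [0.1, 0.01],
                          [0.1, 0.01]])
    return run_length_encoding(fmumodes)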
def merge_flucs(fmus, taus):
l = len(taus)
i = 0
fmus_fused = []
taus_fused = []
curr = fmus[0]
taucurr = taus[0]
while i < l-2:
nex = fmus[i+1]
taunex = taus[i+1]
nexnex = fmus[i+2]
taunexnex = taus[i+2]
if np.all(curr==nexnex):
taucurr = taucurr + taunex + taunexnex
i = i + 2
else:
fmus_fused.append(curr)
taus_fused.append(taucurr)
curr = nex
taucurr = taunex
i = i + 1
return np.array(fmus_fused), np.array(taus_fused)
def waiting_times_to_waiting_dict(f_mu_pairs, waiting_times):
'''
    Convert a pair of arrays, where the first holds (fitness, mutation rate)
    pairs and the second holds the waiting times before the change to the
    next pair, into a dictionary of lists of lists giving the empirical
    distribution of waiting times for transitions from each mutation rate to
    either a higher fitness at the same mutation rate, a lower mutation rate,
    or a higher fitness and mutation rate, plus a grab bag for any transitions
    that were double sweeps (for example crossing two fitness steps at once).
'''
if len(f_mu_pairs) != len(waiting_times):
raise ValueError('The array of fitness and mutation rate pairs must be'
'of the same length as the array of waiting times.')
fs = np.unique(f_mu_pairs[:,0])
delta_f = np.median(np.diff(np.unique(fs)))
mus = np.unique(f_mu_pairs[:,1])
M = mus[1]/mus[0]
wts_by_mu = [[] for i in range(mus.size)]
wait_dict = {'f_up': copy.deepcopy(wts_by_mu), 'mu_down':
copy.deepcopy(wts_by_mu), 'mu_up': copy.deepcopy(wts_by_mu),
'mu_2up': copy.deepcopy(wts_by_mu),
'grab_bag': copy.deepcopy(wts_by_mu)}
for i in range(len(f_mu_pairs)-1):
f = f_mu_pairs[i,0]
mu = f_mu_pairs[i,1]
ix = np.where(mu==mus)[0][0]
f_next = f_mu_pairs[i+1,0]
mu_next = f_mu_pairs[i+1,1]
if np.isclose(mu, mu_next) and np.isclose(f + delta_f, f_next):
wait_dict['f_up'][ix].append(waiting_times[i])
elif np.isclose(mu/M, mu_next) and np.isclose(f, f_next):
wait_dict['mu_down'][ix].append(waiting_times[i])
elif np.isclose(M*mu, mu_next) and np.isclose(f + delta_f, f_next):
wait_dict['mu_up'][ix].append(waiting_times[i])
elif np.isclose(M**2*mu, mu_next) and np.isclose(f + delta_f, f_next):
wait_dict['mu_2up'][ix].append(waiting_times[i])
else:
wait_dict['grab_bag'][ix].append((mu_next, f, f_next,
waiting_times[i]))
return mus, wait_dict
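# Hedged toy example of the classification above (assumed values): a fitness
# sweep at mu = 0.01, a hitchhiking sweep that also raises the mutation rate to
# 0.1, and an antimutator sweep back down to 0.01. The waiting times 50, 80 and
# 40 should end up under 'f_up', 'mu_up' and 'mu_down' respectively; the final
# waiting time is right-censored and is not classified.
def _example_waiting_dict():
    f_mu_pairs = np.array([[0.0, 0.01],
                           [0.1, 0.01],
                           [0.2, 0.1],
                           [0.2, 0.01]])
    waiting_times = np.array([50, 80, 40, 25])
    return waiting_times_to_waiting_dict(f_mu_pairs, waiting_times)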
def waiting_dict_to_rates(mus, wait_dict):
'''
From a dictionary of the waiting times between the different types of
sweeps in the mutator-antimutator case in a simulation, compute the
transition rates between states.
'''
waitsum_dict = {}
trans_counts = {}
for trans in ['f_up', 'mu_down', 'mu_up', 'mu_2up']:
waitsum_dict[trans]=np.array([np.sum(wts) for wts in
wait_dict[trans]])
trans_counts[trans]=np.array([len(wts) for wts in wait_dict[trans]])
wait_total = (waitsum_dict['f_up'] + waitsum_dict['mu_down'] +
waitsum_dict['mu_up'] + waitsum_dict['mu_2up'])
count_total = (trans_counts['f_up'] + trans_counts['mu_down'] +
trans_counts['mu_up'] + trans_counts['mu_2up'])
rates_total = count_total/wait_total
rate_dict = {}
for trans in ['f_up', 'mu_down', 'mu_up', 'mu_2up']:
rate_dict[trans] = rates_total*trans_counts[trans]/count_total
empirical_Tm = np.zeros((mus.size, mus.size), dtype='float64')
for i, rate in list(enumerate(rate_dict['mu_down']))[1:]:
empirical_Tm[i-1, i] = rate
empirical_Tm[i, i] -= rate
for i, rate in list(enumerate(rate_dict['mu_up']))[:-1]:
empirical_Tm[i+1, i] = rate
empirical_Tm[i, i] -= rate
for i, rate in list(enumerate(rate_dict['mu_2up']))[:-2]:
empirical_Tm[i+2, i] = rate
empirical_Tm[i, i] -= rate
return empirical_Tm, rate_dict['f_up']
|
[
"numpy.maximum",
"numpy.sum",
"numpy.abs",
"numpy.allclose",
"numpy.isclose",
"numpy.arange",
"numpy.unique",
"numpy.pad",
"populationevolution.Population",
"SweepApproximationFunctions.fixation_probability",
"numpy.geomspace",
"numpy.append",
"numpy.cumsum",
"numpy.linspace",
"SweepApproximationFunctions.findNeq",
"copy.deepcopy",
"numpy.asarray",
"numpy.all",
"populationevolution.raggedtoregular.ragged_to_regular",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.logical_or",
"numpy.sqrt"
] |
[((311, 351), 'SweepApproximationFunctions.findNeq', 'SAF.findNeq', (['mu_min', 'delta_f', 'M', 'P_mu', 'K'], {}), '(mu_min, delta_f, M, P_mu, K)\n', (322, 351), True, 'import SweepApproximationFunctions as SAF\n'), ((486, 549), 'populationevolution.Population', 'popev.Population', (['(0)', 'mu_min', 'N_start', 'delta_f', 'M', '(0)', '(0)', 'P_mu', 'K'], {}), '(0, mu_min, N_start, delta_f, M, 0, 0, P_mu, K)\n', (502, 549), True, 'import populationevolution as popev\n'), ((899, 930), 'populationevolution.raggedtoregular.ragged_to_regular', 'r2r.ragged_to_regular', (['delta_Ns'], {}), '(delta_Ns)\n', (920, 930), True, 'import populationevolution.raggedtoregular as r2r\n'), ((999, 1035), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'Neq.dtype'}), '(new_shape, dtype=Neq.dtype)\n', (1007, 1035), True, 'import numpy as np\n'), ((1271, 1340), 'numpy.linspace', 'np.linspace', (['(0)', '(-delta_f * new_shape[0])', 'new_shape[0]'], {'endpoint': '(False)'}), '(0, -delta_f * new_shape[0], new_shape[0], endpoint=False)\n', (1282, 1340), True, 'import numpy as np\n'), ((1479, 1516), 'numpy.maximum', 'np.maximum', (['pop_dist.shape', 'Neq.shape'], {}), '(pop_dist.shape, Neq.shape)\n', (1489, 1516), True, 'import numpy as np\n'), ((1564, 1600), 'numpy.zeros', 'np.zeros', (['(fs, mus)'], {'dtype': 'Neq.dtype'}), '((fs, mus), dtype=Neq.dtype)\n', (1572, 1600), True, 'import numpy as np\n'), ((1733, 1774), 'numpy.zeros', 'np.zeros', (['(fs, mus)'], {'dtype': 'pop_dist.dtype'}), '((fs, mus), dtype=pop_dist.dtype)\n', (1741, 1774), True, 'import numpy as np\n'), ((3086, 3097), 'numpy.sum', 'np.sum', (['Neq'], {}), '(Neq)\n', (3092, 3097), True, 'import numpy as np\n'), ((3108, 3136), 'numpy.sqrt', 'np.sqrt', (['(Neq * (1 - Neq / K))'], {}), '(Neq * (1 - Neq / K))\n', (3115, 3136), True, 'import numpy as np\n'), ((4184, 4224), 'SweepApproximationFunctions.findNeq', 'SAF.findNeq', (['mu_min', 'delta_f', 'M', 'P_mu', 'K'], {}), '(mu_min, delta_f, M, P_mu, K)\n', (4195, 4224), True, 'import SweepApproximationFunctions as SAF\n'), ((4619, 4683), 'numpy.pad', 'np.pad', (['N_start', '(vertical_pad, horizontal_pad)'], {'mode': '"""constant"""'}), "(N_start, (vertical_pad, horizontal_pad), mode='constant')\n", (4625, 4683), True, 'import numpy as np\n'), ((5094, 5162), 'populationevolution.Population', 'popev.Population', (['f_inv', 'mu_min2', 'N_start', 'delta_f', 'M', '(0)', '(0)', 'P_mu', 'K'], {}), '(f_inv, mu_min2, N_start, delta_f, M, 0, 0, P_mu, K)\n', (5110, 5162), True, 'import populationevolution as popev\n'), ((5891, 5921), 'SweepApproximationFunctions.fixation_probability', 'SAF.fixation_probability', (['K', 's'], {}), '(K, s)\n', (5915, 5921), True, 'import SweepApproximationFunctions as SAF\n'), ((7664, 7683), 'numpy.asarray', 'np.asarray', (['inarray'], {}), '(inarray)\n', (7674, 7683), True, 'import numpy as np\n'), ((9404, 9431), 'numpy.unique', 'np.unique', (['f_mu_pairs[:, 0]'], {}), '(f_mu_pairs[:, 0])\n', (9413, 9431), True, 'import numpy as np\n'), ((9489, 9516), 'numpy.unique', 'np.unique', (['f_mu_pairs[:, 1]'], {}), '(f_mu_pairs[:, 1])\n', (9498, 9516), True, 'import numpy as np\n'), ((11756, 11803), 'numpy.zeros', 'np.zeros', (['(mus.size, mus.size)'], {'dtype': '"""float64"""'}), "((mus.size, mus.size), dtype='float64')\n", (11764, 11803), True, 'import numpy as np\n'), ((411, 426), 'numpy.sum', 'np.sum', (['N_start'], {}), '(N_start)\n', (417, 426), True, 'import numpy as np\n'), ((1110, 1178), 'numpy.geomspace', 'np.geomspace', (['mu_min', '(mu_min * M ** (new_shape[1] - 1))', 
'new_shape[1]'], {}), '(mu_min, mu_min * M ** (new_shape[1] - 1), new_shape[1])\n', (1122, 1178), True, 'import numpy as np\n'), ((1916, 1959), 'numpy.allclose', 'np.allclose', (['pop_mut_padded', 'mu_list_padded'], {}), '(pop_mut_padded, mu_list_padded)\n', (1927, 1959), True, 'import numpy as np\n'), ((4284, 4299), 'numpy.sum', 'np.sum', (['N_start'], {}), '(N_start)\n', (4290, 4299), True, 'import numpy as np\n'), ((6128, 6184), 'numpy.maximum', 'np.maximum', (['(100 * (1 - exp_fix_prob) / exp_fix_prob)', '(100)'], {}), '(100 * (1 - exp_fix_prob) / exp_fix_prob, 100)\n', (6138, 6184), True, 'import numpy as np\n'), ((8250, 8272), 'numpy.all', 'np.all', (['(curr == nexnex)'], {}), '(curr == nexnex)\n', (8256, 8272), True, 'import numpy as np\n'), ((8519, 8539), 'numpy.array', 'np.array', (['fmus_fused'], {}), '(fmus_fused)\n', (8527, 8539), True, 'import numpy as np\n'), ((8541, 8561), 'numpy.array', 'np.array', (['taus_fused'], {}), '(taus_fused)\n', (8549, 8561), True, 'import numpy as np\n'), ((9609, 9633), 'copy.deepcopy', 'copy.deepcopy', (['wts_by_mu'], {}), '(wts_by_mu)\n', (9622, 9633), False, 'import copy\n'), ((9663, 9687), 'copy.deepcopy', 'copy.deepcopy', (['wts_by_mu'], {}), '(wts_by_mu)\n', (9676, 9687), False, 'import copy\n'), ((9698, 9722), 'copy.deepcopy', 'copy.deepcopy', (['wts_by_mu'], {}), '(wts_by_mu)\n', (9711, 9722), False, 'import copy\n'), ((9751, 9775), 'copy.deepcopy', 'copy.deepcopy', (['wts_by_mu'], {}), '(wts_by_mu)\n', (9764, 9775), False, 'import copy\n'), ((9806, 9830), 'copy.deepcopy', 'copy.deepcopy', (['wts_by_mu'], {}), '(wts_by_mu)\n', (9819, 9830), False, 'import copy\n'), ((2916, 2951), 'numpy.arange', 'np.arange', (['(1)', '(delta_Ns.shape[0] + 1)'], {}), '(1, delta_Ns.shape[0] + 1)\n', (2925, 2951), True, 'import numpy as np\n'), ((4438, 4457), 'numpy.abs', 'np.abs', (['inv_mu_step'], {}), '(inv_mu_step)\n', (4444, 4457), True, 'import numpy as np\n'), ((4697, 4723), 'numpy.maximum', 'np.maximum', (['(0)', 'inv_mu_step'], {}), '(0, inv_mu_step)\n', (4707, 4723), True, 'import numpy as np\n'), ((5209, 5249), 'SweepApproximationFunctions.findNeq', 'SAF.findNeq', (['mu_inv', 'delta_f', 'M', 'P_mu', 'K'], {}), '(mu_inv, delta_f, M, P_mu, K)\n', (5220, 5249), True, 'import SweepApproximationFunctions as SAF\n'), ((7838, 7869), 'numpy.logical_or', 'np.logical_or', (['x[:, 0]', 'x[:, 1]'], {}), '(x[:, 0], x[:, 1])\n', (7851, 7869), True, 'import numpy as np\n'), ((9463, 9476), 'numpy.unique', 'np.unique', (['fs'], {}), '(fs)\n', (9472, 9476), True, 'import numpy as np\n'), ((10047, 10070), 'numpy.isclose', 'np.isclose', (['mu', 'mu_next'], {}), '(mu, mu_next)\n', (10057, 10070), True, 'import numpy as np\n'), ((10075, 10106), 'numpy.isclose', 'np.isclose', (['(f + delta_f)', 'f_next'], {}), '(f + delta_f, f_next)\n', (10085, 10106), True, 'import numpy as np\n'), ((1693, 1707), 'numpy.arange', 'np.arange', (['mus'], {}), '(mus)\n', (1702, 1707), True, 'import numpy as np\n'), ((1887, 1901), 'numpy.arange', 'np.arange', (['mus'], {}), '(mus)\n', (1896, 1901), True, 'import numpy as np\n'), ((3847, 3877), 'SweepApproximationFunctions.fixation_probability', 'SAF.fixation_probability', (['K', 's'], {}), '(K, s)\n', (3871, 3877), True, 'import SweepApproximationFunctions as SAF\n'), ((7890, 7901), 'numpy.where', 'np.where', (['y'], {}), '(y)\n', (7898, 7901), True, 'import numpy as np\n'), ((7928, 7944), 'numpy.append', 'np.append', (['(-1)', 'i'], {}), '(-1, i)\n', (7937, 7944), True, 'import numpy as np\n'), ((9941, 9960), 'numpy.where', 'np.where', 
(['(mu == mus)'], {}), '(mu == mus)\n', (9949, 9960), True, 'import numpy as np\n'), ((10180, 10207), 'numpy.isclose', 'np.isclose', (['(mu / M)', 'mu_next'], {}), '(mu / M, mu_next)\n', (10190, 10207), True, 'import numpy as np\n'), ((10210, 10231), 'numpy.isclose', 'np.isclose', (['f', 'f_next'], {}), '(f, f_next)\n', (10220, 10231), True, 'import numpy as np\n'), ((11124, 11135), 'numpy.sum', 'np.sum', (['wts'], {}), '(wts)\n', (11130, 11135), True, 'import numpy as np\n'), ((2992, 3019), 'numpy.cumsum', 'np.cumsum', (['delta_Ns'], {'axis': '(0)'}), '(delta_Ns, axis=0)\n', (3001, 3019), True, 'import numpy as np\n'), ((10308, 10335), 'numpy.isclose', 'np.isclose', (['(M * mu)', 'mu_next'], {}), '(M * mu, mu_next)\n', (10318, 10335), True, 'import numpy as np\n'), ((10338, 10369), 'numpy.isclose', 'np.isclose', (['(f + delta_f)', 'f_next'], {}), '(f + delta_f, f_next)\n', (10348, 10369), True, 'import numpy as np\n'), ((10444, 10476), 'numpy.isclose', 'np.isclose', (['(M ** 2 * mu)', 'mu_next'], {}), '(M ** 2 * mu, mu_next)\n', (10454, 10476), True, 'import numpy as np\n'), ((10477, 10508), 'numpy.isclose', 'np.isclose', (['(f + delta_f)', 'f_next'], {}), '(f + delta_f, f_next)\n', (10487, 10508), True, 'import numpy as np\n')]
|
import os
import csv
from argparse import ArgumentParser
import numpy as np
import torch
from torchvision import transforms
from assets.inference import classify
from assets.mtdp import build_model
from assets.mtdp.components import Head
from assets.mtdp.networks import SingleHead
from svm_classifier_train import group_per_slide, compute_challenge_score
from sklearn.metrics import accuracy_score, confusion_matrix, roc_auc_score
from sklearn.model_selection import train_test_split
def write_submission(preds):
with open("submission.csv", "w+") as file:
file.write("filename,0,1,2,3\n")
for filename, pred_cls in preds.items():
file.write(os.path.basename(filename) + "," + ",".join([str(int(pred_cls == cls)) for cls in range(4)]) + "\n")
def read_test_files():
with open("data/test_metadata.csv", "r") as file:
reader = csv.reader(file, delimiter=',')
next(reader)
filenames = list()
for row in reader:
filenames.append(row[0])
return filenames
def main(argv):
parser = ArgumentParser()
parser.add_argument("-f", "--model_filename", dest="model_filename")
parser.add_argument("-d", "--device", dest="device", default="cuda:0")
parser.add_argument("-j", "--n_jobs", dest="n_jobs", default=5, type=int)
parser.add_argument("-a", "--architecture", dest="architecture", default="resnet50")
parser.add_argument("-s", "--tile_size", dest="tile_size", default=512, type=int)
parser.add_argument("-o", "--overlap", dest="overlap", default=0, type=int)
parser.add_argument("-z", "--zoom_level", dest="zoom_level", default=0, type=int)
parser.add_argument("-b", "--batch_size", dest="batch_size", default=32, type=int)
parser.add_argument("-i", "--image_path", dest="image_path", default=".")
parser.add_argument("-m", "--metadata_path", dest="metadata_path", default=".")
parser.add_argument("-l", "--model_path", dest="model_path", default=".")
args, _ = parser.parse_known_args(argv)
slidenames, slide2annots, slide2cls = group_per_slide(args.metadata_path)
N_CLASSES = 4
ZOOM_LEVEL = args.zoom_level
TILE_SIZE = args.tile_size
TILE_OVERLAP = args.overlap
BATCH_SIZE = args.batch_size
ARCH = args.architecture
MODEL_PATH = os.path.join(args.model_path, args.model_filename)
trans = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # ImageNet stats
])
device = torch.device(args.device)
state_dict = torch.load(MODEL_PATH, map_location=device)
features = build_model(arch=ARCH, pretrained=False, pool=True)
model = SingleHead(features, Head(features.n_features(), n_classes=4))
model.load_state_dict(state_dict)
model.eval()
model.to(device)
y_pred, y_true = list(), list()
print("{} slide(s) to process".format(len(slidenames)))
probas, tiles, filenames = list(), list(), list()
for i, filename in enumerate(slidenames):
slide_path = os.path.join(args.image_path, filename)
print("--- {} ---".format(slide_path))
try:
with torch.no_grad():
cls_dict, slide_tiles, slide_probas = classify(
slide_path=slide_path,
model=model,
device=device,
transform=trans,
batch_size=BATCH_SIZE,
tile_size=TILE_SIZE,
tile_overlap=TILE_OVERLAP,
num_workers=args.n_jobs - 1,
zoom_level=ZOOM_LEVEL,
n_classes=N_CLASSES
)
probas.append(slide_probas)
tiles.extend(slide_tiles)
filenames.extend([filename for _ in range(len(slide_tiles))])
except Exception as e:
print("/!\\ error during prediction {}".format(str(e)))
print("/!\\ ... predicting 0")
probas.append([1., 0., 0., 0.])
tiles.extend([])
filenames.append(filename)
print("-> {:3.2f}% - {} / {}".format(100 * (i + 1) / len(slidenames), i + 1, len(slidenames)))
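    # Stack the per-tile class probabilities collected from all slides into one array.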
probas = np.vstack(probas)
return {
"probas": probas,
"filenames": filenames,
"tiles": [(tile.abs_offset_x, tile.abs_offset_y, tile.height, tile.width) for tile in tiles]
}
# print()
# print("slide: ")
# val_slide_acc = accuracy_score(y_true, y_pred)
# val_slide_score = compute_challenge_score(y_true, y_pred)
# val_slide_cm = confusion_matrix(y_true, y_pred)
# print("> slide acc: ", val_slide_acc)
# print("> slide sco: ", val_slide_score)
# print("> slide cm : ")
# print(val_slide_cm)
if __name__ == "__main__":
import sys
returned = main(sys.argv[1:])
np.save("final_probas.npy", returned["probas"])
np.save("final_filenames.npy", np.array(returned["filenames"]))
np.save("final_tiles.npy", np.array(returned["tiles"]))
|
[
"svm_classifier_train.group_per_slide",
"assets.mtdp.build_model",
"numpy.save",
"csv.reader",
"argparse.ArgumentParser",
"os.path.basename",
"torch.load",
"assets.inference.classify",
"numpy.array",
"torch.device",
"torchvision.transforms.Normalize",
"torch.no_grad",
"os.path.join",
"numpy.vstack",
"torchvision.transforms.ToTensor"
] |
[((1075, 1091), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1089, 1091), False, 'from argparse import ArgumentParser\n'), ((2073, 2108), 'svm_classifier_train.group_per_slide', 'group_per_slide', (['args.metadata_path'], {}), '(args.metadata_path)\n', (2088, 2108), False, 'from svm_classifier_train import group_per_slide, compute_challenge_score\n'), ((2303, 2353), 'os.path.join', 'os.path.join', (['args.model_path', 'args.model_filename'], {}), '(args.model_path, args.model_filename)\n', (2315, 2353), False, 'import os\n'), ((2542, 2567), 'torch.device', 'torch.device', (['args.device'], {}), '(args.device)\n', (2554, 2567), False, 'import torch\n'), ((2585, 2628), 'torch.load', 'torch.load', (['MODEL_PATH'], {'map_location': 'device'}), '(MODEL_PATH, map_location=device)\n', (2595, 2628), False, 'import torch\n'), ((2644, 2695), 'assets.mtdp.build_model', 'build_model', ([], {'arch': 'ARCH', 'pretrained': '(False)', 'pool': '(True)'}), '(arch=ARCH, pretrained=False, pool=True)\n', (2655, 2695), False, 'from assets.mtdp import build_model\n'), ((4228, 4245), 'numpy.vstack', 'np.vstack', (['probas'], {}), '(probas)\n', (4237, 4245), True, 'import numpy as np\n'), ((4861, 4908), 'numpy.save', 'np.save', (['"""final_probas.npy"""', "returned['probas']"], {}), "('final_probas.npy', returned['probas'])\n", (4868, 4908), True, 'import numpy as np\n'), ((875, 906), 'csv.reader', 'csv.reader', (['file'], {'delimiter': '""","""'}), "(file, delimiter=',')\n", (885, 906), False, 'import csv\n'), ((3065, 3104), 'os.path.join', 'os.path.join', (['args.image_path', 'filename'], {}), '(args.image_path, filename)\n', (3077, 3104), False, 'import os\n'), ((4944, 4975), 'numpy.array', 'np.array', (["returned['filenames']"], {}), "(returned['filenames'])\n", (4952, 4975), True, 'import numpy as np\n'), ((5008, 5035), 'numpy.array', 'np.array', (["returned['tiles']"], {}), "(returned['tiles'])\n", (5016, 5035), True, 'import numpy as np\n'), ((2396, 2417), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2415, 2417), False, 'from torchvision import transforms\n'), ((2427, 2502), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2447, 2502), False, 'from torchvision import transforms\n'), ((3182, 3197), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3195, 3197), False, 'import torch\n'), ((3253, 3481), 'assets.inference.classify', 'classify', ([], {'slide_path': 'slide_path', 'model': 'model', 'device': 'device', 'transform': 'trans', 'batch_size': 'BATCH_SIZE', 'tile_size': 'TILE_SIZE', 'tile_overlap': 'TILE_OVERLAP', 'num_workers': '(args.n_jobs - 1)', 'zoom_level': 'ZOOM_LEVEL', 'n_classes': 'N_CLASSES'}), '(slide_path=slide_path, model=model, device=device, transform=trans,\n batch_size=BATCH_SIZE, tile_size=TILE_SIZE, tile_overlap=TILE_OVERLAP,\n num_workers=args.n_jobs - 1, zoom_level=ZOOM_LEVEL, n_classes=N_CLASSES)\n', (3261, 3481), False, 'from assets.inference import classify\n'), ((678, 704), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (694, 704), False, 'import os\n')]
|
import PSICT_UIF
import numpy as np
import os
import sys
import Labber
from PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools import writePulseDefs, writePulseSeqs
## Create pulse definitions (list of dicts)
pulse_defs = []
## qubit
pulse_defs.append({'a': 0.3, 'w': 60e-9, 'v': 0e-9, 's': 60e-9, 'f': 90e6, 'o': 2, 'p': 0, 'DRAG': 20e-9})
## magnon
pulse_defs.append({'a': 0.5, 'w': 85e-9, 'v': 0e-9, 's': 85e-9, 'f': 60e6, 'o': 3, 'p': 0, 'DRAG': 30e-9})
## dead
pulse_defs.append({'a': 0.0, 'w': 0e-9, 'v': 40e-9, 's': 0e-9, 'f': 0e6, 'o': 4, 'p': 0})
## trigger
pulse_defs.append({'a': 1.5, 'w': 0e-9, 'v': 20e-9, 's': 0e-9, 'f': 0e6, 'o': 4, 'p': 0})
## readout
pulse_defs.append({'a': 0.64, 'w': 0e-9, 'v': 400e-9, 's': 0e-9, 'f': 85e6, 'o': 1, 'p': 90, 'fix_phase': 1})
## qubit
pulse_defs.append({'a': 0.3, 'w': 60e-9, 'v': 0e-9, 's': 60e-9, 'f': 90e6, 'o': 2, 'p': 0, 'DRAG': 0})
## magnon
pulse_defs.append({'a': 0.5, 'w': 85e-9, 'v': 0e-9, 's': 85e-9, 'f': 60e6, 'o': 3, 'p': 0, 'DRAG': 0})
## Set key order
pulse_def_key_order = ['a', 'w', 'v', 's', 'f', 'p', 'o', 'DRAG', 'fix_phase']
## Write to file
pulse_def_path = os.path.abspath('definitions_002.txt')
writePulseDefs(pulse_def_path, pulse_defs, pulse_def_key_order)
print('Pulse definitions written to file: {}'.format(pulse_def_path))
## Generate list of lists of pulse sequences
pulse_seqs = []
pulse_seqs.append(np.array([2,2,0,3,4]))
pulse_seqs.append(np.array([2,2,5,3,4]))
pulse_seqs.append(np.array([2,2,1,3,4]))
pulse_seqs.append(np.array([2,2,6,3,4]))
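## Each sequence: two dead-time segments (2), one control pulse (0/5: qubit with/without DRAG,
## 1/6: magnon with/without DRAG), a trigger (3) and the readout pulse (4)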
n_pulse_seqs = len(pulse_seqs)
## Write to file
pulse_seq_path = os.path.abspath('sequence_002.txt')
writePulseSeqs(pulse_seq_path, pulse_seqs)
print('Pulse sequences written to file: {}'.format(pulse_seq_path))
##############################################################################
## Do not change the script structure beyond this point!
pulse_sequence = 'MultiPulse-Test01'
slave_PSICT_options = {
'running_as_slave': True,
'config_path': '../PSICT_config.py',
## Set template file directory and name
'template_dir': 'C:/Users/Pierre/Google Drive/Quantum magnonics/Data/Reference_files/',
'template_file': 'MultiPulse_test02',
## Set output file directory and name (where result will be saved)
'output_dir': 'C:/Users/Pierre/Google Drive/Quantum magnonics/Data/2019/01/Data_0101',
'output_file': 'MultiPulse_test_0074',
## Script copy options
'script_copy_target_dir': 'C:/Users/Pierre/Google Drive/Quantum magnonics/Data/Measurement_scripts/MultiPulse/',
}
slave_general_options = {
'sideband': -1,
# IF frequency in Hz of the readout square pulse
'readout_IF_frequency': 90e6,
# Power in dBm of the local oscillator used for readout (LO1)
'readout_LO_power': 19,
# Optimal target frequency in Hz of the readout square pulse
'readout_frequency_opt': 8.412050e9,
# Optimal amplitude in V of the readout square pulse
'readout_amplitude_opt': 0.23,
# ''Optimal'' duration in seconds of the readout square pulse
'readout_plateau_opt': 400e-9,
# IF frequency in Hz of the qubit control pulse
'qubit_IF_frequency': 95e6,
# Power in dBm of the local oscillator used for qubit control (LO2)
'qubit_LO_power': 16,
# Target control frequency in Hz of the qubit control gaussian pulse
'qubit_frequency_opt': 7.924759e9,
# IF frequency in Hz of the magnon control pulse
'magnon_IF_frequency': 115e6,
# Power in dBm of the local oscillator used for qubit control (LO3)
'magnon_LO_power': 16,
# Target control frequency in Hz of the qubit control gaussian pulse
'magnon_frequency_opt': 7.787615e9,
## Parameters for pi pulses
'qubit_width_pi': 12e-9,
## Gaussian pulses
'qubit_amplitude_pi_dict': {12e-9: 1.109358, 200e-9: 0.073707, },
#1.068168, },
## Lambda values for different pi-pulse durations
'lambda_dict': {},
## Square pulses
'qubit_plateau_pi': 0e-9,
## General settings for pulse sequences
# Truncation range for gaussian pulses according to the definition of the 'Single-qubit pulse generator'
'SQPG_truncation_range': 3,
# Sampling rate in samples/second for the pulse sequence/AWG
'SQPG_sampling_rate': 1e09,
# Duration in seconds of the pulse sequence
'SQPG_sequence_duration': 4000e-9,
## General settings for the digitizer
# Number of repetitions (shots) averaged at the digitizer, 1: single-shot
'N_shots': 1e04,
# Length in seconds of the measured time trace
'digitizer_length': 600e-9,
# Sampling rate in samples/second of the digitizer
'digitizer_sampling_rate': 5e08,
## General setting for the demodulation
# Time in seconds to start the demodulation, takes into account the delay before the readout pulse arrive
'demodulation_skip_start': 200e-9,
# Length in seconds of the demodulation, should be close to readout_plateau, and at maximum digitizer_length
'demodulation_length': 400e-9,
# Current in amperes for the coil
'current': -7.97e-3,
}
slave_pulse_sequence_options = {
'MultiPulse-Test01': {
'Number of points': 5E3,
'First pulse delay': 100e-9,
'Generate from final pulse': 0,
'Final pulse time': 3e-6,
'pulse_def_path': pulse_def_path,
'pulse_seq_path': pulse_seq_path,
},##
}
## SCRIPT COPY BREAKPOINT
def run_pulse_sequence(pulse_sequence_name, PSICT_options, general_options, pulse_sequence_options_all, *, verbose = 0):
'''
Run the pulse sequence specified by pulse_sequence_name.
Options passed in via parameter dicts.
'''
pulse_sequence = pulse_sequence_name
pulse_sequence_options = pulse_sequence_options_all[pulse_sequence]
#############################################################################
## PSICT and directory setup
print('-------------------------------------------')
print('==>', pulse_sequence)
print('PSICT_UIF version is', PSICT_UIF.__version__)
## Initialise PSICT-UIF interface object
psictInterface = PSICT_UIF.psictUIFInterface(verbose = 0)
psictInterface.is_slave = PSICT_options['running_as_slave']
## Set file paths
config_path = PSICT_options['config_path']
template_dir = PSICT_options['template_dir']
template_file = PSICT_options['template_file']
output_dir = PSICT_options['output_dir']
output_file = PSICT_options['output_file']
script_copy_target_dir = PSICT_options['script_copy_target_dir']
psictInterface.load_config_file(config_path, verbose = 0)
psictInterface.set_template_file(template_dir, template_file, verbose = 0)
psictInterface.set_output_file(output_dir, output_file, verbose = 1)
psictInterface.set_script_copy_target_dir(script_copy_target_dir, verbose = 0)
#############################################################################
## General options
## Converting dictionary-defined parameters to variables
## This has the beneficial side effect of ensuring each of these parameters
## exists in the options dict passed to the function
sideband = general_options['sideband']
readout_IF_frequency = general_options['readout_IF_frequency']
readout_LO_power = general_options['readout_LO_power']
readout_frequency_opt = general_options['readout_frequency_opt']
readout_amplitude_opt = general_options['readout_amplitude_opt']
readout_plateau_opt = general_options['readout_plateau_opt']
qubit_IF_frequency = general_options['qubit_IF_frequency']
qubit_LO_power = general_options['qubit_LO_power']
qubit_frequency_opt = general_options['qubit_frequency_opt']
magnon_IF_frequency = general_options['magnon_IF_frequency']
magnon_LO_power = general_options['magnon_LO_power']
magnon_frequency_opt = general_options['magnon_frequency_opt']
qubit_width_pi = general_options['qubit_width_pi']
qubit_amplitude_pi_dict = general_options['qubit_amplitude_pi_dict']
qubit_amplitude_pi = qubit_amplitude_pi_dict[qubit_width_pi]
lambda_dict = general_options['lambda_dict']
qubit_plateau_pi = general_options['qubit_plateau_pi']
SQPG_truncation_range = general_options['SQPG_truncation_range']
SQPG_sampling_rate = general_options['SQPG_sampling_rate']
SQPG_sequence_duration = general_options['SQPG_sequence_duration']
N_shots = general_options['N_shots']
digitizer_length = general_options['digitizer_length']
digitizer_sampling_rate = general_options['digitizer_sampling_rate']
demodulation_skip_start = general_options['demodulation_skip_start']
demodulation_length = general_options['demodulation_length']
current = general_options['current']
#############################################################################
## Pulse sequences
if pulse_sequence == 'MultiPulse-Test01':
# Target frequency in Hz of the readout square pulse
readout_frequency = readout_frequency_opt
# Amplitude in V of the readout square pulse
readout_amplitude = readout_amplitude_opt
# Duration in seconds of the readout square pulse
readout_plateau = readout_plateau_opt
# Target control frequency in Hz of the qubit control gaussian pulse
qubit_frequency = qubit_frequency_opt
# Amplitude in V of the qubit control gaussian pulse
qubit_amplitude = qubit_amplitude_pi
# Width in seconds of the qubit control gaussian pulse
qubit_width = qubit_width_pi
# Target control frequency in Hz of the magnon control gaussian pulse
magnon_frequency = magnon_frequency_opt
point_values = {
'MultiPulse': {
'Number of points': pulse_sequence_options['Number of points'],
'First pulse delay': pulse_sequence_options['First pulse delay'],
'Generate from final pulse': pulse_sequence_options['Generate from final pulse'],
'Final pulse time': pulse_sequence_options['Final pulse time'],
}, # end MultiPulse
} # end point values
Labber_api_client_values = {
'MultiPulse': {
}, # end MultiPulse
} # end api client values
instr_config_values = {
'MultiPulse': {
'Pulse definitions file': pulse_sequence_options['pulse_def_path'],
'Pulse sequences file': pulse_sequence_options['pulse_seq_path'],
}, # end MultiPulse
} # end instr_config_values
Labber_api_hardware_names = {'MultiPulse': 'PSICT MultiPulse'}
iteration_values = {
'MultiPulse': {'Pulse sequence counter': [0, n_pulse_seqs - 1, n_pulse_seqs]},
}
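        # Iterate the MultiPulse 'Pulse sequence counter' over all n_pulse_seqs sequences defined above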
iteration_order = [
('MultiPulse', 'Pulse sequence counter'),
] # end iteration order
# end reference
## Channel relations - set available quantities (could be outsourced to rcfile?)
channel_defs = {
} # end relation channels
channel_relations = {
} # end channel relations
## end Qubit_Ramsey
#############################################################################
## Set parameters & measure
#############################################################################
## Set input parameter values
psictInterface.set_point_values(point_values, verbose = 0)
psictInterface.set_api_client_values(Labber_api_client_values, \
hardware_names = Labber_api_hardware_names, server_name = 'localhost', verbose = 0)
psictInterface.set_instr_config_values(instr_config_values, \
hardware_names = Labber_api_hardware_names, server_name = 'localhost', \
verbose = 0)
psictInterface.set_iteration_values(iteration_values, iteration_order, verbose = 1)
psictInterface.set_channel_relations(channel_defs, channel_relations, verbose = 0)
## Run measurement
psictInterface.perform_measurement(dry_run = False, verbose = 1)
return psictInterface.fileManager.output_path
##
if __name__ == '__main__':
## This block will only be executed if the slave is run explicitly as a standalone script
# print('Running as a standalone script...')
slave_PSICT_options['running_as_slave'] = False
run_pulse_sequence(pulse_sequence, slave_PSICT_options, slave_general_options, slave_pulse_sequence_options, verbose = 1)
else:
## This block will only be run when the slave is imported (ie 'run' through a master script)
# print('Running as slave script...')
slave_PSICT_options['running_as_slave'] = True
##
|
[
"os.path.abspath",
"numpy.array",
"PSICT_UIF.psictUIFInterface",
"PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools.writePulseSeqs",
"PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools.writePulseDefs"
] |
[((1138, 1176), 'os.path.abspath', 'os.path.abspath', (['"""definitions_002.txt"""'], {}), "('definitions_002.txt')\n", (1153, 1176), False, 'import os\n'), ((1177, 1240), 'PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools.writePulseDefs', 'writePulseDefs', (['pulse_def_path', 'pulse_defs', 'pulse_def_key_order'], {}), '(pulse_def_path, pulse_defs, pulse_def_key_order)\n', (1191, 1240), False, 'from PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools import writePulseDefs, writePulseSeqs\n'), ((1602, 1637), 'os.path.abspath', 'os.path.abspath', (['"""sequence_002.txt"""'], {}), "('sequence_002.txt')\n", (1617, 1637), False, 'import os\n'), ((1638, 1680), 'PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools.writePulseSeqs', 'writePulseSeqs', (['pulse_seq_path', 'pulse_seqs'], {}), '(pulse_seq_path, pulse_seqs)\n', (1652, 1680), False, 'from PSICT_extras.PSICT_MultiPulse.PSICT_MultiPulse_tools import writePulseDefs, writePulseSeqs\n'), ((1391, 1416), 'numpy.array', 'np.array', (['[2, 2, 0, 3, 4]'], {}), '([2, 2, 0, 3, 4])\n', (1399, 1416), True, 'import numpy as np\n'), ((1432, 1457), 'numpy.array', 'np.array', (['[2, 2, 5, 3, 4]'], {}), '([2, 2, 5, 3, 4])\n', (1440, 1457), True, 'import numpy as np\n'), ((1473, 1498), 'numpy.array', 'np.array', (['[2, 2, 1, 3, 4]'], {}), '([2, 2, 1, 3, 4])\n', (1481, 1498), True, 'import numpy as np\n'), ((1514, 1539), 'numpy.array', 'np.array', (['[2, 2, 6, 3, 4]'], {}), '([2, 2, 6, 3, 4])\n', (1522, 1539), True, 'import numpy as np\n'), ((5816, 5854), 'PSICT_UIF.psictUIFInterface', 'PSICT_UIF.psictUIFInterface', ([], {'verbose': '(0)'}), '(verbose=0)\n', (5843, 5854), False, 'import PSICT_UIF\n')]
|
import numpy as np
# flattening
two_dim_array = np.array([[1, 2, 3], [4, 5, 6]])
""" array([[1, 2, 3],
[4, 5, 6]]) """
# convert 2Dim to 1Dim
two_dim_array.ravel()
# array([1, 2, 3, 4, 5, 6])
# transpose
two_dim_array.T
""" array([[1, 4],
[2, 5],
[3, 6]]) """
# re-shaping
one_dim_array = two_dim_array.ravel()
# array([1, 2, 3, 4, 5, 6])
two_dim_array = one_dim_array.reshape((2,3))
""" array([[1, 2, 3],
[4, 5, 6]]) """
# assign value
two_dim_array[0,1] = 500
""" array([[ 1, 500, 3],
[ 4, 5, 6]]) """
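# note (added illustration, not part of the original snippet): ravel() returns a view
# where possible, while flatten() always returns a copy
flat_view = two_dim_array.ravel()
flat_copy = two_dim_array.flatten()
flat_view[0] = -7    # also changes two_dim_array[0, 0]
flat_copy[1] = -8    # two_dim_array is unaffected
""" two_dim_array is now array([[ -7, 500,   3],
                                [  4,   5,   6]]) """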
|
[
"numpy.array"
] |
[((52, 84), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (60, 84), True, 'import numpy as np\n')]
|
"""
Module provides telemetry services.
"""
import collections
import numpy
class Telemetry:
"""
    Calculates object movement parameters such as velocity and acceleration. Additionally, the class provides a
    service to predict the future position of the object using velocity and acceleration.
"""
_position_length = 4
def __init__(self):
"""
Initializes telemetry instance.
"""
self._positions = collections.deque()
self._pos_prev = 0
self._pos_cur = 0
self._velocity = 0
def __str__(self):
"""
:return: String representation of a telemetry of an object.
"""
return "v: '%.1f'" % self._velocity
def update(self, position):
"""
Calculates current state the object.
:param: position: distance change (difference).
"""
self._positions.appendleft(position)
if len(self._positions) > Telemetry._position_length:
self._positions.pop()
ds = numpy.diff(self._positions)
self._pos_prev = self._pos_cur
self._pos_cur = position
#self._velocity = self._pos_cur - self._pos_prev
self._velocity = int(numpy.sum(ds) / 2)
def get_velocity(self):
"""
        :return: Velocity of the object tracked by this telemetry instance. It may return '0' if there is
        not enough data (distance changes) yet - a common situation at the beginning.
"""
return self._velocity
def predict_position(self):
"""
Calculates next position of an object.
:return: Predicted position of an object.
"""
#print("Current position: %d, Velocity: %d -> Next position: %d" % (self._pos_cur, self._velocity, self._pos_cur + self._velocity))
return self._pos_cur + self._velocity
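# Example usage (added sketch, not part of the original module): feed successive
# position changes into the telemetry, then read back the smoothed velocity and
# the predicted next position.
if __name__ == "__main__":
    telemetry = Telemetry()
    for delta in (10, 12, 11, 13):
        telemetry.update(delta)
    print(telemetry)                     # smoothed velocity estimate
    print(telemetry.predict_position())  # last position change + velocity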
|
[
"numpy.diff",
"numpy.sum",
"collections.deque"
] |
[((467, 486), 'collections.deque', 'collections.deque', ([], {}), '()\n', (484, 486), False, 'import collections\n'), ((1062, 1089), 'numpy.diff', 'numpy.diff', (['self._positions'], {}), '(self._positions)\n', (1072, 1089), False, 'import numpy\n'), ((1256, 1269), 'numpy.sum', 'numpy.sum', (['ds'], {}), '(ds)\n', (1265, 1269), False, 'import numpy\n')]
|
import datetime as dt
import glob
import os
import pickle
import time # for start stop calc
from threading import Thread
import numpy as np
import torch
import torch.utils.data as data
from deeplio.common import utils, logger
from deeplio.common.laserscan import LaserScan
class KittiRawData:
    """ KittiRawData
    More or less the same as pykitti, with some application-specific changes.
"""
SEQ_NUM = {
"2011_10_03_0027": 0,
"2011_10_03_0042": 1,
"2011_10_03_0034": 2,
"2011_09_30_0016": 4,
"2011_09_30_0018": 5,
"2011_09_30_0020": 6,
"2011_09_30_0027": 7,
"2011_09_30_0028": 8,
"2011_09_30_0033": 9,
"2011_09_30_0034": 10,
}
def __init__(self, base_path_sync, base_path_unsync, date, drive,
cfg=None, oxts_bin=False, oxts_txt=False, max_points=150000, **kwargs):
self.drive = drive
self.date = date
self.seq_nr = self.SEQ_NUM['{}_{}'.format(self.date, self.drive)]
self.dataset_sync = 'sync'
self.dataset_unsync = 'extract'
self.drive_full_sync = date + '_drive_' + drive + '_' + self.dataset_sync
self.drive_full_unsync = date + '_drive_' + drive + '_' + self.dataset_unsync
self.calib_path = os.path.join(base_path_sync, date)
self.data_path_sync = os.path.join(base_path_sync, date, self.drive_full_sync)
self.data_path_unsync = os.path.join(base_path_unsync, date, self.drive_full_unsync)
self.frames = kwargs.get('frames', None)
self.max_points = max_points
if cfg is not None:
ds_config = cfg['kitti']
self.image_width = ds_config.get('image-width', 1024)
self.image_height = ds_config.get('image-height', 64)
self.fov_up = ds_config.get('fov-up', 3)
self.fov_down = ds_config.get('fov-down', -25)
self.max_depth = ds_config.get('max-depth', 80)
self.min_depth = ds_config.get('min-depth', 2)
# Find all the data files
self._get_velo_files()
#self._load_calib()
self._load_timestamps()
        # Give priority to binary files, since they are loaded much faster
if oxts_bin:
self._load_oxts_bin()
elif oxts_txt:
self._get_oxt_files()
self._load_oxts()
self.imu_get_counter = 0
def __len__(self):
return len(self.velo_files)
def get_velo(self, idx):
"""Read velodyne [x,y,z,reflectance] scan at the specified index."""
return utils.load_velo_scan(self.velo_files[idx])
def get_velo_image(self, idx):
scan = LaserScan(project=False, H=self.image_height, W=self.image_width, fov_up=self.fov_up, fov_down=self.fov_down,
min_depth=self.min_depth, max_depth=self.max_depth)
scan.open_scan(self.velo_files[idx])
scan.do_range_projection()
scan.do_normal_projection()
# get projected data
proj_xyz = scan.proj_xyz / self.max_depth
proj_remission = scan.proj_remission
proj_range = scan.proj_range
proj_normal = scan.proj_normal
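        # stack into an 8-channel image: xyz (3) + remission (1) + normals (3) + range (1)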
image = np.dstack((proj_xyz, proj_remission, proj_normal, proj_range))
return image
def get_imu_values(self, idx):
oxt = self.oxts_unsync[idx]
imu_values = np.array([[oxt[0].ax, oxt[0].ay, oxt[0].az,
oxt[0].wx, oxt[0].wy, oxt[0].wz]
for oxt in oxt], dtype=np.float)
return imu_values
def _get_velo_files(self):
# first try to get binary files
self.velo_files = sorted(glob.glob(
os.path.join(self.data_path_sync, 'velodyne_points',
'data', '*.bin')))
# if there is no bin files for velo, so the velo file are in text format
if self.velo_files is None:
self.velo_files = sorted(glob.glob(
os.path.join(self.data_path_unsync, 'velodyne_points',
'data', '*.txt')))
# Subselect the chosen range of frames, if any
if self.frames is not None:
self.velo_files = utils.subselect_files(
self.velo_files, self.frames)
self.velo_files = np.asarray(self.velo_files)
def _get_oxt_files(self):
"""Find and list data files for each sensor."""
self.oxts_files_sync = sorted(glob.glob(
os.path.join(self.data_path_sync, 'oxts', 'data', '*.txt')))
if self.frames is not None:
self.oxts_files_sync = utils.subselect_files(
self.oxts_files_sync, self.frames)
self.oxts_files_sync = np.asarray(self.oxts_files_sync)
self.oxts_files_unsync = sorted(glob.glob(
os.path.join(self.data_path_unsync, 'oxts', 'data', '*.txt')))
if self.frames is not None:
self.oxts_files_unsync = utils.subselect_files(
self.oxts_files_unsync, self.frames)
self.oxts_files_unsync = np.asarray(self.oxts_files_unsync)
def _load_calib_rigid(self, filename):
"""Read a rigid transform calibration file as a numpy.array."""
filepath = os.path.join(self.calib_path, filename)
data = utils.read_calib_file(filepath)
return utils.transform_from_rot_trans(data['R'], data['T'])
def _load_calib(self):
"""Load and compute intrinsic and extrinsic calibration parameters."""
# We'll build the calibration parameters as a dictionary, then
# convert it to a namedtuple to prevent it from being modified later
data = {}
# Load the rigid transformation from IMU to velodyne
data['T_velo_imu'] = self._load_calib_rigid('calib_imu_to_velo.txt')
def _load_timestamps(self):
"""Load timestamps from file."""
timestamp_file_unsync = os.path.join(self.data_path_unsync, 'oxts', 'timestamps.txt')
timestamp_file_velo = os.path.join(self.data_path_sync, 'velodyne_points', 'timestamps.txt')
timestamp_file_sync = os.path.join(self.data_path_sync, 'oxts', 'timestamps.txt')
# Read and parse the timestamps
self.timestamps_unsync = []
with open(timestamp_file_unsync, 'r') as f:
for line in f.readlines():
# NB: datetime only supports microseconds, but KITTI timestamps
# give nanoseconds, so need to truncate last 4 characters to
# get rid of \n (counts as 1) and extra 3 digits
t = dt.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')
self.timestamps_unsync.append(t)
self.timestamps_unsync = np.array(self.timestamps_unsync)
# Read and parse the timestamps
self.timestamps_velo = []
with open(timestamp_file_velo, 'r') as f:
for line in f.readlines():
# NB: datetime only supports microseconds, but KITTI timestamps
# give nanoseconds, so need to truncate last 4 characters to
# get rid of \n (counts as 1) and extra 3 digits
t = dt.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')
self.timestamps_velo.append(t)
self.timestamps_velo = np.array(self.timestamps_velo)
# Read and parse the timestamps
self.timestamps_sync = []
with open(timestamp_file_sync, 'r') as f:
for line in f.readlines():
# NB: datetime only supports microseconds, but KITTI timestamps
# give nanoseconds, so need to truncate last 4 characters to
# get rid of \n (counts as 1) and extra 3 digits
t = dt.datetime.strptime(line[:-4], '%Y-%m-%d %H:%M:%S.%f')
self.timestamps_sync.append(t)
self.timestamps_sync = np.array(self.timestamps_sync)
def _load_oxts(self):
"""Load OXTS data from file."""
self.oxts_sync = np.array(utils.load_oxts_packets_and_poses(self.oxts_files_sync))
self.oxts_unsync = np.array(utils.load_oxts_packets_and_poses(self.oxts_files_unsync))
def _load_oxts_bin(self):
oxts_file_sync = os.path.join(self.data_path_sync, 'oxts', 'data.pkl')
with open(oxts_file_sync, 'rb') as f:
self.oxts_sync = pickle.load(f)
oxts_file_unsync = os.path.join(self.data_path_unsync, 'oxts', 'data.pkl')
with open(oxts_file_unsync, 'rb') as f:
self.oxts_unsync = pickle.load(f)
def _load_oxts_lazy(self, indices):
oxts = utils.load_oxts_packets_and_poses(self.oxts_files_sync[indices])
return oxts
class Kitti(data.Dataset):
    # The unsynced KITTI raw dataset has some timestamp holes - e.g. 2011_10_03_27,
    # where some velodyne frames have no corresponding IMU/GPS measurement.
    # We set a minimum number of samples so we can detect and ignore these holes.
DEFAULT_NUM_OXT_SAMPLES = 15
def __init__(self, config, ds_type='train', transform=None, has_imu=True, has_lidar=True):
"""
:param root_path:
:param config: Configuration file including split settings
:param transform:
"""
ds_config_common = config['datasets']
ds_config = ds_config_common['kitti']
        self._seq_size = ds_config_common['sequence-size'] # internal size is incremented because we always need one extra sample
self.internal_seq_size = self.seq_size + 1
self.inv_depth = ds_config.get('inverse-depth', False)
self.mean_img = ds_config['mean-image']
self.std_img = ds_config['std-image']
self.mean_imu = ds_config['mean-imu']
self.std_imu = ds_config['std-imu']
self.channels = config['channels']
self.has_imu = has_imu
self.has_lidar = has_lidar
crop_factors = ds_config.get('crop-factors', [0, 0])
self.crop_top = crop_factors[0]
self.crop_left = crop_factors[1]
self.ds_type = ds_type
self.transform = transform
self.datasets = []
self.length_each_drive = []
self.bins = []
self.images = [None] * self.internal_seq_size
root_path_sync = ds_config['root-path-sync']
root_path_unsync = ds_config['root-path-unsync']
        # Since we are interested in sequences of lidar frames - i.e. multiple frames at each iteration -
        # depending on the sequence size and the index requested by the pytorch dataloader we may have to
        # switch between drives when a specific drive does not contain enough frames for a full sequence.
        # Therefore we separate the valid indices of each drive into bins.
last_bin_end = -1
for date, drives in ds_config[self.ds_type].items():
for drive in drives:
date = str(date).replace('-', '_')
drive = '{0:04d}'.format(drive)
ds = KittiRawData(root_path_sync, root_path_unsync, date, drive, ds_config_common, oxts_bin=True)
length = len(ds)
bin_start = last_bin_end + 1
bin_end = bin_start + length - 1
self.bins.append([bin_start, bin_end])
last_bin_end = bin_end
self.length_each_drive.append(length)
self.datasets.append(ds)
self.bins = np.asarray(self.bins)
self.length_each_drive = np.array(self.length_each_drive)
self.length = self.bins.flatten()[-1] + 1
self.logger = logger.get_app_logger()
@property
def seq_size(self):
return self._seq_size
@seq_size.setter
def seq_size(self, size):
        self._seq_size = size  # assign the backing attribute; 'self.seq_size = size' would recurse into this setter
self.internal_seq_size = size + 1
def load_ground_truth(self, dataset, indices):
gts_alls = dataset.oxts_sync[indices]
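        # each oxts entry is (packet, pose): keep the translation, the flattened rotation and the body-frame velocity (vf, vl, vu)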
gts = []
for gt in gts_alls:
T = gt[1]
x = T[:3, 3].flatten()
R = T[:3, :3].flatten()
v = np.array([gt[0].vf, gt[0].vl, gt[0].vu])
gts.append(np.hstack((x, R, v)))
return np.array(gts)
def load_images(self, dataset, indices):
threads = [None] * self.internal_seq_size
for i in range(self.internal_seq_size):
threads[i] = Thread(target=self.load_image, args=(dataset, indices[i], i))
threads[i].start()
for i in range(self.internal_seq_size):
threads[i].join()
def load_image(self, dataset, ds_index, img_index):
img = dataset.get_velo_image(ds_index)
self.images[img_index] = img
def load_imus(self, dataset, velo_timestamps):
imus = []
valids = []
for i in range(self.internal_seq_size - 1):
velo_start_ts = velo_timestamps[i]
velo_stop_ts = velo_timestamps[i+1]
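            # select the unsynced IMU/GPS samples that fall between two consecutive velodyne frame timestamps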
mask = ((dataset.timestamps_unsync >= velo_start_ts) & (dataset.timestamps_unsync < velo_stop_ts))
oxt_indices = np.argwhere(mask).flatten()
len_oxt = len(oxt_indices)
if (len_oxt== 0):
self.logger.debug("No OXT-samples: DS: {}_{}, len:{}, velo-timestamps: {}-{}".
format(dataset.date, dataset.drive, len_oxt, velo_start_ts, velo_stop_ts))
imu_values = np.zeros((self.DEFAULT_NUM_OXT_SAMPLES, 6), dtype=np.float)
valids.append(False)
else:
oxts = dataset.oxts_unsync[oxt_indices]
imu_values = np.array([[oxt[0].ax, oxt[0].ay, oxt[0].az,
oxt[0].wx, oxt[0].wy, oxt[0].wz]
for oxt in oxts], dtype=np.float)
imu_values = np.pad(imu_values, ((0, np.maximum(self.DEFAULT_NUM_OXT_SAMPLES - len_oxt, 0).astype(np.int)), (0, 0)))
if self.DEFAULT_NUM_OXT_SAMPLES < len_oxt:
imu_values = imu_values[0:self.DEFAULT_NUM_OXT_SAMPLES, :]
valids.append(True)
imus.append(imu_values)
return imus, valids
def transform_images(self):
imgs_org = torch.stack([torch.from_numpy(im.transpose(2, 0, 1)) for im in self.images])
imgs_org = imgs_org[:, self.channels]
ct, cl = self.crop_top, self.crop_left
mean = torch.as_tensor(self.mean_img)
std = torch.as_tensor(self.std_img)
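        # note: only the mean is subtracted below; the std tensor is prepared but not applied here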
if ct > 0 and cl == 0:
imgs_normalized = [torch.from_numpy(img[ct:-ct, :, :].transpose(2, 0, 1)) for img in self.images]
elif ct > 0 and cl > 0:
imgs_normalized = [torch.from_numpy(img[ct:-ct, cl:-cl, :].transpose(2, 0, 1)) for img in self.images]
elif ct == 0 and cl > 0:
imgs_normalized = [torch.from_numpy(img[:, cl:-cl, :].transpose(2, 0, 1)) for img in self.images]
else:
imgs_normalized = [torch.from_numpy(img.transpose(2, 0, 1)) for img in self.images]
imgs_normalized = torch.stack(imgs_normalized)
imgs_normalized.sub_(mean[None, :, None, None])
imgs_normalized = imgs_normalized[:, self.channels]
return imgs_org, imgs_normalized
def transform_imus(self, imus):
imus_norm = [torch.from_numpy((imu - self.mean_imu) / self.std_imu).type(torch.float32) for imu in imus]
return imus_norm
def get_dataset_and_index(self, index):
idx = -1
num_drive = -1
for i, bin in enumerate(self.bins):
bin_start = bin[0]
bin_end = bin[1]
if bin_start <= index <= bin_end:
idx = index - bin_start
num_drive = i
break
if idx < 0 or num_drive < 0:
self.logger.error("Error: No bins and no drive number found!")
return None
dataset = self.datasets[num_drive]
# get frame indices
len_ds = len(dataset)
if idx <= len_ds - self.internal_seq_size:
indices = list(range(idx, idx + self.internal_seq_size))
elif (len_ds - self.internal_seq_size) < idx < len_ds:
indices = list(range(len_ds - self.internal_seq_size, len_ds))
else:
self.logger.error("Wrong index ({}) in {}_{}".format(idx, dataset.date, dataset.drive))
raise Exception("Wrong index ({}) in {}_{}".format(idx, dataset.date, dataset.drive))
return dataset, indices
def create_imu_data(self, dataset, indices, velo_timespamps):
# load and transform imus
imus, valids = self.load_imus(dataset, velo_timespamps)
imus = torch.stack(self.transform_imus(imus))
data = {'imus': imus, 'valids': valids}
return data
def create_lidar_data(self, dataset, indices, velo_timespamps):
# load and transform images
self.load_images(dataset, indices)
org_images, proc_images = self.transform_images()
data = {'images': proc_images, 'untrans-images': org_images}
return data
def create_data_deeplio(self, dataset, indices, velo_timespamps):
imu_data = self.create_imu_data(dataset, indices, velo_timespamps)
img_data = self.create_lidar_data(dataset, indices, velo_timespamps)
data = {**imu_data, **img_data}
return data
def __len__(self):
return self.length
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.tolist()
start = time.time()
dataset, indices = self.get_dataset_and_index(index)
# Get frame timestamps
velo_timespamps = [dataset.timestamps_velo[idx] for idx in indices]
lidar_data = {}
if self.has_lidar:
lidar_data = self.create_lidar_data(dataset, indices, velo_timespamps)
imu_data = {}
if self.has_imu:
imu_data = self.create_imu_data(dataset, indices, velo_timespamps)
arch_data = {**imu_data, **lidar_data}
# load and transform ground truth
gts = torch.from_numpy(self.load_ground_truth(dataset, indices)).type(torch.float32)
meta_data = {'index': [index], 'date': [dataset.date], 'drive': [dataset.drive], 'velo-index': [indices],
'velo-timestamps': [ts.timestamp() for ts in velo_timespamps]}
data = {'data': arch_data, 'gts': gts, 'meta': meta_data}
end = time.time()
#print(index, dataset.data_path, indices)
#self.logger.debug("Idx:{}, dt: {}".format(index, end - start))
return data
def __repr__(self):
        # print dataset information
        rep = "Kitti-Dataset\n" \
"Type: {}, Length: {}, Seq.length: {}\n" \
"Date\tDrive\tlength\tstart-end\n".format(self.ds_type, self.length, self.internal_seq_size)
seqs = ""
for i in range(len(self.length_each_drive)):
date = self.datasets[i].date
drive = self.datasets[i].drive
length = self.length_each_drive[i]
bins = self.bins[i]
seqs = "".join("{}{}\t{}\t{}\t{}\n".format(seqs, date, drive, length, bins))
rep = "{}{}".format(rep,seqs)
return rep
|
[
"deeplio.common.laserscan.LaserScan",
"numpy.maximum",
"pickle.load",
"os.path.join",
"deeplio.common.utils.transform_from_rot_trans",
"deeplio.common.logger.get_app_logger",
"deeplio.common.utils.subselect_files",
"torch.is_tensor",
"numpy.dstack",
"threading.Thread",
"deeplio.common.utils.read_calib_file",
"numpy.asarray",
"numpy.hstack",
"datetime.datetime.strptime",
"numpy.argwhere",
"torch.from_numpy",
"torch.stack",
"numpy.zeros",
"deeplio.common.utils.load_oxts_packets_and_poses",
"time.time",
"numpy.array",
"torch.as_tensor",
"deeplio.common.utils.load_velo_scan"
] |
[((1279, 1313), 'os.path.join', 'os.path.join', (['base_path_sync', 'date'], {}), '(base_path_sync, date)\n', (1291, 1313), False, 'import os\n'), ((1345, 1401), 'os.path.join', 'os.path.join', (['base_path_sync', 'date', 'self.drive_full_sync'], {}), '(base_path_sync, date, self.drive_full_sync)\n', (1357, 1401), False, 'import os\n'), ((1434, 1494), 'os.path.join', 'os.path.join', (['base_path_unsync', 'date', 'self.drive_full_unsync'], {}), '(base_path_unsync, date, self.drive_full_unsync)\n', (1446, 1494), False, 'import os\n'), ((2572, 2614), 'deeplio.common.utils.load_velo_scan', 'utils.load_velo_scan', (['self.velo_files[idx]'], {}), '(self.velo_files[idx])\n', (2592, 2614), False, 'from deeplio.common import utils, logger\n'), ((2666, 2836), 'deeplio.common.laserscan.LaserScan', 'LaserScan', ([], {'project': '(False)', 'H': 'self.image_height', 'W': 'self.image_width', 'fov_up': 'self.fov_up', 'fov_down': 'self.fov_down', 'min_depth': 'self.min_depth', 'max_depth': 'self.max_depth'}), '(project=False, H=self.image_height, W=self.image_width, fov_up=\n self.fov_up, fov_down=self.fov_down, min_depth=self.min_depth,\n max_depth=self.max_depth)\n', (2675, 2836), False, 'from deeplio.common.laserscan import LaserScan\n'), ((3187, 3249), 'numpy.dstack', 'np.dstack', (['(proj_xyz, proj_remission, proj_normal, proj_range)'], {}), '((proj_xyz, proj_remission, proj_normal, proj_range))\n', (3196, 3249), True, 'import numpy as np\n'), ((3364, 3478), 'numpy.array', 'np.array', (['[[oxt[0].ax, oxt[0].ay, oxt[0].az, oxt[0].wx, oxt[0].wy, oxt[0].wz] for oxt in\n oxt]'], {'dtype': 'np.float'}), '([[oxt[0].ax, oxt[0].ay, oxt[0].az, oxt[0].wx, oxt[0].wy, oxt[0].wz\n ] for oxt in oxt], dtype=np.float)\n', (3372, 3478), True, 'import numpy as np\n'), ((4289, 4316), 'numpy.asarray', 'np.asarray', (['self.velo_files'], {}), '(self.velo_files)\n', (4299, 4316), True, 'import numpy as np\n'), ((4703, 4735), 'numpy.asarray', 'np.asarray', (['self.oxts_files_sync'], {}), '(self.oxts_files_sync)\n', (4713, 4735), True, 'import numpy as np\n'), ((5046, 5080), 'numpy.asarray', 'np.asarray', (['self.oxts_files_unsync'], {}), '(self.oxts_files_unsync)\n', (5056, 5080), True, 'import numpy as np\n'), ((5216, 5255), 'os.path.join', 'os.path.join', (['self.calib_path', 'filename'], {}), '(self.calib_path, filename)\n', (5228, 5255), False, 'import os\n'), ((5271, 5302), 'deeplio.common.utils.read_calib_file', 'utils.read_calib_file', (['filepath'], {}), '(filepath)\n', (5292, 5302), False, 'from deeplio.common import utils, logger\n'), ((5318, 5370), 'deeplio.common.utils.transform_from_rot_trans', 'utils.transform_from_rot_trans', (["data['R']", "data['T']"], {}), "(data['R'], data['T'])\n", (5348, 5370), False, 'from deeplio.common import utils, logger\n'), ((5889, 5950), 'os.path.join', 'os.path.join', (['self.data_path_unsync', '"""oxts"""', '"""timestamps.txt"""'], {}), "(self.data_path_unsync, 'oxts', 'timestamps.txt')\n", (5901, 5950), False, 'import os\n'), ((5981, 6051), 'os.path.join', 'os.path.join', (['self.data_path_sync', '"""velodyne_points"""', '"""timestamps.txt"""'], {}), "(self.data_path_sync, 'velodyne_points', 'timestamps.txt')\n", (5993, 6051), False, 'import os\n'), ((6082, 6141), 'os.path.join', 'os.path.join', (['self.data_path_sync', '"""oxts"""', '"""timestamps.txt"""'], {}), "(self.data_path_sync, 'oxts', 'timestamps.txt')\n", (6094, 6141), False, 'import os\n'), ((6690, 6722), 'numpy.array', 'np.array', (['self.timestamps_unsync'], {}), '(self.timestamps_unsync)\n', (6698, 6722), True, 
'import numpy as np\n'), ((7263, 7293), 'numpy.array', 'np.array', (['self.timestamps_velo'], {}), '(self.timestamps_velo)\n', (7271, 7293), True, 'import numpy as np\n'), ((7834, 7864), 'numpy.array', 'np.array', (['self.timestamps_sync'], {}), '(self.timestamps_sync)\n', (7842, 7864), True, 'import numpy as np\n'), ((8174, 8227), 'os.path.join', 'os.path.join', (['self.data_path_sync', '"""oxts"""', '"""data.pkl"""'], {}), "(self.data_path_sync, 'oxts', 'data.pkl')\n", (8186, 8227), False, 'import os\n'), ((8346, 8401), 'os.path.join', 'os.path.join', (['self.data_path_unsync', '"""oxts"""', '"""data.pkl"""'], {}), "(self.data_path_unsync, 'oxts', 'data.pkl')\n", (8358, 8401), False, 'import os\n'), ((8552, 8616), 'deeplio.common.utils.load_oxts_packets_and_poses', 'utils.load_oxts_packets_and_poses', (['self.oxts_files_sync[indices]'], {}), '(self.oxts_files_sync[indices])\n', (8585, 8616), False, 'from deeplio.common import utils, logger\n'), ((11302, 11323), 'numpy.asarray', 'np.asarray', (['self.bins'], {}), '(self.bins)\n', (11312, 11323), True, 'import numpy as np\n'), ((11357, 11389), 'numpy.array', 'np.array', (['self.length_each_drive'], {}), '(self.length_each_drive)\n', (11365, 11389), True, 'import numpy as np\n'), ((11464, 11487), 'deeplio.common.logger.get_app_logger', 'logger.get_app_logger', ([], {}), '()\n', (11485, 11487), False, 'from deeplio.common import utils, logger\n'), ((12033, 12046), 'numpy.array', 'np.array', (['gts'], {}), '(gts)\n', (12041, 12046), True, 'import numpy as np\n'), ((14235, 14265), 'torch.as_tensor', 'torch.as_tensor', (['self.mean_img'], {}), '(self.mean_img)\n', (14250, 14265), False, 'import torch\n'), ((14280, 14309), 'torch.as_tensor', 'torch.as_tensor', (['self.std_img'], {}), '(self.std_img)\n', (14295, 14309), False, 'import torch\n'), ((14878, 14906), 'torch.stack', 'torch.stack', (['imgs_normalized'], {}), '(imgs_normalized)\n', (14889, 14906), False, 'import torch\n'), ((17271, 17293), 'torch.is_tensor', 'torch.is_tensor', (['index'], {}), '(index)\n', (17286, 17293), False, 'import torch\n'), ((17347, 17358), 'time.time', 'time.time', ([], {}), '()\n', (17356, 17358), False, 'import time\n'), ((18255, 18266), 'time.time', 'time.time', ([], {}), '()\n', (18264, 18266), False, 'import time\n'), ((4194, 4245), 'deeplio.common.utils.subselect_files', 'utils.subselect_files', (['self.velo_files', 'self.frames'], {}), '(self.velo_files, self.frames)\n', (4215, 4245), False, 'from deeplio.common import utils, logger\n'), ((4598, 4654), 'deeplio.common.utils.subselect_files', 'utils.subselect_files', (['self.oxts_files_sync', 'self.frames'], {}), '(self.oxts_files_sync, self.frames)\n', (4619, 4654), False, 'from deeplio.common import utils, logger\n'), ((4937, 4995), 'deeplio.common.utils.subselect_files', 'utils.subselect_files', (['self.oxts_files_unsync', 'self.frames'], {}), '(self.oxts_files_unsync, self.frames)\n', (4958, 4995), False, 'from deeplio.common import utils, logger\n'), ((7966, 8021), 'deeplio.common.utils.load_oxts_packets_and_poses', 'utils.load_oxts_packets_and_poses', (['self.oxts_files_sync'], {}), '(self.oxts_files_sync)\n', (7999, 8021), False, 'from deeplio.common import utils, logger\n'), ((8059, 8116), 'deeplio.common.utils.load_oxts_packets_and_poses', 'utils.load_oxts_packets_and_poses', (['self.oxts_files_unsync'], {}), '(self.oxts_files_unsync)\n', (8092, 8116), False, 'from deeplio.common import utils, logger\n'), ((8303, 8317), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8314, 8317), False, 
'import pickle\n'), ((8481, 8495), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (8492, 8495), False, 'import pickle\n'), ((11932, 11972), 'numpy.array', 'np.array', (['[gt[0].vf, gt[0].vl, gt[0].vu]'], {}), '([gt[0].vf, gt[0].vl, gt[0].vu])\n', (11940, 11972), True, 'import numpy as np\n'), ((12217, 12278), 'threading.Thread', 'Thread', ([], {'target': 'self.load_image', 'args': '(dataset, indices[i], i)'}), '(target=self.load_image, args=(dataset, indices[i], i))\n', (12223, 12278), False, 'from threading import Thread\n'), ((3691, 3760), 'os.path.join', 'os.path.join', (['self.data_path_sync', '"""velodyne_points"""', '"""data"""', '"""*.bin"""'], {}), "(self.data_path_sync, 'velodyne_points', 'data', '*.bin')\n", (3703, 3760), False, 'import os\n'), ((4465, 4523), 'os.path.join', 'os.path.join', (['self.data_path_sync', '"""oxts"""', '"""data"""', '"""*.txt"""'], {}), "(self.data_path_sync, 'oxts', 'data', '*.txt')\n", (4477, 4523), False, 'import os\n'), ((4800, 4860), 'os.path.join', 'os.path.join', (['self.data_path_unsync', '"""oxts"""', '"""data"""', '"""*.txt"""'], {}), "(self.data_path_unsync, 'oxts', 'data', '*.txt')\n", (4812, 4860), False, 'import os\n'), ((6552, 6607), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['line[:-4]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n", (6572, 6607), True, 'import datetime as dt\n'), ((7129, 7184), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['line[:-4]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n", (7149, 7184), True, 'import datetime as dt\n'), ((7700, 7755), 'datetime.datetime.strptime', 'dt.datetime.strptime', (['line[:-4]', '"""%Y-%m-%d %H:%M:%S.%f"""'], {}), "(line[:-4], '%Y-%m-%d %H:%M:%S.%f')\n", (7720, 7755), True, 'import datetime as dt\n'), ((11996, 12016), 'numpy.hstack', 'np.hstack', (['(x, R, v)'], {}), '((x, R, v))\n', (12005, 12016), True, 'import numpy as np\n'), ((13236, 13295), 'numpy.zeros', 'np.zeros', (['(self.DEFAULT_NUM_OXT_SAMPLES, 6)'], {'dtype': 'np.float'}), '((self.DEFAULT_NUM_OXT_SAMPLES, 6), dtype=np.float)\n', (13244, 13295), True, 'import numpy as np\n'), ((13436, 13551), 'numpy.array', 'np.array', (['[[oxt[0].ax, oxt[0].ay, oxt[0].az, oxt[0].wx, oxt[0].wy, oxt[0].wz] for oxt in\n oxts]'], {'dtype': 'np.float'}), '([[oxt[0].ax, oxt[0].ay, oxt[0].az, oxt[0].wx, oxt[0].wy, oxt[0].wz\n ] for oxt in oxts], dtype=np.float)\n', (13444, 13551), True, 'import numpy as np\n'), ((3969, 4040), 'os.path.join', 'os.path.join', (['self.data_path_unsync', '"""velodyne_points"""', '"""data"""', '"""*.txt"""'], {}), "(self.data_path_unsync, 'velodyne_points', 'data', '*.txt')\n", (3981, 4040), False, 'import os\n'), ((12905, 12922), 'numpy.argwhere', 'np.argwhere', (['mask'], {}), '(mask)\n', (12916, 12922), True, 'import numpy as np\n'), ((15123, 15177), 'torch.from_numpy', 'torch.from_numpy', (['((imu - self.mean_imu) / self.std_imu)'], {}), '((imu - self.mean_imu) / self.std_imu)\n', (15139, 15177), False, 'import torch\n'), ((13679, 13732), 'numpy.maximum', 'np.maximum', (['(self.DEFAULT_NUM_OXT_SAMPLES - len_oxt)', '(0)'], {}), '(self.DEFAULT_NUM_OXT_SAMPLES - len_oxt, 0)\n', (13689, 13732), True, 'import numpy as np\n')]
|
"""
_physical_abstract_data.py
Copyright 2016 University of Melbourne.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import numpy as np
import os
from fourdvar.datadef.abstract._fourdvar_data import FourDVarData
import fourdvar.util.netcdf_handle as ncf
from fourdvar.util.archive_handle import get_archive_path
import fourdvar.util.date_handle as dt
from fourdvar.params.input_defn import inc_icon
import setup_logging
logger = setup_logging.get_logger( __file__ )
class PhysicalAbstractData( FourDVarData ):
"""Parent for PhysicalData and PhysicalAdjointData
"""
#Parameters
tsec = None #No. seconds per timestep
nstep = None #No. timesteps for emis data
nlays_emis = None #No. layers for emis_data
nrows = None #No. rows for all data
ncols = None #No. columns for all data
spcs = None #list of species for all data
emis_unc = None #dict of emis uncertainty values
if inc_icon is True:
nlays_icon = None #No. layers for icon data
icon_unc = None #dict of icon uncertainty values
#this class variable should be overloaded in children
icon_units = 'NA' #unit to attach to netCDF archive
#these class variables should be overloaded in children
archive_name = 'physical_abstract_data.ncf' #default archive filename
emis_units = 'NA' #unit to attach to netCDF archive
def __init__( self, icon_dict, emis_dict ):
"""
application: create an instance of PhysicalData
input: user-defined
output: None
eg: new_phys = datadef.PhysicalData( filelist )
"""
#icon_dict: {var-name: np.array([layer, row, column])
#emis_dict: {var-name: np.array([time, layer, row, column])
        #params must all be set and not None (usually using cls.from_file)
self.assert_params()
if inc_icon is True:
assert set( icon_dict.keys() ) == set( self.spcs ), 'invalid icon spcs.'
self.icon = {}
assert set( emis_dict.keys() ) == set( self.spcs ), 'invalid emis spcs.'
self.emis = {}
for spcs_name in self.spcs:
if inc_icon is True:
icon_data = np.array( icon_dict[ spcs_name ] )
assert len( icon_data.shape ) == 3, 'icon dimensions invalid.'
inl,inr,inc = icon_data.shape
assert inl == self.nlays_icon, 'icon layers invalid.'
assert inr == self.nrows, 'icon rows invalid.'
assert inc == self.ncols, 'icon columns invalid.'
self.icon[ spcs_name ] = icon_data
emis_data = np.array( emis_dict[ spcs_name ] )
assert len( emis_data.shape ) == 4, 'emis dimensions invalid.'
ent,enl,enr,enc = emis_data.shape
assert ent == self.nstep, 'emis timesteps invalid.'
assert enl == self.nlays_emis, 'emis layers invalid.'
assert enr == self.nrows, 'emis rows invalid.'
assert enc == self.ncols, 'emis columns invalid.'
self.emis[ spcs_name ] = emis_data
return None
def archive( self, path=None ):
"""
extension: save a copy of data to archive/experiment directory
input: string or None
output: None
notes: this will overwrite any clash in namespace.
if input is None file will write default archive_name.
output is a netCDF file compatible with from_file method.
"""
unc = lambda spc: spc + '_UNC'
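        # uncertainty arrays are archived next to each species under the name '<spc>_UNC'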
save_path = get_archive_path()
if path is None:
path = self.archive_name
save_path = os.path.join( save_path, path )
if os.path.isfile( save_path ):
os.remove( save_path )
#construct netCDF file
attr_dict = { 'SDATE': np.int32( dt.replace_date('<YYYYDDD>',dt.start_date) ),
'EDATE': np.int32( dt.replace_date('<YYYYDDD>',dt.end_date) ) }
minute, second = divmod( self.tsec, 60 )
hour, minute = divmod( minute, 60 )
day, hour = divmod( hour, 24 )
hms = int( '{:02}{:02}{:02}'.format( hour, minute, second ) )
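        # TSTEP is stored as [whole days, HHMMSS]; from_file decodes it back into seconds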
attr_dict[ 'TSTEP' ] = np.array( [np.int32(day), np.int32(hms)] )
var_list =''.join( [ '{:<16}'.format( s ) for s in self.spcs ] )
attr_dict[ 'VAR-LIST' ] = var_list
dim_dict = { 'ROW': self.nrows, 'COL': self.ncols }
root = ncf.create( path=save_path, attr=attr_dict, dim=dim_dict,
is_root=True )
if inc_icon is True:
icon_dim = { 'LAY': self.nlays_icon }
icon_var = {}
emis_dim = { 'LAY': self.nlays_emis, 'TSTEP': None }
emis_var = {}
for spc in self.spcs:
if inc_icon is True:
icon_var[ spc ] = ( 'f4', ('LAY','ROW','COL',), self.icon[ spc ] )
icon_var[ unc(spc) ] = ( 'f4', ('LAY','ROW','COL'),
self.icon_unc[ spc ] )
emis_var[ spc ] = ( 'f4', ('TSTEP','LAY','ROW','COL'),
self.emis[ spc ] )
emis_var[ unc(spc) ] = ( 'f4', ('TSTEP','LAY','ROW','COL'),
self.emis_unc[ spc ] )
if inc_icon is True:
ncf.create( parent=root, name='icon', dim=icon_dim, var=icon_var,
is_root=False )
ncf.create( parent=root, name='emis', dim=emis_dim, var=emis_var,
is_root=False )
root.close()
return None
@classmethod
def from_file( cls, filename ):
"""
extension: create a PhysicalData instance from a file
input: user-defined
output: PhysicalData
eg: prior_phys = datadef.PhysicalData.from_file( "saved_prior.data" )
"""
daysec = 24*60*60
unc = lambda spc: spc + '_UNC'
#get all data/parameters from file
sdate = str( ncf.get_attr( filename, 'SDATE' ) )
edate = str( ncf.get_attr( filename, 'EDATE' ) )
tstep = ncf.get_attr( filename, 'TSTEP' )
day, step = int(tstep[0]), int(tstep[1])
tsec = daysec*day + 3600*(step//10000) + 60*((step//100)%100) + (step)%100
spcs_list = ncf.get_attr( filename, 'VAR-LIST' ).split()
unc_list = [ unc( spc ) for spc in spcs_list ]
if inc_icon is True:
icon_dict = ncf.get_variable( filename, spcs_list, group='icon' )
icon_unc = ncf.get_variable( filename, unc_list, group='icon' )
emis_dict = ncf.get_variable( filename, spcs_list, group='emis' )
emis_unc = ncf.get_variable( filename, unc_list, group='emis' )
for spc in spcs_list:
if inc_icon is True:
icon_unc[ spc ] = icon_unc.pop( unc( spc ) )
emis_unc[ spc ] = emis_unc.pop( unc( spc ) )
#ensure parameters from file are valid
msg = 'invalid start date'
assert sdate == dt.replace_date( '<YYYYDDD>', dt.start_date ), msg
msg = 'invalid end date'
assert edate == dt.replace_date( '<YYYYDDD>', dt.end_date ), msg
emis_shape = [ e.shape for e in emis_dict.values() ]
for eshape in emis_shape[1:]:
assert eshape == emis_shape[0], 'all emis spcs must have the same shape.'
estep, elays, erows, ecols = emis_shape[0]
if inc_icon is True:
icon_shape = [ i.shape for i in icon_dict.values() ]
for ishape in icon_shape[1:]:
assert ishape == icon_shape[0], 'all icon spcs must have the same shape.'
ilays, irows, icols = icon_shape[0]
assert irows == erows, 'icon & emis must match rows.'
assert icols == ecols, 'icon & emis must match columns.'
assert max(daysec,tsec) % min(daysec,tsec) == 0, 'tsec must be a factor or multiple of No. seconds in a day.'
assert (tsec >= daysec) or (estep % (daysec//tsec) == 0), 'nstep must cleanly divide into days.'
for spc in spcs_list:
msg = 'Uncertainty values are invalid for this data.'
if inc_icon is True:
assert icon_unc[ spc ].shape == icon_dict[ spc ].shape, msg
assert ( icon_unc[ spc ] > 0 ).all(), msg
assert emis_unc[ spc ].shape == emis_dict[ spc ].shape, msg
assert ( emis_unc[ spc ] > 0 ).all(), msg
#assign new param values.
par_name = ['tsec','nstep','nlays_emis','nrows','ncols','spcs','emis_unc']
par_val = [tsec, estep, elays, erows, ecols, spcs_list, emis_unc]
par_mutable = ['emis_unc']
if inc_icon is True:
par_name += [ 'nlays_icon', 'icon_unc' ]
par_val += [ ilays, icon_unc ]
par_mutable += ['icon_unc']
for name, val in zip( par_name, par_val ):
old_val = getattr( cls, name )
if old_val is not None:
#param already defined, ensure no clash.
if name in par_mutable:
                    #parameter is mutable, the change is applied globally
msg = 'Any change to PhysicalAbstractData.{} is applied globally!'.format( name )
logger.warn( msg )
else:
msg = 'cannot change PhysicalAbstractData.{}'.format( name )
assert np.array_equal( old_val, val ), msg
#set this abstract classes attribute, not calling child!
setattr( PhysicalAbstractData, name, val )
if inc_icon is False:
icon_dict = None
return cls( icon_dict, emis_dict )
@classmethod
def example( cls ):
"""
application: return a valid example with arbitrary values.
input: None
output: PhysicalData
eg: mock_phys = datadef.PhysicalData.example()
notes: only used for testing.
must have date_handle dates & PhysicalData parameters already defined.
"""
icon_val = 0
emis_val = 0
        #params must all be set and not None (usually using cls.from_file)
cls.assert_params()
if inc_icon is True:
icon_val += np.zeros((cls.nlays_icon, cls.nrows, cls.ncols))
icon_dict = { spc: icon_val.copy() for spc in cls.spcs }
else:
icon_dict = None
emis_val += np.zeros((cls.nstep, cls.nlays_emis, cls.nrows, cls.ncols))
emis_dict = { spc: emis_val.copy() for spc in cls.spcs }
return cls( icon_dict, emis_dict )
@classmethod
def assert_params( cls ):
"""
extension: assert that all needed physical parameters are valid.
input: None
output: None
notes: method raises assertion error if None valued parameter is found.
"""
par_name = ['tsec','nstep','nlays_emis','nrows','ncols','spcs','emis_unc']
if inc_icon is True:
par_name += [ 'nlays_icon', 'icon_unc' ]
for param in par_name:
msg = 'missing definition for {0}.{1}'.format( cls.__name__, param )
assert getattr( cls, param ) is not None, msg
assert max(24*60*60,cls.tsec) % min(24*60*60,cls.tsec) == 0, 'invalid step size (tsec).'
assert (cls.tsec>=24*60*60) or (cls.nstep % ((24*60*60)//cls.tsec) == 0), 'invalid step count (nstep).'
return None
def cleanup( self ):
"""
application: called when physical data instance is no longer required
input: None
output: None
eg: old_phys.cleanup()
notes: called after test instance is no longer needed, used to delete files etc.
"""
pass
return None
|
[
"os.remove",
"setup_logging.get_logger",
"fourdvar.util.date_handle.replace_date",
"numpy.zeros",
"fourdvar.util.archive_handle.get_archive_path",
"fourdvar.util.netcdf_handle.create",
"os.path.isfile",
"numpy.array",
"fourdvar.util.netcdf_handle.get_variable",
"numpy.int32",
"numpy.array_equal",
"fourdvar.util.netcdf_handle.get_attr",
"os.path.join"
] |
[((909, 943), 'setup_logging.get_logger', 'setup_logging.get_logger', (['__file__'], {}), '(__file__)\n', (933, 943), False, 'import setup_logging\n'), ((4183, 4201), 'fourdvar.util.archive_handle.get_archive_path', 'get_archive_path', ([], {}), '()\n', (4199, 4201), False, 'from fourdvar.util.archive_handle import get_archive_path\n'), ((4284, 4313), 'os.path.join', 'os.path.join', (['save_path', 'path'], {}), '(save_path, path)\n', (4296, 4313), False, 'import os\n'), ((4327, 4352), 'os.path.isfile', 'os.path.isfile', (['save_path'], {}), '(save_path)\n', (4341, 4352), False, 'import os\n'), ((5071, 5141), 'fourdvar.util.netcdf_handle.create', 'ncf.create', ([], {'path': 'save_path', 'attr': 'attr_dict', 'dim': 'dim_dict', 'is_root': '(True)'}), '(path=save_path, attr=attr_dict, dim=dim_dict, is_root=True)\n', (5081, 5141), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((6069, 6148), 'fourdvar.util.netcdf_handle.create', 'ncf.create', ([], {'parent': 'root', 'name': '"""emis"""', 'dim': 'emis_dim', 'var': 'emis_var', 'is_root': '(False)'}), "(parent=root, name='emis', dim=emis_dim, var=emis_var, is_root=False)\n", (6079, 6148), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((6747, 6778), 'fourdvar.util.netcdf_handle.get_attr', 'ncf.get_attr', (['filename', '"""TSTEP"""'], {}), "(filename, 'TSTEP')\n", (6759, 6778), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((7245, 7296), 'fourdvar.util.netcdf_handle.get_variable', 'ncf.get_variable', (['filename', 'spcs_list'], {'group': '"""emis"""'}), "(filename, spcs_list, group='emis')\n", (7261, 7296), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((7318, 7368), 'fourdvar.util.netcdf_handle.get_variable', 'ncf.get_variable', (['filename', 'unc_list'], {'group': '"""emis"""'}), "(filename, unc_list, group='emis')\n", (7334, 7368), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((11131, 11190), 'numpy.zeros', 'np.zeros', (['(cls.nstep, cls.nlays_emis, cls.nrows, cls.ncols)'], {}), '((cls.nstep, cls.nlays_emis, cls.nrows, cls.ncols))\n', (11139, 11190), True, 'import numpy as np\n'), ((3227, 3257), 'numpy.array', 'np.array', (['emis_dict[spcs_name]'], {}), '(emis_dict[spcs_name])\n', (3235, 3257), True, 'import numpy as np\n'), ((4368, 4388), 'os.remove', 'os.remove', (['save_path'], {}), '(save_path)\n', (4377, 4388), False, 'import os\n'), ((5955, 6034), 'fourdvar.util.netcdf_handle.create', 'ncf.create', ([], {'parent': 'root', 'name': '"""icon"""', 'dim': 'icon_dim', 'var': 'icon_var', 'is_root': '(False)'}), "(parent=root, name='icon', dim=icon_dim, var=icon_var, is_root=False)\n", (5965, 6034), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((6638, 6669), 'fourdvar.util.netcdf_handle.get_attr', 'ncf.get_attr', (['filename', '"""SDATE"""'], {}), "(filename, 'SDATE')\n", (6650, 6669), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((6695, 6726), 'fourdvar.util.netcdf_handle.get_attr', 'ncf.get_attr', (['filename', '"""EDATE"""'], {}), "(filename, 'EDATE')\n", (6707, 6726), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((7095, 7146), 'fourdvar.util.netcdf_handle.get_variable', 'ncf.get_variable', (['filename', 'spcs_list'], {'group': '"""icon"""'}), "(filename, spcs_list, group='icon')\n", (7111, 7146), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((7172, 7222), 'fourdvar.util.netcdf_handle.get_variable', 'ncf.get_variable', (['filename', 'unc_list'], {'group': '"""icon"""'}), "(filename, unc_list, group='icon')\n", (7188, 7222), True, 'import fourdvar.util.netcdf_handle as 
ncf\n'), ((7676, 7719), 'fourdvar.util.date_handle.replace_date', 'dt.replace_date', (['"""<YYYYDDD>"""', 'dt.start_date'], {}), "('<YYYYDDD>', dt.start_date)\n", (7691, 7719), True, 'import fourdvar.util.date_handle as dt\n'), ((7784, 7825), 'fourdvar.util.date_handle.replace_date', 'dt.replace_date', (['"""<YYYYDDD>"""', 'dt.end_date'], {}), "('<YYYYDDD>', dt.end_date)\n", (7799, 7825), True, 'import fourdvar.util.date_handle as dt\n'), ((10941, 10989), 'numpy.zeros', 'np.zeros', (['(cls.nlays_icon, cls.nrows, cls.ncols)'], {}), '((cls.nlays_icon, cls.nrows, cls.ncols))\n', (10949, 10989), True, 'import numpy as np\n'), ((2746, 2776), 'numpy.array', 'np.array', (['icon_dict[spcs_name]'], {}), '(icon_dict[spcs_name])\n', (2754, 2776), True, 'import numpy as np\n'), ((4463, 4506), 'fourdvar.util.date_handle.replace_date', 'dt.replace_date', (['"""<YYYYDDD>"""', 'dt.start_date'], {}), "('<YYYYDDD>', dt.start_date)\n", (4478, 4506), True, 'import fourdvar.util.date_handle as dt\n'), ((4550, 4591), 'fourdvar.util.date_handle.replace_date', 'dt.replace_date', (['"""<YYYYDDD>"""', 'dt.end_date'], {}), "('<YYYYDDD>', dt.end_date)\n", (4565, 4591), True, 'import fourdvar.util.date_handle as dt\n'), ((4839, 4852), 'numpy.int32', 'np.int32', (['day'], {}), '(day)\n', (4847, 4852), True, 'import numpy as np\n'), ((4854, 4867), 'numpy.int32', 'np.int32', (['hms'], {}), '(hms)\n', (4862, 4867), True, 'import numpy as np\n'), ((6933, 6967), 'fourdvar.util.netcdf_handle.get_attr', 'ncf.get_attr', (['filename', '"""VAR-LIST"""'], {}), "(filename, 'VAR-LIST')\n", (6945, 6967), True, 'import fourdvar.util.netcdf_handle as ncf\n'), ((10083, 10111), 'numpy.array_equal', 'np.array_equal', (['old_val', 'val'], {}), '(old_val, val)\n', (10097, 10111), True, 'import numpy as np\n')]
|
import numpy as np
# Untested function
from source.env.lib.log import Blob
def discountRewards(rewards, gamma=0.99):
rets, N = [], len(rewards)
discounts = np.array([gamma ** i for i in range(N)])
rewards = np.array(rewards)
for idx in range(N): rets.append(sum(rewards[idx:] * discounts[:N - idx]))
return rets
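# Worked example (illustrative numbers, not from the original code):
# discountRewards([1, 1, 1], gamma=0.5) uses discounts [1, 0.5, 0.25] and returns
#   [1 + 0.5 + 0.25, 1 + 0.5, 1] = [1.75, 1.5, 1.0],
# i.e. the discounted return-to-go computed from each timestep onward.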
class Rollout:
def __init__(self):
self.atnArgs = []
self.vals = []
self.rewards = []
self.states = []
self.feather = Feather()
def step(self, atnArgs, val, reward, stim=None):
self.atnArgs.append(atnArgs)
self.vals.append(val)
self.states.append(stim)
self.rewards.append(reward)
def finish(self):
self.lifespan = len(self.rewards)
self.feather.finish()
# Rollout logger
class Feather:
def __init__(self):
self.blob = Blob()
def scrawl(self, apple, ent, reward):
self.blob.annID = ent.annID
self.stats(reward, apple)
def stats(self, reward, apple):
self.blob.reward.append(reward)
self.blob.apples.append(apple)
def finish(self):
self.blob.finish()
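# Hypothetical usage sketch (atnArgs/val/reward/stim and the ent/apple objects come
# from the surrounding environment code, which is not part of this file):
#   rollout = Rollout()
#   rollout.step(atnArgs, val, reward, stim)    # record one environment step
#   rollout.finish()                            # set lifespan and finalize the Blob log
#   rollout.feather.scrawl(apple, ent, reward)  # log per-entity reward/apple stats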
|
[
"source.env.lib.log.Blob",
"numpy.array"
] |
[((223, 240), 'numpy.array', 'np.array', (['rewards'], {}), '(rewards)\n', (231, 240), True, 'import numpy as np\n'), ((873, 879), 'source.env.lib.log.Blob', 'Blob', ([], {}), '()\n', (877, 879), False, 'from source.env.lib.log import Blob\n')]
|
import sys
import os
# import warnings
# import pickle
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from astropy.timeseries import LombScargle
# from astroquery.simbad import Simbad
# cSimbad = Simbad()
# cSimbad.add_votable_fields('ids', 'sptype', 'flux(V)', 'flux(B)')
main_url = 'https://wasp.cerit-sc.cz'
def build_search_url(star, limit=1, radius=1):
star = star.replace(' ', '').replace('+', '%2B')
url = f'{main_url}/search?'\
f'objid={star}&limit={limit}&radius={radius}&radiusUnit=deg'
return url
def query(star):
url = build_search_url(star)
response = requests.get(url)
if response.status_code != 200:
print(response)
return
return response.content.decode()
def parse(content, star):
if 'not found in Sesame' in content:
raise ValueError(f'object ID "{star}" not found')
if 'No objects matching specified criteria were found.' in content:
raise ValueError('superWASP query did not match any object')
soup = BeautifulSoup(content, 'html.parser')
table = soup.find_all('table')[0]
tablerow = table.find_all('tr')[1]
data = np.array(tablerow.find_all('td'), dtype=object)
inds = [2, 3, 4, 6, 7, 8, 9, 10]
name, npts, files, start, stop, ra, dec, mag = data[inds]
name = name.text
npts = int(npts.text)
csv_link = files.find_all('a')[1].attrs['href']
start, stop = start.text, stop.text
start = datetime.strptime(start[:-2], '%Y-%m-%d %H:%M:%S')
stop = datetime.strptime(stop[:-2], '%Y-%m-%d %H:%M:%S')
ra, dec = float(ra.text), float(dec.text)
mag = float(mag.text)
return name, npts, csv_link, start, stop, ra, dec, mag
def get_lightcurve(star, verbose=True):
content = query(star)
name, npts, csv_link, start, stop, ra, dec, mag = parse(content, star)
if verbose:
print(
f'Found "{name}" ({npts} observations '
f'between {start.date()} and {stop.date()})'
)
filename = name.replace(' ', '_') + '.csv'
if os.path.exists(filename):
return filename
# download the lightcurve
if verbose:
print('Downloading lightcurve...', end=' ', flush=True)
url = main_url + csv_link
response = requests.get(url)
if response.status_code != 200:
if verbose:
print('failed!')
return
# save the lightcurve to a file
with open(filename, 'w') as f:
f.write(response.text)
if verbose:
print()
print(f'Saved lightcurve to {filename}')
return filename
class superWASP:
def __init__(self, filename, verbose=True):
self.verbose = verbose
# read the lightcurve
data = np.genfromtxt(filename, delimiter=',', names=True)
self.target = filename[:-4]
self.N = data.size
self.time = data['HJD']
self.time -= 24e5
self.mag = data['magnitude']
median_mag = np.median(self.mag)
self.flux = np.negative(self.mag - median_mag) + median_mag
self.flux_err = data['magnitude_error']
self.c_time = self.time.copy()
self.c_flux = self.flux.copy()
self.c_flux_err = self.flux_err.copy()
self.mask = np.ones_like(self.flux, dtype=bool)
def __repr__(self):
return f'superWASP({self.target}, {self.N} points)'
@classmethod
def query_object(cls, star, verbose=True):
filename = get_lightcurve(star, verbose)
return cls(filename, verbose=verbose)
def sigmaclip(self, start_sigma=4, step=0.8, niter=5):
def plotit(original, mask):
plt.close('sigmaclip_fig')
fig, ax = plt.subplots(1, 1, num='sigmaclip_fig',
constrained_layout=True)
ax.errorbar(self.time, original, fmt='o', ms=2, alpha=0.2)
ax.plot(self.time[~mask], original[~mask], 'x', color='r', ms=2)
plt.show()
original = self.flux.copy()
it, start = 0, start_sigma
sigma = start
msg = 'sigma={:.2f} continue(c) stop(s) : '
while it < niter:
clipped, lo, up = stats.sigmaclip(original, low=sigma, high=sigma)
mask = (original > lo) & (original < up)
plotit(original, mask)
go_on = input(msg.format(sigma))
if go_on == 's':
break
            sigma *= step
            it += 1  # advance the iteration counter so niter actually limits the loop
self.c_time = self.time[mask]
self.c_flux = clipped
self.c_flux_err = self.flux_err[mask]
self.mask = mask
return clipped, lo, up
def detrend(self, plot=True, degree=1, weigh=True):
# if self.verbose:
# print('Removing trend')
t, y, e = self.c_time, self.c_flux, self.c_flux_err
if weigh:
fitp = np.polyfit(t, y, degree, w=1/e)
else:
fitp = np.polyfit(t, y, degree)
print(f'coefficients: {fitp}')
if plot:
ax, _ = self.plot()
tt = np.linspace(t.min(), t.max(), 1000)
# max_zorder = max([l.get_zorder() for l in ax.get_lines()])
ax.plot(tt, np.polyval(fitp, tt), color='k', lw=3, zorder=3)
y = y - np.polyval(fitp, t)
self.c_flux = y
def plot(self, ax=None, **kwargs):
if ax is None:
fig, ax = plt.subplots(1, 1, constrained_layout=True)
else:
fig = ax.figure
markers, caps, bars = ax.errorbar(self.c_time, self.c_flux,
self.c_flux_err, fmt='o', ms=2,
alpha=0.6, ecolor='k')
[bar.set_alpha(0.2) for bar in bars]
[cap.set_alpha(0.5) for cap in caps]
ax.set(ylabel='-mag', xlabel='JD [days]')
return ax, fig
def gls(self):
model = LombScargle(self.c_time, self.c_flux, self.c_flux_err)
f, p = model.autopower()
if (p < 0).any():
f, p = model.autopower(method='cython')
fig, ax = plt.subplots(1, 1, constrained_layout=True)
ax.semilogx(1/f, p)
# ax.hlines(model.false_alarm_level([0.01, 0.1]), *ax.get_xlim(),
# color='k', alpha=0.5)
return model
if __name__ == "__main__":
get_lightcurve(sys.argv[1])
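# Hypothetical interactive session (assumes network access and a target that
# superWASP actually observed; 'WASP-12' is only an illustration):
#   lc = superWASP.query_object('WASP-12')  # downloads and caches '<name>.csv'
#   lc.sigmaclip()                          # interactive sigma-clipping of outliers
#   lc.detrend(degree=1)                    # subtract a fitted linear trend
#   gls_model = lc.gls()                    # Lomb-Scargle periodogram of the cleaned flux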
|
[
"numpy.ones_like",
"matplotlib.pyplot.show",
"scipy.stats.sigmaclip",
"numpy.polyfit",
"numpy.median",
"astropy.timeseries.LombScargle",
"matplotlib.pyplot.close",
"numpy.polyval",
"os.path.exists",
"numpy.genfromtxt",
"numpy.negative",
"datetime.datetime.strptime",
"requests.get",
"bs4.BeautifulSoup",
"matplotlib.pyplot.subplots"
] |
[((706, 723), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (718, 723), False, 'import requests\n'), ((1117, 1154), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (1130, 1154), False, 'from bs4 import BeautifulSoup\n'), ((1544, 1594), 'datetime.datetime.strptime', 'datetime.strptime', (['start[:-2]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(start[:-2], '%Y-%m-%d %H:%M:%S')\n", (1561, 1594), False, 'from datetime import datetime\n'), ((1606, 1655), 'datetime.datetime.strptime', 'datetime.strptime', (['stop[:-2]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(stop[:-2], '%Y-%m-%d %H:%M:%S')\n", (1623, 1655), False, 'from datetime import datetime\n'), ((2139, 2163), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2153, 2163), False, 'import os\n'), ((2345, 2362), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2357, 2362), False, 'import requests\n'), ((2813, 2863), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '""","""', 'names': '(True)'}), "(filename, delimiter=',', names=True)\n", (2826, 2863), True, 'import numpy as np\n'), ((3043, 3062), 'numpy.median', 'np.median', (['self.mag'], {}), '(self.mag)\n', (3052, 3062), True, 'import numpy as np\n'), ((3325, 3360), 'numpy.ones_like', 'np.ones_like', (['self.flux'], {'dtype': 'bool'}), '(self.flux, dtype=bool)\n', (3337, 3360), True, 'import numpy as np\n'), ((5904, 5958), 'astropy.timeseries.LombScargle', 'LombScargle', (['self.c_time', 'self.c_flux', 'self.c_flux_err'], {}), '(self.c_time, self.c_flux, self.c_flux_err)\n', (5915, 5958), False, 'from astropy.timeseries import LombScargle\n'), ((6088, 6131), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (6100, 6131), True, 'import matplotlib.pyplot as plt\n'), ((3083, 3117), 'numpy.negative', 'np.negative', (['(self.mag - median_mag)'], {}), '(self.mag - median_mag)\n', (3094, 3117), True, 'import numpy as np\n'), ((3714, 3740), 'matplotlib.pyplot.close', 'plt.close', (['"""sigmaclip_fig"""'], {}), "('sigmaclip_fig')\n", (3723, 3740), True, 'import matplotlib.pyplot as plt\n'), ((3763, 3827), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'num': '"""sigmaclip_fig"""', 'constrained_layout': '(True)'}), "(1, 1, num='sigmaclip_fig', constrained_layout=True)\n", (3775, 3827), True, 'import matplotlib.pyplot as plt\n'), ((4023, 4033), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4031, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4237, 4285), 'scipy.stats.sigmaclip', 'stats.sigmaclip', (['original'], {'low': 'sigma', 'high': 'sigma'}), '(original, low=sigma, high=sigma)\n', (4252, 4285), False, 'from scipy import stats\n'), ((4887, 4920), 'numpy.polyfit', 'np.polyfit', (['t', 'y', 'degree'], {'w': '(1 / e)'}), '(t, y, degree, w=1 / e)\n', (4897, 4920), True, 'import numpy as np\n'), ((4952, 4976), 'numpy.polyfit', 'np.polyfit', (['t', 'y', 'degree'], {}), '(t, y, degree)\n', (4962, 4976), True, 'import numpy as np\n'), ((5283, 5302), 'numpy.polyval', 'np.polyval', (['fitp', 't'], {}), '(fitp, t)\n', (5293, 5302), True, 'import numpy as np\n'), ((5412, 5455), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'constrained_layout': '(True)'}), '(1, 1, constrained_layout=True)\n', (5424, 5455), True, 'import matplotlib.pyplot as plt\n'), ((5217, 5237), 'numpy.polyval', 'np.polyval', (['fitp', 'tt'], {}), '(fitp, tt)\n', (5227, 5237), True, 'import numpy 
as np\n')]
|
import numpy as np
class GreedyOpt:
"""
iterable:
the items to pick from
size:
size of subset of items to search
target:
Function to minimize
"""
def __init__(self, iterable=[], target=lambda x: 1):
self.iterable = iterable
self._target = target
self.result = []
self.min_vals = []
self.min_items = []
def set_target(self, target):
self._target = target
def target(self, item):
"""
        Called once per candidate item on every step, i.e. len(iterable) times per step.
        Total number of calls: size * len(iterable)
"""
return self._target(item)
def add(self, item):
"""
        called every time a minimum is found
Total number of calls: size
"""
self.result.append(item)
def run(self, size):
return self.run_size(size)
def items(self):
return self.iterable
def step(self):
items = np.array(self.items())
costs = np.array([self.target(i) for i in items])
if len(costs) == 0:
return 1
min_idx = np.argmin(costs)
min_item = items[min_idx]
min_val = costs[min_idx]
self.min_items.append(min_item)
self.min_vals.append(min_val)
self.add(min_item)
def run_cost(self, cost):
while True:
error_code = self.step()
if error_code==1:
print('Greedy search failed to find desired cost')
raise Exception('Failed to optimize')
if self.min_vals[-1] < cost:
break
def run_size(self, size):
for i in range(size):
self.step()
return self.result
class GreedyParvars(GreedyOpt):
def __init__(self, graph, *args, **kwargs):
super().__init__(*args, **kwargs)
self.graph = graph
def items(self):
return self.graph.nodes
def target(self, item):
return - self.graph.degree(item)
def add(self, item):
super().add(item)
self.graph.remove_node(item)
#qtree.graph_model.eliminate_node(self.graph, item)
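# Minimal usage sketch (toy values, not from the original project):
#   opt = GreedyOpt(iterable=[-4, -1, 0, 2, 5], target=lambda x: x ** 2)
#   opt.run(3)  # -> [0, 0, 0]: the base class never removes picked items, so each
#               # step re-selects the same minimiser; subclasses such as GreedyParvars
#               # shrink the pool by overriding items()/add().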
|
[
"numpy.argmin"
] |
[((1099, 1115), 'numpy.argmin', 'np.argmin', (['costs'], {}), '(costs)\n', (1108, 1115), True, 'import numpy as np\n')]
|
import os
import subprocess
from multiprocessing.pool import Pool
import miditoolkit
import pandas as pd
import pretty_midi
from tqdm import tqdm
import numpy as np
import pickle
from copy import deepcopy
from midi_preprocess.utils.hparams import hparams
import midi_preprocess.steps.track_separate as tc
def filter_and_merge(processed_data_dir, instru2program):
base_dir = 'midi_preprocess'
melody_model = pickle.load(open(f'{base_dir}/model/melody_model_new', 'rb'))
bass_model = pickle.load(open(f'{base_dir}/model/bass_model', 'rb'))
chord_model = pickle.load(open(f'{base_dir}/model/chord_model', 'rb'))
df = pd.read_csv(open(f'{processed_data_dir}/meta.csv'))
print(f"| load #midi infos: {df.shape[0]}.")
pool = Pool(int(os.getenv('N_PROC', os.cpu_count())))
save_dir = f'{processed_data_dir}/midi_recog_tracks'
subprocess.check_call(f'rm -rf "{save_dir}"', shell=True)
futures = [pool.apply_async(filter_recog_merge_job, args=[
midi_info['path'], midi_info, instru2program, save_dir, melody_model, bass_model, chord_model
]) for idx, midi_info in df.iterrows()]
pool.close()
merged_infos = []
for f, (idx, midi_info) in zip(tqdm(futures), df.iterrows()):
res = f.get()
merged_info = {}
merged_info.update(midi_info)
if isinstance(res, str):
merged_info['msg'] = res
else:
merged_info['msg'] = ''
merged_info.update(res)
merged_infos.append(merged_info)
df = pd.DataFrame(merged_infos)
df = df.set_index(['id'])
df.to_csv(f'{processed_data_dir}/meta.csv')
pool.join()
n_merged = len([x for x in merged_infos if x['msg'] == ''])
print(f"| merged #midi: {n_merged}")
def predict_track_with_model(midi_path, melody_model, bass_model, chord_model):
try:
ret = tc.cal_file_features(midi_path) # remove empty track and calculate the features
features, pm = ret
except Exception as e:
features = None
pm = pretty_midi.PrettyMIDI(midi_path)
if features is None and pm is None:
pm = pretty_midi.PrettyMIDI(midi_path)
if features is None or features.shape[0] == 0:
return pm, [], []
features = tc.add_labels(features) # add label
tc.remove_file_duplicate_tracks(features, pm) # delete duplicate track
features = tc.predict_labels(features, melody_model, bass_model, chord_model) # predict lead, bass, chord
predicted_melody_tracks_idx = np.where(features.melody_predict)[0]
predicted_bass_tracks_idx = np.where(features.bass_predict)[0]
melody_tracks_idx = np.concatenate((predicted_melody_tracks_idx, np.where(features.is_melody)[0]))
bass_tracks_idx = np.concatenate((predicted_bass_tracks_idx, np.where(features.is_bass)[0]))
return pm, melody_tracks_idx, bass_tracks_idx
def filter_recog_merge_job(midi_path, midi_info, instru2program, save_dir,
melody_model, bass_model, chord_model):
filter_msg = filter_tracks(midi_info)
if filter_msg != '':
return filter_msg
pm, melody_tracks_idx, bass_tracks_idx = predict_track_with_model(midi_path, melody_model, bass_model, chord_model)
if pm is None:
return 'pm is None'
pm_new = deepcopy(pm)
pm_new.instruments = []
for i, instru_old in enumerate(pm.instruments):
program_old = instru_old.program
instru = deepcopy(instru_old)
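        # Rename tracks using the model predictions plus General MIDI program
        # families: program//8 == 0 -> piano (0-7), == 3 -> guitar (24-31),
        # == 4 -> bass (32-39); 40-54 are string/ensemble programs and the
        # 73-88 range (pipes / synth leads) is mapped to 'Lead' below.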
if i in melody_tracks_idx and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Lead':
instru.name = 'Lead'
elif i in bass_tracks_idx and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Bass':
instru.name = 'Bass'
elif instru_old.is_drum and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Drums': # drum
instru.name = 'Drums'
elif program_old // 8 == 0 and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Piano': # piano
instru.name = 'Piano'
elif program_old // 8 == 3 and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Guitar': # guitar
instru.name = 'Guitar'
elif 40 <= program_old <= 54 and 'MUMIDI_' not in instru.name or instru.name == 'MUMIDI_Strings': # string
instru.name = 'Strings'
elif 73 <= program_old <= 88: # Lead
instru.name = 'Lead'
elif program_old // 8 == 4: # Bass
instru.name = 'Bass'
else:
instru.name = 'UnRec'
instru.program = instru_old.program
pm_new.instruments.append(instru)
os.makedirs(save_dir, exist_ok=True)
out_path = f"{save_dir}/{midi_info['id']}.mid"
pm_new.write(out_path)
merged_midi_info = get_merged_midi_info(out_path, instru2program)
filter_msg = filter_tracks(midi_info)
if filter_msg != '':
return '[merged]' + filter_msg
return merged_midi_info
def filter_tracks(midi_info):
# filter out too long n_beats > 10000, and too short n_beats < 16
if midi_info['n_beats'] > hparams['max_n_beats'] or midi_info['n_beats'] < hparams['min_n_beats']:
return 'invalid beats'
if midi_info['n_notes'] < hparams['min_n_notes']:
return 'invalid n_notes'
if midi_info['n_pitches'] < hparams['min_n_pitches']:
return 'Invalid pitches'
if midi_info['cross_bar_rate'] > hparams['max_cross_bar_rate']:
return 'Invalid cross_bar'
return ''
def get_merged_midi_info(midi_fn, instru2program):
try:
mf = miditoolkit.MidiFile(midi_fn)
except KeyboardInterrupt:
raise
except Exception as e:
return str(e)
# merge tracks
track_lists_to_merge = get_tracks_to_merge(mf, instru2program)
n_merge_track = [len(x) for x in track_lists_to_merge]
available_instrs = list(set([x2 for x in track_lists_to_merge for x2 in x])) # Important for 6 tracks
# notes
all_vels = [x1.velocity for i, x in enumerate(mf.instruments) if i in available_instrs for x1 in
x.notes] # all instruments note connection in a line
all_pitches = [x1.pitch for i, x in enumerate(mf.instruments) if i in available_instrs for x1 in x.notes]
n_notes = len(all_vels) # numbers of notes
if n_notes == 0:
return 'empty tracks'
n_beats = max([x1.end for i, x in enumerate(mf.instruments)
if i in available_instrs for x1 in x.notes]) // mf.ticks_per_beat + 1
n_instru = len(mf.instruments)
n_pitches = len(set(all_pitches)) # pitch classes
vel_mean = np.mean(all_vels)
vel_std = np.std(all_vels)
n_cross_bar = 0
for i, instru in enumerate(mf.instruments):
if i not in available_instrs:
continue
for n in instru.notes:
start_beat = n.start / mf.ticks_per_beat
end_beat = n.end / mf.ticks_per_beat
if (start_beat + 0.25) // 4 < (end_beat - 0.25) // 4 and start_beat % 4 > 0.125:
n_cross_bar += 1
return {
'path_recog_tracks': midi_fn,
# velocity
'vel_mean': vel_mean,
'vel_std': vel_std,
# stats
'n_notes': n_notes,
'n_instru': n_instru,
'n_beats': n_beats,
'n_pitches': n_pitches,
'n_cross_bar': n_cross_bar,
# tracks
'n_tracks': n_merge_track,
'track_lists_to_merge': track_lists_to_merge,
}
def get_tracks_to_merge(mf, instru2program):
track_lists_to_merge = [[] for _ in range(6)]
instru_order = {v: k for k, v in enumerate(instru2program.keys())}
for idx, instr in enumerate(mf.instruments):
instr_name = instr.name
if instr_name in instru_order:
track_lists_to_merge[instru_order[instr_name]].append(idx)
return track_lists_to_merge
|
[
"pandas.DataFrame",
"midi_preprocess.steps.track_separate.remove_file_duplicate_tracks",
"midi_preprocess.steps.track_separate.add_labels",
"copy.deepcopy",
"os.makedirs",
"tqdm.tqdm",
"midi_preprocess.steps.track_separate.predict_labels",
"numpy.std",
"midi_preprocess.steps.track_separate.cal_file_features",
"os.cpu_count",
"numpy.mean",
"pretty_midi.PrettyMIDI",
"numpy.where",
"miditoolkit.MidiFile",
"subprocess.check_call"
] |
[((859, 916), 'subprocess.check_call', 'subprocess.check_call', (['f"""rm -rf "{save_dir}\\""""'], {'shell': '(True)'}), '(f\'rm -rf "{save_dir}"\', shell=True)\n', (880, 916), False, 'import subprocess\n'), ((1522, 1548), 'pandas.DataFrame', 'pd.DataFrame', (['merged_infos'], {}), '(merged_infos)\n', (1534, 1548), True, 'import pandas as pd\n'), ((2238, 2261), 'midi_preprocess.steps.track_separate.add_labels', 'tc.add_labels', (['features'], {}), '(features)\n', (2251, 2261), True, 'import midi_preprocess.steps.track_separate as tc\n'), ((2279, 2324), 'midi_preprocess.steps.track_separate.remove_file_duplicate_tracks', 'tc.remove_file_duplicate_tracks', (['features', 'pm'], {}), '(features, pm)\n', (2310, 2324), True, 'import midi_preprocess.steps.track_separate as tc\n'), ((2366, 2432), 'midi_preprocess.steps.track_separate.predict_labels', 'tc.predict_labels', (['features', 'melody_model', 'bass_model', 'chord_model'], {}), '(features, melody_model, bass_model, chord_model)\n', (2383, 2432), True, 'import midi_preprocess.steps.track_separate as tc\n'), ((3267, 3279), 'copy.deepcopy', 'deepcopy', (['pm'], {}), '(pm)\n', (3275, 3279), False, 'from copy import deepcopy\n'), ((4586, 4622), 'os.makedirs', 'os.makedirs', (['save_dir'], {'exist_ok': '(True)'}), '(save_dir, exist_ok=True)\n', (4597, 4622), False, 'import os\n'), ((6546, 6563), 'numpy.mean', 'np.mean', (['all_vels'], {}), '(all_vels)\n', (6553, 6563), True, 'import numpy as np\n'), ((6578, 6594), 'numpy.std', 'np.std', (['all_vels'], {}), '(all_vels)\n', (6584, 6594), True, 'import numpy as np\n'), ((1200, 1213), 'tqdm.tqdm', 'tqdm', (['futures'], {}), '(futures)\n', (1204, 1213), False, 'from tqdm import tqdm\n'), ((1853, 1884), 'midi_preprocess.steps.track_separate.cal_file_features', 'tc.cal_file_features', (['midi_path'], {}), '(midi_path)\n', (1873, 1884), True, 'import midi_preprocess.steps.track_separate as tc\n'), ((2112, 2145), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['midi_path'], {}), '(midi_path)\n', (2134, 2145), False, 'import pretty_midi\n'), ((2496, 2529), 'numpy.where', 'np.where', (['features.melody_predict'], {}), '(features.melody_predict)\n', (2504, 2529), True, 'import numpy as np\n'), ((2565, 2596), 'numpy.where', 'np.where', (['features.bass_predict'], {}), '(features.bass_predict)\n', (2573, 2596), True, 'import numpy as np\n'), ((3418, 3438), 'copy.deepcopy', 'deepcopy', (['instru_old'], {}), '(instru_old)\n', (3426, 3438), False, 'from copy import deepcopy\n'), ((5515, 5544), 'miditoolkit.MidiFile', 'miditoolkit.MidiFile', (['midi_fn'], {}), '(midi_fn)\n', (5535, 5544), False, 'import miditoolkit\n'), ((2025, 2058), 'pretty_midi.PrettyMIDI', 'pretty_midi.PrettyMIDI', (['midi_path'], {}), '(midi_path)\n', (2047, 2058), False, 'import pretty_midi\n'), ((780, 794), 'os.cpu_count', 'os.cpu_count', ([], {}), '()\n', (792, 794), False, 'import os\n'), ((2669, 2697), 'numpy.where', 'np.where', (['features.is_melody'], {}), '(features.is_melody)\n', (2677, 2697), True, 'import numpy as np\n'), ((2768, 2794), 'numpy.where', 'np.where', (['features.is_bass'], {}), '(features.is_bass)\n', (2776, 2794), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 5 09:01:46 2021
@author: Michi
"""
import os
import sys
import numpy as np
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import Globals
import astropy.units as u
from population.astro.astroMassDistribution import AstroSmoothPowerLawMass, BrokenPowerLawMass, TruncPowerLawMass, PowerLawPlusPeakMass
from population.astro.astroSpinDistribution import DummySpinDist, GaussSpinDist
from population.astro.rateEvolution import PowerLawRateEvolution, AstroPhRateEvolution
from population.astro.astroPopulation import AstroPopulation
from cosmology.cosmo import Cosmo
from population.allPopulations import AllPopulations
from dataStructures.mockData import GWMockData, GWMockInjectionsData
from dataStructures.O3adata import O3aData, O3aInjectionsData
from dataStructures.O1O2data import O1O2Data, O1O2InjectionsData
from dataStructures.O3bdata import O3bData, O3bInjectionsData
#import astropy.units as u
#from posteriors.prior import Prior
#from posteriors.likelihood import HyperLikelihood
#from posteriors.selectionBias import SelectionBiasInjections
#from posteriors.posterior import Posterior
mass_functions = { 'smooth_pow_law': AstroSmoothPowerLawMass,
'broken_pow_law': BrokenPowerLawMass,
'trunc_pow_law': TruncPowerLawMass,
'pow_law_peak': PowerLawPlusPeakMass,
}
spin_functions = { 'gauss': GaussSpinDist,
'skip': DummySpinDist
}
rate_functions = { # 'gauss': AstroSmoothPowerLawMass(),
'simple_pow_law': PowerLawRateEvolution,
'astro-ph' : AstroPhRateEvolution,
}
fnames_data = { 'mock': 'observations.h5',
'O3a': '',
'O1O2': '',
'O3b': '',
}
fnames_inj = { 'mock': 'selected.h5',
'O3a': 'o3a_bbhpop_inj_info.hdf',
'O1O2':'injections_O1O2an_spin.h5',
'O3b':''
}
fnames_inj_3 = { 'mock': 'selected.h5',
'O3a': 'endo3_bbhpop-LIGO-T2100113-v12-1238166018-15843600.hdf5',
'O1O2':'injections_O1O2an_spin.h5',
'O3b':'endo3_bbhpop-LIGO-T2100113-v12-1256655642-12905976.hdf5'
}
fnames_SNRs = { 'mock': 'optimal_snr.h5'
}
def setup_chain(nwalkers, exp_values, priorNames, priorLimits, priorParams, params_inference, perc_variation_init=10, seed=1312):
'''
    Returns the initial positions of the walkers.
    -------
    pos : ndarray of shape (nwalkers, ndim)
        Each walker starts inside an interval of +-perc_variation_init percent
        around exp_values, clipped to the prior limits (and to mu +- 5 sigma
        for gaussian priors).
'''
ndim=len(params_inference)
eps = [val if val!=0 else 1 for val in exp_values ]
lowLims = [ max( [exp_values[i]-eps[i]*perc_variation_init/100, priorLimits[p][0] ] ) for i,p in enumerate(params_inference) ]
upLims = [ min( [ exp_values[i]+eps[i]*perc_variation_init/100, priorLimits[p][1] ]) for i,p in enumerate(params_inference) ]
for i in range(len(lowLims)):
if lowLims[i]>upLims[i]:
lowLim=upLims[i]
upLim=lowLims[i]
upLims[i]=upLim
lowLims[i]=lowLim
if priorNames[params_inference[i]]=='gauss':
print('Re-adjusting intervals for %s to gaussian prior limits...' %params_inference[i])
mu, sigma = priorParams[params_inference[i]]['mu'], priorParams[params_inference[i]]['sigma']
lowLim = lowLims[i]
upLim=upLims[i]
lowLims[i] = max(lowLim, mu-5*sigma)
upLims[i] = min(upLim, mu+5*sigma)
print('lowLims: %s' %lowLims)
print('upLims: %s' %upLims)
Delta = [upLims[i]-lowLims[i] for i in range(ndim)]
print('Delta: %s' %Delta)
print('Initial intervals for initialization of the walkers have an amplitude of +-%s percent around the expeced values of %s'%(perc_variation_init, str(exp_values)) )
np.random.seed(seed)
pos = Delta*np.random.rand(nwalkers, ndim)+lowLims
return pos
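# Illustrative call (parameter names and values here are made up; the prior
# dictionaries must be keyed by the entries of params_inference):
#   pos = setup_chain(nwalkers=32, exp_values=[67.0, 0.3],
#                     priorNames=priorNames, priorLimits=priorLimits,
#                     priorParams=priorParams, params_inference=['H0', 'Om'])
#   # pos has shape (nwalkers, ndim) and is meant as the starting state of an
#   # ensemble MCMC sampler (e.g. emcee), which is configured elsewhere.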
def build_model( populations, cosmo_args={}, mass_args={}, spin_args={}, rate_args={}, ):
'''
Parameters
----------
populations : dict
{ pop_name : {mass_function: , spin_distribution: , rate: } }.
Returns
-------
    allPops : AllPopulations
        Collector holding one AstroPopulation per entry of `populations`.
'''
# Create cosmology
myCosmo = Cosmo(**cosmo_args)
# Collector of all populations
allPops = AllPopulations(myCosmo)
for population in populations.keys():
print('Adding population %s' %population)
# Create mass dist
massFunction = mass_functions[populations[population]['mass_function']](**mass_args)
# Create spin dist
spinDist = spin_functions[populations[population]['spin_distribution']](**spin_args)
# Create rate
rate = rate_functions[populations[population]['rate']](**rate_args)
# Create population
pop_ = AstroPopulation(rate, massFunction, spinDist)
allPops.add_pop(pop_)
return allPops
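# Hypothetical configuration sketch (the population name 'BBH' is illustrative;
# the three values must be keys of mass_functions, spin_functions and rate_functions):
#   pops = {'BBH': {'mass_function': 'pow_law_peak',
#                   'spin_distribution': 'gauss',
#                   'rate': 'simple_pow_law'}}
#   allPops = build_model(pops, cosmo_args={}, mass_args={}, spin_args={}, rate_args={})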
def load_data(dataset_name, injections_name=None, nObsUse=None, nSamplesUse=None, percSamplesUse=None, nInjUse=None, dist_unit=u.Gpc, data_args ={}, inj_args ={}, Tobs=None):
# for O2/O3, injections_name can be None in which case the LVC injections are used (they should be in the same folder as the data)
# or specify the name of a folder containing a file 'selected.h5'
############################################################
# DATA
if "mock" in dataset_name:
dataset_key='mock'
else:
dataset_key=dataset_name
fname = os.path.join(Globals.dataPath, dataset_name, fnames_data[dataset_key])
fnameInj = os.path.join(Globals.dataPath, dataset_name, fnames_inj[dataset_key] )
if dataset_key=='mock':
Data = GWMockData(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, Tobs=Tobs)
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
fnameInj = os.path.join(Globals.dataPath, injections_name, fnames_inj[dataset_key] )
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=Tobs, snr_th=snr_th)
elif dataset_name=='O3a':
Data = O3aData(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, **data_args)
if injections_name is None:
injData = O3aInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)
else:
fnameInj = os.path.join(Globals.dataPath, dataset_name, injections_name, 'selected.h5') #fnames_inj[dataset_key] )
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=Data.Tobs, snr_th=snr_th )
elif dataset_name=='O1O2':
Data = O1O2Data(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, **data_args)
if injections_name is None:
injData = O1O2InjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)
else:
fnameInj = os.path.join(Globals.dataPath, dataset_name, injections_name, 'selected.h5')
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=Data.Tobs, snr_th=snr_th)
elif dataset_name=='O3b':
Data = O3bData(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, **data_args)
if injections_name is None:
injData = O3bInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)
#raise ValueError('LVC injections are not supported for O3b. Specify a name for the injections')
else:
fnameInj = os.path.join(Globals.dataPath, dataset_name, injections_name, 'selected.h5')
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=Data.Tobs, snr_th=snr_th )
else:
raise ValueError('Dataset name not valid')
return Data, injData
def load_injections(dataset_name, injections_name=None, nInjUse=None, dist_unit=u.Gpc, inj_args ={}, Tobs=None):
# for O2/O3, injections_name can be None in which case the LVC injections are used (they should be in the same folder as the data)
# or specify the name of a folder containing a file 'selected.h5'
############################################################
# DATA
if "mock" in dataset_name:
dataset_key='mock'
else:
dataset_key=dataset_name
if inj_args['which_injections']=='GWTC-2':
fnameInj = os.path.join(Globals.dataPath, dataset_name, fnames_inj[dataset_key] )
elif inj_args['which_injections']=='GWTC-3':
fnameInj = os.path.join(Globals.dataPath, dataset_name, fnames_inj_3[dataset_key] )
#else:
# raise ValueError('which_injections you entered %s' %which_injections)
if dataset_key=='mock':
#Data = GWMockData(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, Tobs=Tobs)
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
fnameInj = os.path.join(Globals.dataPath, injections_name, fnames_inj[dataset_key] )
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=Tobs, snr_th=snr_th)
elif dataset_name=='O3a':
#Data = O3aData(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, **data_args)
if injections_name is None:
injData = O3aInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)
else:
fnameInj = os.path.join(Globals.dataPath, dataset_name, injections_name, 'selected.h5') #fnames_inj[dataset_key] )
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=183.375/365., snr_th=snr_th )
elif dataset_name=='O1O2':
#Data = O1O2Data(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, **data_args)
if injections_name is None:
injData = O1O2InjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)
else:
fnameInj = os.path.join(Globals.dataPath, dataset_name, injections_name, 'selected.h5')
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=(129+267)/365., snr_th=snr_th)
elif dataset_name=='O3b':
#Data = O3bData(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=percSamplesUse, dist_unit=dist_unit, **data_args)
if injections_name is None:
injData = O3bInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)
#raise ValueError('LVC injections are not supported for O3b. Specify a name for the injections')
else:
fnameInj = os.path.join(Globals.dataPath, dataset_name, injections_name, 'selected.h5')
if 'SNR_th' in inj_args.keys():
snr_th = inj_args['SNR_th']
else: snr_th=None
injData = GWMockInjectionsData(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=147.083/365. , snr_th=snr_th )
else:
raise ValueError('Dataset name not valid')
return injData
|
[
"os.path.expanduser",
"cosmology.cosmo.Cosmo",
"population.astro.astroPopulation.AstroPopulation",
"numpy.random.seed",
"dataStructures.O3adata.O3aData",
"dataStructures.O3bdata.O3bInjectionsData",
"os.getcwd",
"dataStructures.mockData.GWMockInjectionsData",
"dataStructures.O3adata.O3aInjectionsData",
"dataStructures.O1O2data.O1O2InjectionsData",
"dataStructures.O3bdata.O3bData",
"population.allPopulations.AllPopulations",
"numpy.random.rand",
"dataStructures.O1O2data.O1O2Data",
"os.path.join",
"dataStructures.mockData.GWMockData"
] |
[((4125, 4145), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (4139, 4145), True, 'import numpy as np\n'), ((4542, 4561), 'cosmology.cosmo.Cosmo', 'Cosmo', ([], {}), '(**cosmo_args)\n', (4547, 4561), False, 'from cosmology.cosmo import Cosmo\n'), ((4620, 4643), 'population.allPopulations.AllPopulations', 'AllPopulations', (['myCosmo'], {}), '(myCosmo)\n', (4634, 4643), False, 'from population.allPopulations import AllPopulations\n'), ((5997, 6067), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'fnames_data[dataset_key]'], {}), '(Globals.dataPath, dataset_name, fnames_data[dataset_key])\n', (6009, 6067), False, 'import os\n'), ((6087, 6156), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'fnames_inj[dataset_key]'], {}), '(Globals.dataPath, dataset_name, fnames_inj[dataset_key])\n', (6099, 6156), False, 'import os\n'), ((307, 347), 'os.path.join', 'os.path.join', (['SCRIPT_DIR', 'PACKAGE_PARENT'], {}), '(SCRIPT_DIR, PACKAGE_PARENT)\n', (319, 347), False, 'import os\n'), ((5214, 5259), 'population.astro.astroPopulation.AstroPopulation', 'AstroPopulation', (['rate', 'massFunction', 'spinDist'], {}), '(rate, massFunction, spinDist)\n', (5229, 5259), False, 'from population.astro.astroPopulation import AstroPopulation\n'), ((6218, 6345), 'dataStructures.mockData.GWMockData', 'GWMockData', (['fname'], {'nObsUse': 'nObsUse', 'nSamplesUse': 'nSamplesUse', 'percSamplesUse': 'percSamplesUse', 'dist_unit': 'dist_unit', 'Tobs': 'Tobs'}), '(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=\n percSamplesUse, dist_unit=dist_unit, Tobs=Tobs)\n', (6228, 6345), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((6483, 6555), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'injections_name', 'fnames_inj[dataset_key]'], {}), '(Globals.dataPath, injections_name, fnames_inj[dataset_key])\n', (6495, 6555), False, 'import os\n'), ((6579, 6678), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': 'Tobs', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n Tobs, snr_th=snr_th)\n', (6599, 6678), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((9721, 9790), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'fnames_inj[dataset_key]'], {}), '(Globals.dataPath, dataset_name, fnames_inj[dataset_key])\n', (9733, 9790), False, 'import os\n'), ((10374, 10446), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'injections_name', 'fnames_inj[dataset_key]'], {}), '(Globals.dataPath, injections_name, fnames_inj[dataset_key])\n', (10386, 10446), False, 'import os\n'), ((10470, 10569), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': 'Tobs', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n Tobs, snr_th=snr_th)\n', (10490, 10569), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((229, 240), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (238, 240), False, 'import os\n'), ((242, 270), 'os.path.expanduser', 'os.path.expanduser', (['__file__'], {}), '(__file__)\n', (260, 270), False, 'import os\n'), ((4162, 4192), 'numpy.random.rand', 'np.random.rand', (['nwalkers', 'ndim'], {}), '(nwalkers, ndim)\n', (4176, 4192), True, 'import numpy as np\n'), ((6728, 6854), 
'dataStructures.O3adata.O3aData', 'O3aData', (['fname'], {'nObsUse': 'nObsUse', 'nSamplesUse': 'nSamplesUse', 'percSamplesUse': 'percSamplesUse', 'dist_unit': 'dist_unit'}), '(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=\n percSamplesUse, dist_unit=dist_unit, **data_args)\n', (6735, 6854), False, 'from dataStructures.O3adata import O3aData, O3aInjectionsData\n'), ((9868, 9939), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'fnames_inj_3[dataset_key]'], {}), '(Globals.dataPath, dataset_name, fnames_inj_3[dataset_key])\n', (9880, 9939), False, 'import os\n'), ((6917, 6994), 'dataStructures.O3adata.O3aInjectionsData', 'O3aInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)\n', (6934, 6994), False, 'from dataStructures.O3adata import O3aData, O3aInjectionsData\n'), ((7041, 7117), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'injections_name', '"""selected.h5"""'], {}), "(Globals.dataPath, dataset_name, injections_name, 'selected.h5')\n", (7053, 7117), False, 'import os\n'), ((7301, 7405), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': 'Data.Tobs', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n Data.Tobs, snr_th=snr_th)\n', (7321, 7405), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((7483, 7610), 'dataStructures.O1O2data.O1O2Data', 'O1O2Data', (['fname'], {'nObsUse': 'nObsUse', 'nSamplesUse': 'nSamplesUse', 'percSamplesUse': 'percSamplesUse', 'dist_unit': 'dist_unit'}), '(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=\n percSamplesUse, dist_unit=dist_unit, **data_args)\n', (7491, 7610), False, 'from dataStructures.O1O2data import O1O2Data, O1O2InjectionsData\n'), ((10809, 10886), 'dataStructures.O3adata.O3aInjectionsData', 'O3aInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)\n', (10826, 10886), False, 'from dataStructures.O3adata import O3aData, O3aInjectionsData\n'), ((10933, 11009), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'injections_name', '"""selected.h5"""'], {}), "(Globals.dataPath, dataset_name, injections_name, 'selected.h5')\n", (10945, 11009), False, 'import os\n'), ((11193, 11303), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': '(183.375 / 365.0)', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n 183.375 / 365.0, snr_th=snr_th)\n', (11213, 11303), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((7673, 7751), 'dataStructures.O1O2data.O1O2InjectionsData', 'O1O2InjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)\n', (7691, 7751), False, 'from dataStructures.O1O2data import O1O2Data, O1O2InjectionsData\n'), ((7798, 7874), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'injections_name', '"""selected.h5"""'], {}), "(Globals.dataPath, dataset_name, injections_name, 'selected.h5')\n", (7810, 7874), False, 'import os\n'), ((8031, 8135), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 
'dist_unit': 'dist_unit', 'Tobs': 'Data.Tobs', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n Data.Tobs, snr_th=snr_th)\n', (8051, 8135), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((8194, 8320), 'dataStructures.O3bdata.O3bData', 'O3bData', (['fname'], {'nObsUse': 'nObsUse', 'nSamplesUse': 'nSamplesUse', 'percSamplesUse': 'percSamplesUse', 'dist_unit': 'dist_unit'}), '(fname, nObsUse=nObsUse, nSamplesUse=nSamplesUse, percSamplesUse=\n percSamplesUse, dist_unit=dist_unit, **data_args)\n', (8201, 8320), False, 'from dataStructures.O3bdata import O3bData, O3bInjectionsData\n'), ((11569, 11647), 'dataStructures.O1O2data.O1O2InjectionsData', 'O1O2InjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)\n', (11587, 11647), False, 'from dataStructures.O1O2data import O1O2Data, O1O2InjectionsData\n'), ((11694, 11770), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'injections_name', '"""selected.h5"""'], {}), "(Globals.dataPath, dataset_name, injections_name, 'selected.h5')\n", (11706, 11770), False, 'import os\n'), ((11927, 12041), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': '((129 + 267) / 365.0)', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=(\n 129 + 267) / 365.0, snr_th=snr_th)\n', (11947, 12041), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((8383, 8460), 'dataStructures.O3bdata.O3bInjectionsData', 'O3bInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)\n', (8400, 8460), False, 'from dataStructures.O3bdata import O3bData, O3bInjectionsData\n'), ((8620, 8696), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'injections_name', '"""selected.h5"""'], {}), "(Globals.dataPath, dataset_name, injections_name, 'selected.h5')\n", (8632, 8696), False, 'import os\n'), ((8853, 8957), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': 'Data.Tobs', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n Data.Tobs, snr_th=snr_th)\n', (8873, 8957), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n'), ((12285, 12362), 'dataStructures.O3bdata.O3bInjectionsData', 'O3bInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, **inj_args)\n', (12302, 12362), False, 'from dataStructures.O3bdata import O3bData, O3bInjectionsData\n'), ((12522, 12598), 'os.path.join', 'os.path.join', (['Globals.dataPath', 'dataset_name', 'injections_name', '"""selected.h5"""'], {}), "(Globals.dataPath, dataset_name, injections_name, 'selected.h5')\n", (12534, 12598), False, 'import os\n'), ((12755, 12865), 'dataStructures.mockData.GWMockInjectionsData', 'GWMockInjectionsData', (['fnameInj'], {'nInjUse': 'nInjUse', 'dist_unit': 'dist_unit', 'Tobs': '(147.083 / 365.0)', 'snr_th': 'snr_th'}), '(fnameInj, nInjUse=nInjUse, dist_unit=dist_unit, Tobs=\n 147.083 / 365.0, snr_th=snr_th)\n', (12775, 12865), False, 'from dataStructures.mockData import GWMockData, GWMockInjectionsData\n')]
|
import numpy as np
from PuzzleLib.Backend import gpuarray, Blas
from PuzzleLib.Backend.Dnn import PoolMode, poolNd, poolNdBackward
from PuzzleLib.Modules.Module import ModuleError, Module
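# SubtractMean performs subtractive local-mean normalization: an average-pooling pass
# (poolNd) computes the mean over a size x size window around every position, and that
# mean is subtracted from the input; updateGrad mirrors this with poolNdBackward.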
class SubtractMean(Module):
def __init__(self, size=5, includePad=True, name=None):
super().__init__(name)
self.registerBlueprint(locals())
if size % 2 != 1 or size == 1:
raise ModuleError("Subtractive norm size must be odd and > 1")
self.size = self.repeat(size, 2)
self.pad = (self.size[0] // 2, self.size[1] // 2)
self.mode = PoolMode.avgWithPad if includePad else PoolMode.avgNoPad
self.means = None
self.workspace = None
def updateData(self, data):
self.means, self.workspace = poolNd(
data, size=self.size, stride=1, pad=self.pad, mode=self.mode, test=not self.train
)
self.data = Blas.addVectorToVector(data.ravel(), self.means.ravel(), beta=-1.0).reshape(*data.shape)
def updateGrad(self, grad):
meansGrad = poolNdBackward(
self.inData, self.means, grad, self.workspace, size=self.size, stride=1, pad=self.pad, mode=self.mode
)
Blas.addVectorToVector(grad.ravel(), meansGrad.ravel(), out=meansGrad.ravel(), beta=-1.0)
self.grad = meansGrad
def dataShapeFrom(self, shape):
return shape
def checkDataShape(self, shape):
if len(shape) != 4:
raise ModuleError("Data must be 4d tensor")
def gradShapeFrom(self, shape):
return shape
def checkGradShape(self, shape):
if len(shape) != 4:
raise ModuleError("Grad must be 4d tensor")
def reset(self):
super().reset()
self.means = None
self.workspace = None
def unittest():
batchsize, maps, h, w = 1, 1, 6, 6
size = 3
data = gpuarray.to_gpu(np.random.randn(batchsize, maps, h, w).astype(np.float32))
subtractMean = SubtractMean(size=size)
subtractMean(data)
hpad, wpad = subtractMean.pad
hostData = np.zeros(shape=(batchsize, maps, h + 2 * hpad, w + 2 * wpad), dtype=np.float32)
hostData[:, :, hpad:-hpad, wpad:-wpad] = data.get()
hostOutData = np.empty(subtractMean.data.shape, dtype=np.float32)
for b in range(batchsize):
for c in range(maps):
for y in range(data.shape[2]):
for x in range(data.shape[3]):
hostOutData[b, c, y, x] -= np.sum(hostData[b, c, y:y + size, x:x + size]) / size**2
assert np.allclose(hostOutData, subtractMean.data.get())
grad = gpuarray.to_gpu(np.random.randn(*subtractMean.data.shape).astype(np.float32))
subtractMean.backward(grad)
hostGrad = grad.get()
hostInGrad = np.zeros(shape=hostData.shape, dtype=np.float32)
hostInGrad[:, :, hpad:-hpad, wpad:-wpad] = hostGrad
for b in range(batchsize):
for c in range(maps):
for y in range(hostGrad.shape[2]):
for x in range(hostGrad.shape[3]):
for dy in range(size):
for dx in range(size):
hostInGrad[b, c, y + dy, x + dx] -= hostGrad[b, c, y, x] / size**2
assert np.allclose(hostInGrad[:, :, hpad:-hpad, wpad:-wpad], subtractMean.grad.get())
if __name__ == "__main__":
unittest()
|
[
"PuzzleLib.Backend.Dnn.poolNd",
"numpy.sum",
"numpy.random.randn",
"numpy.empty",
"numpy.zeros",
"PuzzleLib.Backend.Dnn.poolNdBackward",
"PuzzleLib.Modules.Module.ModuleError"
] |
[((1836, 1915), 'numpy.zeros', 'np.zeros', ([], {'shape': '(batchsize, maps, h + 2 * hpad, w + 2 * wpad)', 'dtype': 'np.float32'}), '(shape=(batchsize, maps, h + 2 * hpad, w + 2 * wpad), dtype=np.float32)\n', (1844, 1915), True, 'import numpy as np\n'), ((1985, 2036), 'numpy.empty', 'np.empty', (['subtractMean.data.shape'], {'dtype': 'np.float32'}), '(subtractMean.data.shape, dtype=np.float32)\n', (1993, 2036), True, 'import numpy as np\n'), ((2461, 2509), 'numpy.zeros', 'np.zeros', ([], {'shape': 'hostData.shape', 'dtype': 'np.float32'}), '(shape=hostData.shape, dtype=np.float32)\n', (2469, 2509), True, 'import numpy as np\n'), ((704, 798), 'PuzzleLib.Backend.Dnn.poolNd', 'poolNd', (['data'], {'size': 'self.size', 'stride': '(1)', 'pad': 'self.pad', 'mode': 'self.mode', 'test': '(not self.train)'}), '(data, size=self.size, stride=1, pad=self.pad, mode=self.mode, test=\n not self.train)\n', (710, 798), False, 'from PuzzleLib.Backend.Dnn import PoolMode, poolNd, poolNdBackward\n'), ((949, 1071), 'PuzzleLib.Backend.Dnn.poolNdBackward', 'poolNdBackward', (['self.inData', 'self.means', 'grad', 'self.workspace'], {'size': 'self.size', 'stride': '(1)', 'pad': 'self.pad', 'mode': 'self.mode'}), '(self.inData, self.means, grad, self.workspace, size=self.\n size, stride=1, pad=self.pad, mode=self.mode)\n', (963, 1071), False, 'from PuzzleLib.Backend.Dnn import PoolMode, poolNd, poolNdBackward\n'), ((380, 436), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Subtractive norm size must be odd and > 1"""'], {}), "('Subtractive norm size must be odd and > 1')\n", (391, 436), False, 'from PuzzleLib.Modules.Module import ModuleError, Module\n'), ((1308, 1345), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Data must be 4d tensor"""'], {}), "('Data must be 4d tensor')\n", (1319, 1345), False, 'from PuzzleLib.Modules.Module import ModuleError, Module\n'), ((1463, 1500), 'PuzzleLib.Modules.Module.ModuleError', 'ModuleError', (['"""Grad must be 4d tensor"""'], {}), "('Grad must be 4d tensor')\n", (1474, 1500), False, 'from PuzzleLib.Modules.Module import ModuleError, Module\n'), ((1672, 1710), 'numpy.random.randn', 'np.random.randn', (['batchsize', 'maps', 'h', 'w'], {}), '(batchsize, maps, h, w)\n', (1687, 1710), True, 'import numpy as np\n'), ((2332, 2373), 'numpy.random.randn', 'np.random.randn', (['*subtractMean.data.shape'], {}), '(*subtractMean.data.shape)\n', (2347, 2373), True, 'import numpy as np\n'), ((2191, 2237), 'numpy.sum', 'np.sum', (['hostData[b, c, y:y + size, x:x + size]'], {}), '(hostData[b, c, y:y + size, x:x + size])\n', (2197, 2237), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
np.random.seed(1234)
import os
import time
import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from builddata import *
from model import ConvKB
# Parameters
# ==================================================
parser = ArgumentParser("ConvKB", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--data", default="../data/", help="Data sources.")
parser.add_argument("--run_folder", default="../", help="Data sources.")
parser.add_argument("--name", default="WN18RR", help="Name of the dataset.")
parser.add_argument("--embedding_dim", default=50, type=int, help="Dimensionality of character embedding")
parser.add_argument("--filter_sizes", default="1", help="Comma-separated filter sizes")
parser.add_argument("--num_filters", default=500, type=int, help="Number of filters per filter size")
parser.add_argument("--dropout_keep_prob", default=1.0, type=float, help="Dropout keep probability")
parser.add_argument("--l2_reg_lambda", default=0.001, type=float, help="L2 regularization lambda")
parser.add_argument("--learning_rate", default=0.0001, type=float, help="Learning rate")
parser.add_argument("--is_trainable", default=True, type=bool, help="")
parser.add_argument("--batch_size", default=128, type=int, help="Batch Size")
parser.add_argument("--neg_ratio", default=1.0, type=float, help="Number of negative triples generated by positive")
parser.add_argument("--num_epochs", default=201, type=int, help="Number of training epochs")
parser.add_argument("--saveStep", default=200, type=int, help="")
parser.add_argument("--allow_soft_placement", default=True, type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False, type=bool, help="Log placement of ops on devices")
parser.add_argument("--model_name", default='wn18rr', help="")
parser.add_argument("--useConstantInit", action='store_true')
parser.add_argument("--model_index", default='200', help="")
parser.add_argument("--seed", default=1234, type=int, help="")
parser.add_argument("--num_splits", default=8, type=int, help="Split the validation set into 8 parts for a faster evaluation")
parser.add_argument("--testIdx", default=1, type=int, help="From 0 to 7. Index of one of 8 parts")
parser.add_argument("--decode", action='store_false')
args = parser.parse_args()
print(args)
# Load data
print("Loading data...")
train, valid, test, words_indexes, indexes_words, \
headTailSelector, entity2id, id2entity, relation2id, id2relation = build_data(path=args.data, name=args.name)
data_size = len(train)
train_batch = Batch_Loader(train, words_indexes, indexes_words, headTailSelector, \
entity2id, id2entity, relation2id, id2relation, batch_size=args.batch_size,
neg_ratio=args.neg_ratio)
entity_array = np.array(list(train_batch.indexes_ents.keys()))
lstEmbed = []
#Using the pre-trained embeddings.
print("Using pre-trained model.")
lstEmbed = np.empty([len(words_indexes), args.embedding_dim]).astype(np.float32)
initEnt, initRel = init_norm_Vector(args.data + args.name + '/relation2vec' + str(args.embedding_dim) + '.init',
args.data + args.name + '/entity2vec' + str(args.embedding_dim) + '.init',
args.embedding_dim)
for _word in words_indexes:
if _word in relation2id:
index = relation2id[_word]
_ind = words_indexes[_word]
lstEmbed[_ind] = initRel[index]
elif _word in entity2id:
index = entity2id[_word]
_ind = words_indexes[_word]
lstEmbed[_ind] = initEnt[index]
else:
print('*****************Error********************!')
break
lstEmbed = np.array(lstEmbed, dtype=np.float32)
assert len(words_indexes) % (len(entity2id) + len(relation2id)) == 0
print("Loading data... finished!")
x_valid = np.array(list(valid.keys())).astype(np.int32)
y_valid = np.array(list(valid.values())).astype(np.float32)
x_test = np.array(list(test.keys())).astype(np.int32)
y_test = np.array(list(test.values())).astype(np.float32)
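# Shape note (added for clarity; inferred from the in-line comments in the ConvKB
# constructor below): x_valid/x_test hold (head, relation, tail) index triples with
# x_valid.shape[1] == 3, and y_valid/y_test hold the corresponding labels with
# y_valid.shape[1] == 1 (presumably +1/-1 for positive/corrupted triples in ConvKB's
# soft-margin formulation).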
# Training
# ==================================================
with tf.Graph().as_default():
tf.set_random_seed(args.seed)
session_conf = tf.ConfigProto(allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
with sess.as_default():
global_step = tf.Variable(0, name="global_step", trainable=False)
cnn = ConvKB(
sequence_length=x_valid.shape[1], # 3
num_classes=y_valid.shape[1], # 1
pre_trained=lstEmbed,
embedding_size=args.embedding_dim,
filter_sizes=list(map(int, args.filter_sizes.split(","))),
num_filters=args.num_filters,
vocab_size=len(words_indexes),
l2_reg_lambda=args.l2_reg_lambda,
is_trainable=args.is_trainable,
useConstantInit=args.useConstantInit)
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
# optimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
out_dir = os.path.abspath(os.path.join(args.run_folder, "runs", args.model_name))
print("Writing to {}\n".format(out_dir))
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
# Initialize all variables
sess.run(tf.global_variables_initializer())
def train_step(x_batch, y_batch):
"""
A single training step
"""
feed_dict = {
cnn.input_x: x_batch,
cnn.input_y: y_batch,
cnn.dropout_keep_prob: args.dropout_keep_prob,
}
_, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)
if step % 10000 == 0: print(step)
num_batches_per_epoch = int((data_size - 1) / args.batch_size) + 1
for epoch in range(args.num_epochs):
for batch_num in range(num_batches_per_epoch):
x_batch, y_batch = train_batch()
train_step(x_batch, y_batch)
current_step = tf.train.global_step(sess, global_step)
if epoch >= 0:
if epoch % args.saveStep == 0:
path = cnn.saver.save(sess, checkpoint_prefix, global_step=epoch)
print("Saved model checkpoint to {}\n".format(path))
|
[
"numpy.random.seed",
"argparse.ArgumentParser",
"os.makedirs",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"os.path.exists",
"tensorflow.set_random_seed",
"tensorflow.ConfigProto",
"tensorflow.Variable",
"numpy.array",
"tensorflow.Graph",
"tensorflow.train.AdamOptimizer",
"os.path.join",
"tensorflow.train.global_step"
] |
[((44, 64), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (58, 64), True, 'import numpy as np\n'), ((295, 398), 'argparse.ArgumentParser', 'ArgumentParser', (['"""ConvKB"""'], {'formatter_class': 'ArgumentDefaultsHelpFormatter', 'conflict_handler': '"""resolve"""'}), "('ConvKB', formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n", (309, 398), False, 'from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\n'), ((3656, 3692), 'numpy.array', 'np.array', (['lstEmbed'], {'dtype': 'np.float32'}), '(lstEmbed, dtype=np.float32)\n', (3664, 3692), True, 'import numpy as np\n'), ((4125, 4154), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['args.seed'], {}), '(args.seed)\n', (4143, 4154), True, 'import tensorflow as tf\n'), ((4171, 4285), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'args.allow_soft_placement', 'log_device_placement': 'args.log_device_placement'}), '(allow_soft_placement=args.allow_soft_placement,\n log_device_placement=args.log_device_placement)\n', (4185, 4285), True, 'import tensorflow as tf\n'), ((4346, 4377), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (4356, 4377), True, 'import tensorflow as tf\n'), ((4419, 4470), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (4430, 4470), True, 'import tensorflow as tf\n'), ((4887, 4943), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'args.learning_rate'}), '(learning_rate=args.learning_rate)\n', (4909, 4943), True, 'import tensorflow as tf\n'), ((5564, 5601), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""model"""'], {}), "(checkpoint_dir, 'model')\n", (5576, 5601), False, 'import os\n'), ((4099, 4109), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (4107, 4109), True, 'import tensorflow as tf\n'), ((5270, 5324), 'os.path.join', 'os.path.join', (['args.run_folder', '"""runs"""', 'args.model_name'], {}), "(args.run_folder, 'runs', args.model_name)\n", (5282, 5324), False, 'import os\n'), ((5504, 5540), 'os.path.join', 'os.path.join', (['out_dir', '"""checkpoints"""'], {}), "(out_dir, 'checkpoints')\n", (5516, 5540), False, 'import os\n'), ((5611, 5641), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (5625, 5641), False, 'import os\n'), ((5646, 5673), 'os.makedirs', 'os.makedirs', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (5657, 5673), False, 'import os\n'), ((5715, 5748), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (5746, 5748), True, 'import tensorflow as tf\n'), ((6311, 6350), 'tensorflow.train.global_step', 'tf.train.global_step', (['sess', 'global_step'], {}), '(sess, global_step)\n', (6331, 6350), True, 'import tensorflow as tf\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author: 11360
# datetime: 2021/2/28 23:43
# project: Particle Filter
import numpy as np
import matplotlib.pyplot as plt
dt = 0.4
# the noise is still assumed to be Gaussian
variance_Q = 0.1 * dt  # state-transition noise covariance
variance_R = 1 * dt  # measurement noise covariance
class Particle_filter:
def __init__(self, particle_num):
self.particle_num = particle_num
def estimate(self, particles, weights):
""" 每一步对系统状态进行估计,求z关于后验概率的期望,对应即求Particles与权重的加权平均 """
mean = np.average(particles, weights=weights)
var = np.average((particles - mean) ** 2, weights=weights)
return mean, var
def simple_resample(self, particles, weights):
""" 通过累计分布函数进行重采样 """
N = len(particles)
cumulative_sum = np.cumsum(weights)
        cumulative_sum[-1] = 1.  # guard against round-off error
        rn = np.random.rand(N)  # uniform samples in [0, 1)
        indexes = np.searchsorted(cumulative_sum, rn)  # index of the CDF bin each sample falls into
        # resample according to the indices
particles[:] = particles[indexes]
weights.fill(1.0 / N)
return particles, weights
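        # Worked example (illustrative, not part of the original code): with N = 4 and
        # weights = [0.1, 0.2, 0.3, 0.4], cumulative_sum = [0.1, 0.3, 0.6, 1.0]; a uniform
        # draw of 0.5 falls in the bin ending at 0.6, so np.searchsorted returns index 2
        # and particle 2 is copied. Heavier particles own wider bins and are duplicated
        # more often, while light particles tend to be dropped.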
def make_data(self):
""" 构造观测值 """
z = np.random.normal(0, 1) # 初始真实状态
self.hidden_data = [z]
self.observe_data = [dt * z ** 3 + np.random.normal(0, variance_R)]
for i in range(99):
            # 100 data points
z = z + z * dt * np.cos(z) + np.random.normal(0, variance_Q)
x = dt * z ** 3 + np.random.normal(0, variance_R)
self.hidden_data.append(z)
self.observe_data.append(x)
def filter(self, resampling=True):
        z_estimate = 0  # initial state estimate
self.z_estimate_lst = [z_estimate]
        V = 1  # initial covariance
z_particles = z_estimate + np.random.normal(0, V, self.particle_num)
Particles_weights = np.array([1 / self.particle_num for _ in range(self.particle_num)])
self.z_particles_lst = [z_particles]
for i in range(1, len(self.observe_data)):
            # sample from p(z_t | z_{t-1})
z_particles_sampling = self.sampling(z_particles)
x_particles_sampling = dt * z_particles_sampling ** 3
            # compute the weights
Particles_weights = self.cal_weights(self.observe_data[i], x_particles_sampling, Particles_weights)
            # estimate
z_est, z_var_ssd = self.estimate(z_particles_sampling, Particles_weights)
self.z_estimate_lst.append(z_est)
            # resample
if resampling:
z_particles, Particles_weights = self.simple_resample(z_particles_sampling, Particles_weights)
self.z_particles_lst.append(z_particles)
else:
z_particles = z_particles_sampling
self.z_particles_lst.append(z_particles)
def sampling(self, z_particles):
""" 从p(zt|zt-1)中采样 """
z_particles_sampling = z_particles + dt * z_particles * np.cos(z_particles) + \
np.random.normal(0, variance_Q, self.particle_num)
return z_particles_sampling
def cal_weights(self, observed_data, x_particles_sampling, old_par_weights):
""" 计算p(xt|zt)w(t-1), 由于每次都进行重采样,w(t-1)是常数 """
variance_R_guji = variance_R + 2
Particles_weights = (1 / np.sqrt(2 * np.pi * variance_R_guji)) * np.exp(-(observed_data - x_particles_sampling) ** 2 /
(2 * variance_R_guji))
Particles_weights = Particles_weights * old_par_weights
Particles_weights /= np.sum(Particles_weights)
return Particles_weights
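        # In formula form (matches the code above): w_t ∝ N(x_t; dt * z_t**3, variance_R_guji) * w_{t-1},
        # i.e. a Gaussian likelihood of the observation around the predicted measurement,
        # followed by normalisation so that the weights sum to one.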
def plot(self):
plt.figure()
num = len(self.observe_data)
x = [i for i in range(num)]
plt.scatter(x, self.observe_data, c="r", s=5)
plt.scatter(x, self.z_estimate_lst, c="g", s=5)
p1, = plt.plot(x, self.observe_data, c="r", )
for i in range(self.particle_num):
p3, = plt.plot(x, [self.z_particles_lst[j][i] for j in range(num)], color='gray')
p2, = plt.plot(x, self.z_estimate_lst, c="g")
plt.legend([p1, p2, p3], ["Observed data", "Estimated state", "Particle trajectory"])
plt.show()
if __name__ == "__main__":
obj = Particle_filter(100)
obj.make_data()
obj.filter(resampling=False)
obj.plot()
|
[
"numpy.average",
"numpy.sum",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.legend",
"numpy.searchsorted",
"numpy.cumsum",
"matplotlib.pyplot.figure",
"numpy.exp",
"numpy.random.normal",
"numpy.cos",
"numpy.random.rand",
"numpy.sqrt"
] |
[((505, 543), 'numpy.average', 'np.average', (['particles'], {'weights': 'weights'}), '(particles, weights=weights)\n', (515, 543), True, 'import numpy as np\n'), ((559, 611), 'numpy.average', 'np.average', (['((particles - mean) ** 2)'], {'weights': 'weights'}), '((particles - mean) ** 2, weights=weights)\n', (569, 611), True, 'import numpy as np\n'), ((777, 795), 'numpy.cumsum', 'np.cumsum', (['weights'], {}), '(weights)\n', (786, 795), True, 'import numpy as np\n'), ((851, 868), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (865, 868), True, 'import numpy as np\n'), ((904, 939), 'numpy.searchsorted', 'np.searchsorted', (['cumulative_sum', 'rn'], {}), '(cumulative_sum, rn)\n', (919, 939), True, 'import numpy as np\n'), ((1152, 1174), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {}), '(0, 1)\n', (1168, 1174), True, 'import numpy as np\n'), ((3566, 3591), 'numpy.sum', 'np.sum', (['Particles_weights'], {}), '(Particles_weights)\n', (3572, 3591), True, 'import numpy as np\n'), ((3658, 3670), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3668, 3670), True, 'import matplotlib.pyplot as plt\n'), ((3755, 3800), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'self.observe_data'], {'c': '"""r"""', 's': '(5)'}), "(x, self.observe_data, c='r', s=5)\n", (3766, 3800), True, 'import matplotlib.pyplot as plt\n'), ((3810, 3857), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x', 'self.z_estimate_lst'], {'c': '"""g"""', 's': '(5)'}), "(x, self.z_estimate_lst, c='g', s=5)\n", (3821, 3857), True, 'import matplotlib.pyplot as plt\n'), ((3873, 3910), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.observe_data'], {'c': '"""r"""'}), "(x, self.observe_data, c='r')\n", (3881, 3910), True, 'import matplotlib.pyplot as plt\n'), ((4067, 4106), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'self.z_estimate_lst'], {'c': '"""g"""'}), "(x, self.z_estimate_lst, c='g')\n", (4075, 4106), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4207), 'matplotlib.pyplot.legend', 'plt.legend', (['[p1, p2, p3]', "['Observed data', 'Estimated state', 'Particle trajectory']"], {}), "([p1, p2, p3], ['Observed data', 'Estimated state',\n 'Particle trajectory'])\n", (4128, 4207), True, 'import matplotlib.pyplot as plt\n'), ((4213, 4223), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4221, 4223), True, 'import matplotlib.pyplot as plt\n'), ((1749, 1790), 'numpy.random.normal', 'np.random.normal', (['(0)', 'V', 'self.particle_num'], {}), '(0, V, self.particle_num)\n', (1765, 1790), True, 'import numpy as np\n'), ((2974, 3024), 'numpy.random.normal', 'np.random.normal', (['(0)', 'variance_Q', 'self.particle_num'], {}), '(0, variance_Q, self.particle_num)\n', (2990, 3024), True, 'import numpy as np\n'), ((3318, 3394), 'numpy.exp', 'np.exp', (['(-(observed_data - x_particles_sampling) ** 2 / (2 * variance_R_guji))'], {}), '(-(observed_data - x_particles_sampling) ** 2 / (2 * variance_R_guji))\n', (3324, 3394), True, 'import numpy as np\n'), ((1261, 1292), 'numpy.random.normal', 'np.random.normal', (['(0)', 'variance_R'], {}), '(0, variance_R)\n', (1277, 1292), True, 'import numpy as np\n'), ((1390, 1421), 'numpy.random.normal', 'np.random.normal', (['(0)', 'variance_Q'], {}), '(0, variance_Q)\n', (1406, 1421), True, 'import numpy as np\n'), ((1453, 1484), 'numpy.random.normal', 'np.random.normal', (['(0)', 'variance_R'], {}), '(0, variance_R)\n', (1469, 1484), True, 'import numpy as np\n'), ((3278, 3314), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi * variance_R_guji)'], {}), '(2 * np.pi * variance_R_guji)\n', (3285, 3314), True, 'import numpy as np\n'), ((2918, 2937), 'numpy.cos', 'np.cos', (['z_particles'], {}), '(z_particles)\n', (2924, 2937), True, 'import numpy as np\n'), ((1378, 1387), 'numpy.cos', 'np.cos', (['z'], {}), '(z)\n', (1384, 1387), True, 'import numpy as np\n')]
|
import numpy as np
from ml.model import NumberRecognizeNN
from ml.data_processor import DataProcessor
class ModelAPI():
def __init__(self, resource):
self.resource = resource
self.model = NumberRecognizeNN(resource.INPUT_SIZE, resource.OUTPUT_SIZE)
resource.load_model(self.model)
means, stds = resource.load_normalization_params()
self.dp = DataProcessor(means, stds)
def predict(self, data):
_data = data
if isinstance(data, (tuple, list)):
_data = np.array([data], dtype=np.float32)
f_data = self.dp.format_x(_data, size=self.resource.INPUT_SIZE)
predicted = self.model(f_data)
number = np.argmax(predicted.data, axis=1)
return number
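# Minimal usage sketch (illustrative only; `resource` must provide INPUT_SIZE,
# OUTPUT_SIZE, load_model() and load_normalization_params(), as used in __init__):
#   api = ModelAPI(resource)
#   digit = api.predict([0.0] * resource.INPUT_SIZE)  # returns an array with the predicted class index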
|
[
"ml.model.NumberRecognizeNN",
"numpy.array",
"ml.data_processor.DataProcessor",
"numpy.argmax"
] |
[((211, 271), 'ml.model.NumberRecognizeNN', 'NumberRecognizeNN', (['resource.INPUT_SIZE', 'resource.OUTPUT_SIZE'], {}), '(resource.INPUT_SIZE, resource.OUTPUT_SIZE)\n', (228, 271), False, 'from ml.model import NumberRecognizeNN\n'), ((390, 416), 'ml.data_processor.DataProcessor', 'DataProcessor', (['means', 'stds'], {}), '(means, stds)\n', (403, 416), False, 'from ml.data_processor import DataProcessor\n'), ((696, 729), 'numpy.argmax', 'np.argmax', (['predicted.data'], {'axis': '(1)'}), '(predicted.data, axis=1)\n', (705, 729), True, 'import numpy as np\n'), ((532, 566), 'numpy.array', 'np.array', (['[data]'], {'dtype': 'np.float32'}), '([data], dtype=np.float32)\n', (540, 566), True, 'import numpy as np\n')]
|
"""
Users can register their rollout functions here, with the same parameter list as the method `sequential`,
and return a Dict-like metric result.
Examples:
>>> def custom_rollout_function(
... agent_interfaces: List[env.AgentInterface],
... env_desc: Dict[str, Any],
... metric_type: str,
... max_iter: int,
... behavior_policy_mapping: Dict[AgentID, PolicyID],
... ) -> Dict[str, Any]
In your custom rollout function, you can decide what extra data
to save by specifying extra columns at Episode initialization.
"""
import collections
from typing import Iterator
import ray
import numpy as np
from malib import settings
from malib.utils.general import iter_many_dicts_recursively
from malib.utils.typing import (
AgentID,
BufferDescription,
Dict,
Any,
Tuple,
List,
BehaviorMode,
EnvID,
Callable,
Union,
)
from malib.utils.logger import Log
from malib.utils.episode import Episode, NewEpisodeDict, EpisodeKey
from malib.rollout.postprocessor import get_postprocessor
from malib.envs.vector_env import VectorEnv, SubprocVecEnv
from malib.envs.agent_interface import AgentInterface
def _process_environment_returns(
env_rets: Dict[EnvID, Dict[str, Dict[AgentID, Any]]],
agent_interfaces: Dict[AgentID, AgentInterface],
filtered_env_outputs: Dict[EnvID, Dict[str, Dict[AgentID, Any]]],
) -> Tuple[
Dict[EnvID, Dict[str, Dict[AgentID, Any]]],
Dict[EnvID, Dict[str, Dict[AgentID, Any]]],
List[EnvID],
]:
"""Processes environment returns, including observation, rewards. Also the agent
communication.
"""
policy_inputs = {}
drop_env_ids = []
for env_id, rets in env_rets.items():
# preset done if no done
policy_input = {}
drop = False
if env_id not in filtered_env_outputs:
filtered_env_outputs[env_id] = {}
filtered_env_output = filtered_env_outputs[env_id]
for k, ret in rets.items():
if k in [EpisodeKey.CUR_OBS, EpisodeKey.NEXT_OBS]:
output = {
aid: agent_interfaces[aid].transform_observation(
observation=obs, state=None
)["obs"]
for aid, obs in ret.items()
}
if k == EpisodeKey.NEXT_OBS:
if EpisodeKey.CUR_OBS not in filtered_env_output:
filtered_env_output[EpisodeKey.CUR_OBS] = output
policy_input[EpisodeKey.CUR_OBS] = output
elif k == EpisodeKey.NEXT_STATE:
if EpisodeKey.CUR_STATE not in filtered_env_output:
filtered_env_output[EpisodeKey.CUR_STATE] = ret
policy_input[EpisodeKey.CUR_STATE] = ret
else:
if k == EpisodeKey.DONE:
done = ret["__all__"]
drop = done
drop_env_ids.append(env_id)
output = {k: v for k, v in ret.items() if k != "__all__"}
else:
output = ret
policy_input[k] = output
filtered_env_output[k] = output
if not drop:
policy_inputs[env_id] = policy_input
# we transfer DONE key as a signal for some masking behaviors
if EpisodeKey.DONE not in policy_input:
policy_input = {
EpisodeKey.DONE: dict.fromkeys(rets[EpisodeKey.CUR_OBS], False)
}
return policy_inputs, filtered_env_outputs, drop_env_ids
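# Illustrative shape of `env_rets` (inferred from the handling above; not part of the
# original source):
#   {env_id: {EpisodeKey.CUR_OBS: {agent_id: obs, ...},
#             EpisodeKey.DONE: {"__all__": False, agent_id: False, ...},
#             ...other per-agent step keys (e.g. rewards)...}}
# Environments whose "__all__" done flag is True are collected in `drop_env_ids` and
# left out of `policy_inputs` for the next policy evaluation.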
def _do_policy_eval(
policy_inputs: Dict[EnvID, Dict[str, Dict[AgentID, Any]]],
agent_interfaces: Dict[AgentID, AgentInterface],
episodes: NewEpisodeDict,
) -> Dict[str, Dict[EnvID, Dict[AgentID, Any]]]:
actions, action_dists, next_rnn_state = {}, {}, {}
env_ids = list(policy_inputs.keys())
# we need to link environment id to agent ids, especially in the case of
# sequential rollout
env_agent_ids = []
# collect by agent wise
agent_wise_inputs = collections.defaultdict(
lambda: collections.defaultdict(lambda: [])
)
for env_id in env_ids:
env_episode = episodes[env_id]
# for agent_id, interface in agent_interfaces.items():
env_agent_ids.append(list(policy_inputs[env_id][EpisodeKey.CUR_OBS].keys()))
for agent_id in policy_inputs[env_id][EpisodeKey.CUR_OBS].keys():
interface = agent_interfaces[agent_id]
if len(env_episode[EpisodeKey.RNN_STATE][agent_id]) < 1:
obs_shape = policy_inputs[env_id][EpisodeKey.CUR_OBS][agent_id].shape
env_episode[EpisodeKey.RNN_STATE][agent_id].append(
interface.get_initial_state(
batch_size=None if len(obs_shape) == 1 else obs_shape[0]
)
)
# FIXME(ming): maybe wrong in some cases, I didn't load it yet.
last_done = np.zeros(obs_shape[:-1])
else:
last_done = env_episode[EpisodeKey.DONE][agent_id][-1]
last_rnn_state = env_episode[EpisodeKey.RNN_STATE][agent_id][-1]
agent_wise_inputs[agent_id][EpisodeKey.RNN_STATE].append(last_rnn_state)
# rnn mask dependences on done or not
agent_wise_inputs[agent_id][EpisodeKey.DONE].append(last_done)
for k, agent_v in policy_inputs[env_id].items():
for agent_id, v in agent_v.items():
agent_wise_inputs[agent_id][k].append(v)
for agent_id, inputs in agent_wise_inputs.items():
interface = agent_interfaces[agent_id]
(
actions[agent_id],
action_dists[agent_id],
next_rnn_state[agent_id],
) = interface.compute_action(**inputs)
return {
EpisodeKey.ACTION: actions,
EpisodeKey.ACTION_DIST: action_dists,
EpisodeKey.RNN_STATE: next_rnn_state,
}, dict(zip(env_ids, env_agent_ids))
def _process_policy_outputs(
active_env_to_agent_ids: Dict[EnvID, List[AgentID]],
# policy_outputs: Dict[str, Dict[AgentID, DataTransferType]],
policy_outputs: Dict[str, Dict[AgentID, Iterator]],
env: VectorEnv,
) -> Dict[EnvID, Dict[AgentID, Any]]:
"""Proceses the policy returns. Here we convert the policy return to legal environment step inputs."""
assert (
EpisodeKey.ACTION in policy_outputs and EpisodeKey.ACTION_DIST in policy_outputs
), "`action` and `action_prob` are required in the policy outputs, please check the return of `_do_policy_eval`: {}".format(
list(policy_outputs.keys())
)
detached_policy_outputs = {}
for i, (env_id, agent_ids) in enumerate(active_env_to_agent_ids.items()):
detached = collections.defaultdict(lambda: collections.defaultdict())
for k, agent_v in policy_outputs.items():
for aid in agent_ids:
_v = agent_v[aid]
if k == EpisodeKey.RNN_STATE:
detached[k][aid] = [next(__v) for __v in _v]
else:
detached[k][aid] = next(_v)
detached_policy_outputs[env_id] = detached
env_actions: Dict[EnvID, Dict[AgentID, Any]] = env.action_adapter(
detached_policy_outputs
)
return env_actions, detached_policy_outputs
# def _reduce_rollout_info(rollout_info) -> Dict[str, float]:
# res = {}
# if isinstance(rollout_info, list) or isinstance(rollout_info, tuple):
# _item = np.array(rollout_info)
# res["mean"] = np.mean(_item)
# res["min"] = np.min(_item)
# res["max"] = np.max(_item)
# elif isinstance(rollout_info, dict):
# for k, item in rollout_info.items():
# res[k] = _reduce_rollout_info(item)
# else:
# res = rollout_info
# return res
def env_runner(
env: VectorEnv,
agent_interfaces: Dict[AgentID, AgentInterface],
buffer_desc: BufferDescription,
runtime_config: Dict[str, Any],
dataset_server: ray.ObjectRef = None,
custom_environment_return_processor: Callable = None,
custom_policy_output_processor: Callable = None,
custom_do_policy_eval: Callable = None,
) -> Dict[str, Dict[str, Any]]:
"""Rollout in simultaneous mode, support environment vectorization.
:param VectorEnv env: The environment instance.
:param Dict[Agent,AgentInterface] agent_interfaces: The dict of agent interfaces for interacting with environment.
:param ray.ObjectRef dataset_server: The offline dataset server handler, buffering data if it is not None.
:return: A dict of rollout information.
"""
# collect runtime configuration into runtime_config
# keys in runtime config:
    # 1. num_envs: determines how many environments this runner will use
    # 2. max_step: the maximum length of one episode
    # 3. fragment_length: the total length you want to run in this runner
# 4. behavior_policies: a dict of policy ids, mapping from agent id to policy id
behavior_policies = runtime_config["behavior_policies"]
sample_dist = runtime_config.get("behavior_policy_dist", None)
for agent_id, interface in agent_interfaces.items():
interface.reset(policy_id=behavior_policies[agent_id], sample_dist=sample_dist)
# if buffer_desc is not None:
# assert runtime_config["trainable_mapping"] is None, (runtime_config, dataset_server)
rets = env.reset(
limits=runtime_config["num_envs"],
fragment_length=runtime_config["fragment_length"],
max_step=runtime_config["max_step"],
custom_reset_config=runtime_config["custom_reset_config"],
trainable_mapping=runtime_config["trainable_mapping"],
)
if isinstance(env, VectorEnv):
assert len(env.active_envs) > 0, (env._active_envs, rets, env)
episodes = NewEpisodeDict(lambda env_id: Episode(behavior_policies, env_id=env_id))
# process_environment_returns = (
# custom_environment_return_processor or _process_environment_returns
# )
# process_policy_outputs = custom_policy_output_processor or _process_policy_outputs
# do_policy_eval = custom_do_policy_eval or _do_policy_eval
    # XXX(ming): currently, we mute all processors' customization to avoid unpredictable behaviors
process_environment_returns = _process_environment_returns
process_policy_outputs = _process_policy_outputs
do_policy_eval = _do_policy_eval
while not env.is_terminated():
filtered_env_outputs = {}
# ============ a frame =============
(
active_policy_inputs,
filtered_env_outputs,
drop_env_ids,
) = process_environment_returns(rets, agent_interfaces, filtered_env_outputs)
active_policy_outputs, active_env_to_agent_ids = do_policy_eval(
active_policy_inputs, agent_interfaces, episodes
)
env_inputs, detached_policy_outputs = process_policy_outputs(
active_env_to_agent_ids, active_policy_outputs, env
)
# XXX(ming): maybe more general inputs.
rets = env.step(env_inputs)
# again, filter next_obs here
(
active_policy_inputs,
filtered_env_outputs,
drop_env_ids,
) = process_environment_returns(rets, agent_interfaces, filtered_env_outputs)
# filter policy inputs here
# =================================
episodes.record(detached_policy_outputs, filtered_env_outputs)
rollout_info = env.collect_info()
if dataset_server:
policies = {
aid: interface.get_policy(behavior_policies[aid])
for aid, interface in agent_interfaces.items()
}
batch_mode = runtime_config["batch_mode"]
trainable_agents = list(runtime_config["trainable_mapping"].keys())
episodes = list(episodes.to_numpy(batch_mode, filter=trainable_agents).values())
for handler in get_postprocessor(runtime_config["postprocessor_types"]):
episodes = handler(episodes, policies)
buffer_desc.batch_size = (
env.batched_step_cnt if batch_mode == "time_step" else len(episodes)
)
indices = None
while indices is None:
batch = ray.get(dataset_server.get_producer_index.remote(buffer_desc))
indices = batch.data
# buffer_desc.batch_size = len(indices)
buffer_desc.data = episodes
buffer_desc.indices = indices
dataset_server.save.remote(buffer_desc)
ph = list(rollout_info.values())
holder = {}
for history, ds, k, vs in iter_many_dicts_recursively(*ph, history=[]):
arr = [np.sum(_vs) for _vs in vs]
prefix = "/".join(history)
# print(history, prefix, _arr, vs)
holder[prefix] = arr
return {"total_fragment_length": env.batched_step_cnt, "eval_info": holder}
class Stepping:
def __init__(
self,
exp_cfg: Dict[str, Any],
env_desc: Dict[str, Any],
dataset_server=None,
use_subproc_env: bool = False,
batch_mode: str = "time_step",
postprocessor_types: List[Union[str, Callable]] = ["default"],
):
# init environment here
self.env_desc = env_desc
self.batch_mode = batch_mode
self.postprocessor_types = postprocessor_types
# if not env.is_sequential:
if use_subproc_env:
self.env = SubprocVecEnv(
env_desc["observation_spaces"],
env_desc["action_spaces"],
env_desc["creator"],
env_desc["config"],
max_num_envs=2, # FIXME(ziyu): currently just fixed it.
)
else:
self.env = VectorEnv(
observation_spaces=env_desc["observation_spaces"],
action_spaces=env_desc["action_spaces"],
creator=env_desc["creator"],
configs=env_desc["config"],
)
self._dataset_server = dataset_server
@classmethod
def as_remote(
cls,
num_cpus: int = None,
num_gpus: int = None,
memory: int = None,
object_store_memory: int = None,
resources: dict = None,
) -> type:
"""Return a remote class for Actor initialization."""
return ray.remote(
num_cpus=num_cpus,
num_gpus=num_gpus,
memory=memory,
object_store_memory=object_store_memory,
resources=resources,
)(cls)
@Log.data_feedback(enable=settings.DATA_FEEDBACK)
def run(
self,
agent_interfaces: Dict[AgentID, AgentInterface],
fragment_length: int,
desc: Dict[str, Any],
buffer_desc: BufferDescription = None,
) -> Tuple[str, Dict[str, List]]:
"""Environment stepping, rollout/simulate with environment vectorization if it is feasible.
:param Dict[AgentID,AgentInterface] agent_interface: A dict of agent interfaces.
:param Union[str,type] metric_type: Metric type or handler.
:param int fragment_length: The maximum length of an episode.
:param Dict[str,Any] desc: The description of task.
:param str role: Indicator of stepping type. Values in `rollout` or `simulation`.
        :returns: A tuple of a dict of MetricEntry and the calculation of total frames.
"""
task_type = desc["flag"]
behavior_policies = {}
if task_type == "rollout":
for interface in agent_interfaces.values():
interface.set_behavior_mode(BehaviorMode.EXPLORATION)
else:
for interface in agent_interfaces.values():
interface.set_behavior_mode(BehaviorMode.EXPLOITATION)
# desc: policy_distribution, behavior_policies, num_episodes
policy_distribution = desc.get("policy_distribution")
for agent, interface in agent_interfaces.items():
if policy_distribution:
interface.reset(sample_dist=policy_distribution[agent])
behavior_policies[agent] = interface.behavior_policy
# behavior policies is a mapping from agents to policy ids
# update with external behavior_policies
behavior_policies.update(desc["behavior_policies"] or {})
# specify the number of running episodes
num_episodes = desc["num_episodes"]
max_step = desc.get("max_step", None)
self.add_envs(num_episodes)
rollout_results = env_runner(
self.env,
agent_interfaces,
buffer_desc if task_type == "rollout" else None,
runtime_config={
"max_step": max_step,
"fragment_length": fragment_length,
"num_envs": num_episodes,
"behavior_policies": behavior_policies,
"custom_reset_config": None,
"batch_mode": self.batch_mode,
"trainable_mapping": desc["behavior_policies"]
if task_type == "rollout"
else None,
"postprocessor_types": self.postprocessor_types,
},
dataset_server=self._dataset_server if task_type == "rollout" else None,
)
return task_type, rollout_results
def add_envs(self, maximum: int) -> int:
"""Create environments, if env is an instance of VectorEnv, add these new environment instances into it,
otherwise do nothing.
:returns: The number of nested environments.
"""
if not isinstance(self.env, VectorEnv):
return 1
existing_env_num = getattr(self.env, "num_envs", 1)
if existing_env_num >= maximum:
return self.env.num_envs
self.env.add_envs(num=maximum - existing_env_num)
return self.env.num_envs
def close(self):
if self.env is not None:
self.env.close()
|
[
"ray.remote",
"malib.utils.general.iter_many_dicts_recursively",
"numpy.sum",
"malib.utils.logger.Log.data_feedback",
"malib.rollout.postprocessor.get_postprocessor",
"numpy.zeros",
"malib.envs.vector_env.VectorEnv",
"collections.defaultdict",
"malib.utils.episode.Episode",
"malib.envs.vector_env.SubprocVecEnv"
] |
[((12640, 12684), 'malib.utils.general.iter_many_dicts_recursively', 'iter_many_dicts_recursively', (['*ph'], {'history': '[]'}), '(*ph, history=[])\n', (12667, 12684), False, 'from malib.utils.general import iter_many_dicts_recursively\n'), ((14564, 14612), 'malib.utils.logger.Log.data_feedback', 'Log.data_feedback', ([], {'enable': 'settings.DATA_FEEDBACK'}), '(enable=settings.DATA_FEEDBACK)\n', (14581, 14612), False, 'from malib.utils.logger import Log\n'), ((11979, 12035), 'malib.rollout.postprocessor.get_postprocessor', 'get_postprocessor', (["runtime_config['postprocessor_types']"], {}), "(runtime_config['postprocessor_types'])\n", (11996, 12035), False, 'from malib.rollout.postprocessor import get_postprocessor\n'), ((4106, 4142), 'collections.defaultdict', 'collections.defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (4129, 4142), False, 'import collections\n'), ((9892, 9933), 'malib.utils.episode.Episode', 'Episode', (['behavior_policies'], {'env_id': 'env_id'}), '(behavior_policies, env_id=env_id)\n', (9899, 9933), False, 'from malib.utils.episode import Episode, NewEpisodeDict, EpisodeKey\n'), ((12701, 12712), 'numpy.sum', 'np.sum', (['_vs'], {}), '(_vs)\n', (12707, 12712), True, 'import numpy as np\n'), ((13464, 13597), 'malib.envs.vector_env.SubprocVecEnv', 'SubprocVecEnv', (["env_desc['observation_spaces']", "env_desc['action_spaces']", "env_desc['creator']", "env_desc['config']"], {'max_num_envs': '(2)'}), "(env_desc['observation_spaces'], env_desc['action_spaces'],\n env_desc['creator'], env_desc['config'], max_num_envs=2)\n", (13477, 13597), False, 'from malib.envs.vector_env import VectorEnv, SubprocVecEnv\n'), ((13767, 13935), 'malib.envs.vector_env.VectorEnv', 'VectorEnv', ([], {'observation_spaces': "env_desc['observation_spaces']", 'action_spaces': "env_desc['action_spaces']", 'creator': "env_desc['creator']", 'configs': "env_desc['config']"}), "(observation_spaces=env_desc['observation_spaces'], action_spaces=\n env_desc['action_spaces'], creator=env_desc['creator'], configs=\n env_desc['config'])\n", (13776, 13935), False, 'from malib.envs.vector_env import VectorEnv, SubprocVecEnv\n'), ((14356, 14485), 'ray.remote', 'ray.remote', ([], {'num_cpus': 'num_cpus', 'num_gpus': 'num_gpus', 'memory': 'memory', 'object_store_memory': 'object_store_memory', 'resources': 'resources'}), '(num_cpus=num_cpus, num_gpus=num_gpus, memory=memory,\n object_store_memory=object_store_memory, resources=resources)\n', (14366, 14485), False, 'import ray\n'), ((4989, 5013), 'numpy.zeros', 'np.zeros', (['obs_shape[:-1]'], {}), '(obs_shape[:-1])\n', (4997, 5013), True, 'import numpy as np\n'), ((6812, 6837), 'collections.defaultdict', 'collections.defaultdict', ([], {}), '()\n', (6835, 6837), False, 'import collections\n')]
|
import warnings
from typing import Set, Dict, Optional, List, Tuple
import numpy as np
import pandas as pd
from mdrsl.data_structures.rules.rule_part import Consequent
from mdrsl.evaluation.interpretability.basic_rule_set_stats import BasicRuleSetStatistics, is_valid_fraction
from mdrsl.rule_models.mids.cover.cover_checker import CoverChecker
from mdrsl.rule_models.mids.cover.overlap_cacher import OverlapChecker
from mdrsl.rule_models.mids.mids_rule import MIDSRule
from mdrsl.rule_models.mids.mids_ruleset import MIDSRuleSet
from mdrsl.utils.value_collection import ValueCollector
TargetAttr = str
TargetVal = object
DefaultCoverChecker = CoverChecker
DefaultOverlapChecker = OverlapChecker
class MIDSInterpretabilityStatistics(BasicRuleSetStatistics):
def __init__(self,
rule_length_collector: ValueCollector,
fraction_bodily_overlap: float,
fraction_uncovered_examples: float,
avg_frac_predicted_classes: float,
frac_predicted_classes_per_target_attr: Dict[TargetAttr, float],
# ground_set_size: Optional[int] = None
):
super().__init__(rule_length_collector, model_abbreviation="MIDS")
self.fraction_bodily_overlap: float = fraction_bodily_overlap
self.fraction_uncovered_examples: float = fraction_uncovered_examples
self.avg_frac_predicted_classes: float = avg_frac_predicted_classes
self.frac_predicted_classes_per_target_attr: Dict[TargetAttr, float] = frac_predicted_classes_per_target_attr
# self.ground_set_size: Optional[int] = ground_set_size
def to_str(self, indentation: str = "") -> str:
output_string = (
indentation + "Rule length stats: " + str(self.rule_length_counter) + "\n"
+ indentation + "Fraction bodily overlap: " + str(self.fraction_bodily_overlap) + "\n"
+ indentation + "Fraction uncovered examples: " + str(self.fraction_uncovered_examples) + "\n"
+ indentation + "Avg fraction predicted classes: " + str(self.avg_frac_predicted_classes) + "\n"
+ indentation + "Fraction predicted classs by target:\n"
+ indentation + "\t" + str(self.frac_predicted_classes_per_target_attr) + "\n"
)
return output_string
def __str__(self):
return self.to_str()
    def satisfies(self, condition: 'MIDSInterpretabilityStatisticsAbstractCondition') -> bool:
return condition.is_satisfied_by(self)
class MIDSInterpretabilityStatisticsCalculator:
@staticmethod
def _fraction_overlap(ruleset: MIDSRuleSet, test_dataframe: pd.DataFrame,
target_attr: Optional[TargetAttr] = None,
cover_checker_on_test: Optional[CoverChecker] = None,
overlap_checker_on_test: Optional[OverlapChecker] = None,
debug=False) -> float:
"""
        This metric captures the extent of overlap between every pair of rules in a decision set R.
Smaller values of this metric signify higher interpretability.
Boundary values:
            0.0 if no rules in R overlap;
            1.0 if all data points are covered by all rules in R.
NOTE:
* this is 0.0 for any decision list,
because their if-else structure ensures that a rule in the list applies only to those data points
                which have not been covered by any of the preceding rules
* this is 0.0 for the empty rule set
:param ruleset:
:param test_dataframe:
:param cover_checker_on_test:
:param overlap_checker_on_test:
:return:
"""
if type(ruleset) != MIDSRuleSet:
raise Exception(f"Type of ruleset must be MIDSRuleSet, but is {type(ruleset)}")
# warnings.warn("FRACTION_OVERLAP IS CURRENTLY NOT RELATIVE TO A TARGET ATTRIBUTE. THIS MIGHT BE INCORRECT")
ruleset_size: int = len(ruleset)
if ruleset_size == 0:
print("Warning: the MIDS rule set is empty.")
return 0.0
nb_of_test_examples: int = test_dataframe.index.size
if nb_of_test_examples == 0:
raise Exception("There are no test instances to calculate overlap on")
if cover_checker_on_test is None:
cover_checker_on_test = DefaultCoverChecker()
if overlap_checker_on_test is None:
overlap_checker_on_test = DefaultOverlapChecker(cover_checker_on_test, debug)
overlap_sum: int = 0
rule_i: MIDSRule
rule_j: MIDSRule
for i, rule_i in enumerate(ruleset.ruleset):
for j, rule_j in enumerate(ruleset.ruleset):
if i <= j:
continue
else:
if target_attr is None:
overlap_sum += overlap_checker_on_test.get_pure_overlap_count(rule_i, rule_j, test_dataframe)
else:
overlap_sum += overlap_checker_on_test.get_relative_overlap_count(rule_i, rule_j,
test_dataframe, target_attr)
if overlap_sum == 0:
warnings.warn("overlap is 0, which is unlikely")
return 0
else:
frac_overlap = 2 / (ruleset_size * (ruleset_size - 1)) * overlap_sum / nb_of_test_examples
if not is_valid_fraction(frac_overlap):
raise Exception("Fraction overlap is not within [0,1]: " + str(frac_overlap))
return frac_overlap
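    # For reference, the computation above implements
    #     overlap(R) = 2 / (|R| * (|R| - 1)) * sum_{i < j} overlap_count(r_i, r_j) / N
    # where N is the number of test examples and overlap_count is the number of test
    # examples covered by both rules (optionally restricted to the target attribute).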
@staticmethod
def fraction_bodily_overlap(ruleset: MIDSRuleSet, test_dataframe: pd.DataFrame,
cover_checker_on_test: Optional[CoverChecker] = None,
overlap_checker_on_test: Optional[OverlapChecker] = None,
debug=False) -> float:
return MIDSInterpretabilityStatisticsCalculator._fraction_overlap(
ruleset=ruleset, test_dataframe=test_dataframe,
target_attr=None,
cover_checker_on_test=cover_checker_on_test,
overlap_checker_on_test=overlap_checker_on_test,
debug=debug
)
@staticmethod
def get_fraction_overlap_relative_to_target_attr(ruleset: MIDSRuleSet, test_dataframe: pd.DataFrame,
target_attr: str,
cover_checker_on_test: Optional[CoverChecker] = None,
overlap_checker_on_test: Optional[OverlapChecker] = None,
debug=False
) -> float:
"""
        This metric captures the extent of overlap between every pair of rules in a decision set R.
Smaller values of this metric signify higher interpretability.
Boundary values:
            0.0 if no rules in R overlap;
            1.0 if all data points are covered by all rules in R.
NOTE:
* this is 0.0 for any decision list,
because their if-else structure ensures that a rule in the list applies only to those data points
                which have not been covered by any of the preceding rules
* this is 0.0 for the empty rule set
:param ruleset:
:param test_dataframe:
:param target_attr:
:param cover_checker_on_test:
:param overlap_checker_on_test:
:param debug:
:return:
"""
if target_attr is None:
raise Exception("Cannot calculate relative overlap fraction without a given target attr. It is None.")
return MIDSInterpretabilityStatisticsCalculator._fraction_overlap(
ruleset=ruleset,
test_dataframe=test_dataframe,
target_attr=target_attr,
cover_checker_on_test=cover_checker_on_test,
overlap_checker_on_test=overlap_checker_on_test,
debug=debug
)
@staticmethod
def fraction_uncovered_examples(ruleset: MIDSRuleSet, test_dataframe: pd.DataFrame,
cover_checker_on_test: Optional[CoverChecker] = None
) -> float:
"""
This metric computes the fraction of the data points which are not covered by any rule in the decision set.
REMEMBER, COVER is independent of the head of a rule.
Boundary values:
0.0 if every data point is covered by some rule in the data set.
1.0 when no data point is covered by any rule in R
(This could be the case when |R| = 0 )
Note:
* for decision lists, this metric is
the fraction of the data points that are covered by the ELSE clause of the list
(i.e. the default prediction).
:param ruleset:
:param test_dataframe:
:param cover_checker_on_test:
:return:
"""
if type(ruleset) != MIDSRuleSet:
raise Exception("Type of ruleset must be IDSRuleSet")
if cover_checker_on_test is None:
cover_checker_on_test = DefaultCoverChecker()
nb_of_test_examples: int = test_dataframe.index.size
if nb_of_test_examples == 0:
raise Exception("There are no test instances to calculate the fraction uncovered on")
cover_cumulative_mask: np.ndarray = np.zeros(nb_of_test_examples, dtype=bool)
for rule in ruleset.ruleset:
cover_mask = cover_checker_on_test.get_cover(rule, test_dataframe)
cover_cumulative_mask = np.logical_or(cover_cumulative_mask, cover_mask)
nb_of_covered_test_examples: int = np.count_nonzero(cover_cumulative_mask)
frac_uncovered: float = 1 - 1 / nb_of_test_examples * nb_of_covered_test_examples
if not is_valid_fraction(frac_uncovered):
raise Exception("Fraction uncovered examples is not within [0,1]: " + str(frac_uncovered))
return frac_uncovered
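    # For reference, the computation above implements
    #     frac_uncovered(R) = 1 - |union of cover(r) over r in R| / N
    # with N the number of test examples.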
@staticmethod
def fraction_predicted_classes(ruleset: MIDSRuleSet, test_dataframe,
target_attributes: List[TargetAttr]
) -> Tuple[float, Dict[TargetAttr, float]]:
"""
This metric denotes the fraction of the classes in the data that are predicted by the ruleset R.
Returns:
1. fraction per target attribute, averaged over the different targets
2. a map from target attribute to fraction for that target attr
Boundary value:
0.0 if no class is predicted (e.g. the ruleset is empty)
1.0 every class is predicted by some rule in R.
Note:
            * The same holds for decision lists, but we do not consider the ELSE clause (the default prediction).
:param target_attributes:
:param ruleset:
:param test_dataframe:
:return:
"""
if type(ruleset) != MIDSRuleSet:
raise Exception("Type of ruleset must be IDSRuleSet")
warnings.warn(
"Ugly conversion to string to deal with numerical attributes."
" Clean this up (look at Survived in Titanic).")
values_in_data_per_target_attribute: Dict[TargetAttr, Set[TargetVal]] = {}
predicted_values_per_target_attribute: Dict[TargetAttr, Set[TargetVal]] = {}
for target_attr in target_attributes:
values_as_str: List[str] = [str(val) for val in test_dataframe[target_attr].values]
values_in_data_per_target_attribute[target_attr] = set(values_as_str)
predicted_values_per_target_attribute[target_attr] = set()
target_attribute_set: Set[TargetAttr] = set(target_attributes)
for rule in ruleset.ruleset:
consequent: Consequent = rule.car.consequent
for literal in consequent.get_literals():
predicted_attribute: TargetAttr = literal.get_attribute()
predicted_value: TargetVal = literal.get_value()
if predicted_attribute in target_attribute_set:
predicted_value_str = str(predicted_value)
predicted_values: Set[TargetVal] = predicted_values_per_target_attribute[predicted_attribute]
if predicted_value_str in values_in_data_per_target_attribute[predicted_attribute]:
predicted_values.add(predicted_value_str)
# print("values_in_data_per_target_attribute", values_in_data_per_target_attribute)
# print("predicted_values_per_target_attribute", predicted_values_per_target_attribute)
frac_predicted_classes_per_target_attr: Dict[TargetAttr, float] = {}
avg_frac_predicted_classes: float = 0
for target_attr in values_in_data_per_target_attribute.keys():
values_occuring_in_data = values_in_data_per_target_attribute[target_attr]
predicted_values = predicted_values_per_target_attribute[target_attr]
domain_size_in_test_data = len(values_occuring_in_data)
nb_of_predicted_values = len(predicted_values)
frac_classes: float = nb_of_predicted_values / domain_size_in_test_data
frac_predicted_classes_per_target_attr[target_attr] = frac_classes
avg_frac_predicted_classes += frac_classes
nb_of_target_attrs = len(target_attributes)
avg_frac_predicted_classes = avg_frac_predicted_classes / nb_of_target_attrs
if not is_valid_fraction(avg_frac_predicted_classes):
raise Exception("Avg fraction predicted classes examples is not within [0,1]: "
+ str(avg_frac_predicted_classes))
return avg_frac_predicted_classes, frac_predicted_classes_per_target_attr
@staticmethod
def calculate_ruleset_statistics(ruleset: MIDSRuleSet, test_dataframe: pd.DataFrame,
target_attributes: List[TargetAttr]
) -> MIDSInterpretabilityStatistics:
rule_length_collector = ValueCollector()
for rule in ruleset.ruleset:
rule_length_collector.add_value(len(rule))
fraction_bodily_overlap: float = MIDSInterpretabilityStatisticsCalculator.fraction_bodily_overlap(
ruleset=ruleset, test_dataframe=test_dataframe)
fraction_uncovered_examples: float = MIDSInterpretabilityStatisticsCalculator.fraction_uncovered_examples(
ruleset=ruleset, test_dataframe=test_dataframe
)
avg_frac_predicted_classes: float
frac_predicted_classes_per_target_attr: Dict[TargetAttr, float]
avg_frac_predicted_classes, frac_predicted_classes_per_target_attr = \
MIDSInterpretabilityStatisticsCalculator.fraction_predicted_classes(
ruleset=ruleset, test_dataframe=test_dataframe, target_attributes=target_attributes
)
statistics = MIDSInterpretabilityStatistics(
rule_length_collector=rule_length_collector,
fraction_bodily_overlap=fraction_bodily_overlap,
fraction_uncovered_examples=fraction_uncovered_examples,
avg_frac_predicted_classes=avg_frac_predicted_classes,
frac_predicted_classes_per_target_attr=frac_predicted_classes_per_target_attr
# ground_set_size=ground_set_size
)
return statistics
class MIDSInterpretabilityStatisticsAbstractCondition:
def is_satisfied_by(self, interpret_stats: MIDSInterpretabilityStatistics) -> bool:
raise NotImplementedError("abstract method")
class MIDSInterpretabilityStatisticsBoundaryCondition(MIDSInterpretabilityStatisticsAbstractCondition):
def __init__(self,
max_fraction_bodily_overlap: float = 1.0,
max_fraction_uncovered_examples: float = 1.0,
min_avg_fraction_predicted_classes: float = 0.0,
min_frac_predicted_classes_for_each_target_attr: float = 0.5,
max_n_rules: int = 500,
max_avg_rule_length: int = 10
):
self.max_fraction_bodily_overlap: float = max_fraction_bodily_overlap
self.max_fraction_uncovered_examples: float = max_fraction_uncovered_examples
self.min_avg_fraction_predicted_classes: float = min_avg_fraction_predicted_classes
self.min_frac_predicted_classes_for_each_target_attr: float = min_frac_predicted_classes_for_each_target_attr
self.max_n_rules: int = max_n_rules
self.max_avg_rule_length: int = max_avg_rule_length
def is_satisfied_by(self, interpret_stats: MIDSInterpretabilityStatistics) -> bool:
return (
interpret_stats.ruleset_size() <= self.max_n_rules and
interpret_stats.avg_nb_of_literals_per_rule() <= self.max_avg_rule_length and
interpret_stats.fraction_bodily_overlap <= self.max_fraction_bodily_overlap and
interpret_stats.fraction_uncovered_examples <= self.max_fraction_uncovered_examples and
interpret_stats.avg_frac_predicted_classes >= self.min_avg_fraction_predicted_classes and
self._is_frac_predicted_classes_for_each_class_above_threshold(interpret_stats)
)
def _is_frac_predicted_classes_for_each_class_above_threshold(self,
interpret_stats: MIDSInterpretabilityStatistics):
for frac_predicted_classes_for_single_target in interpret_stats.frac_predicted_classes_per_target_attr.values():
if frac_predicted_classes_for_single_target < self.min_frac_predicted_classes_for_each_target_attr:
return False
return True
|
[
"numpy.count_nonzero",
"numpy.zeros",
"mdrsl.utils.value_collection.ValueCollector",
"numpy.logical_or",
"mdrsl.evaluation.interpretability.basic_rule_set_stats.is_valid_fraction",
"warnings.warn"
] |
[((9609, 9650), 'numpy.zeros', 'np.zeros', (['nb_of_test_examples'], {'dtype': 'bool'}), '(nb_of_test_examples, dtype=bool)\n', (9617, 9650), True, 'import numpy as np\n'), ((9897, 9936), 'numpy.count_nonzero', 'np.count_nonzero', (['cover_cumulative_mask'], {}), '(cover_cumulative_mask)\n', (9913, 9936), True, 'import numpy as np\n'), ((11259, 11391), 'warnings.warn', 'warnings.warn', (['"""Ugly conversion to string to deal with numerical attributes. Clean this up (look at Survived in Titanic)."""'], {}), "(\n 'Ugly conversion to string to deal with numerical attributes. Clean this up (look at Survived in Titanic).'\n )\n", (11272, 11391), False, 'import warnings\n'), ((14276, 14292), 'mdrsl.utils.value_collection.ValueCollector', 'ValueCollector', ([], {}), '()\n', (14290, 14292), False, 'from mdrsl.utils.value_collection import ValueCollector\n'), ((5296, 5344), 'warnings.warn', 'warnings.warn', (['"""overlap is 0, which is unlikely"""'], {}), "('overlap is 0, which is unlikely')\n", (5309, 5344), False, 'import warnings\n'), ((9804, 9852), 'numpy.logical_or', 'np.logical_or', (['cover_cumulative_mask', 'cover_mask'], {}), '(cover_cumulative_mask, cover_mask)\n', (9817, 9852), True, 'import numpy as np\n'), ((10044, 10077), 'mdrsl.evaluation.interpretability.basic_rule_set_stats.is_valid_fraction', 'is_valid_fraction', (['frac_uncovered'], {}), '(frac_uncovered)\n', (10061, 10077), False, 'from mdrsl.evaluation.interpretability.basic_rule_set_stats import BasicRuleSetStatistics, is_valid_fraction\n'), ((13703, 13748), 'mdrsl.evaluation.interpretability.basic_rule_set_stats.is_valid_fraction', 'is_valid_fraction', (['avg_frac_predicted_classes'], {}), '(avg_frac_predicted_classes)\n', (13720, 13748), False, 'from mdrsl.evaluation.interpretability.basic_rule_set_stats import BasicRuleSetStatistics, is_valid_fraction\n'), ((5503, 5534), 'mdrsl.evaluation.interpretability.basic_rule_set_stats.is_valid_fraction', 'is_valid_fraction', (['frac_overlap'], {}), '(frac_overlap)\n', (5520, 5534), False, 'from mdrsl.evaluation.interpretability.basic_rule_set_stats import BasicRuleSetStatistics, is_valid_fraction\n')]
|
import cv2
from PIL import Image, ImageTk
import io
import tensorflow as tf
from dataloader import mask_gen_cs_kar, mask_gen_hor_cs_kar_albedo
import numpy as np
import random
import os
import glob
from html import HTML
import skvideo.io
import argparse
tf.enable_eager_execution()
os.environ["CUDA_VISIBLE_DEVICES"] = '1'
# sg.theme('DarkBlue1')
parser = argparse.ArgumentParser()
parser.add_argument('--modelpath', required=True, help='folder where checkpoint is stored')
parser.add_argument('--masks', default= '/home/yam28/Documents/phdYoop/Stamps/dataset/val/synthgiraffebis')
parser.add_argument('--ims', default='/home/yam28/Documents/phdYoop/datasets/giraffe_datasetbis/albedo/PNG')
parser.add_argument('--fullrotation', default=0)
args = parser.parse_args()
l_to_m_model = os.path.join(args.modelpath, 'frozen_model_l_to_m.pb')
m_to_l_model = os.path.join(args.modelpath, 'frozen_model_z_to_l.pb')
orig_im = args.masks
mpaths = sorted(glob.glob(os.path.join(orig_im, '*.png')))
np.random.seed(42)
np.random.shuffle(mpaths)
lenm = len(mpaths)
mpaths = mpaths[int(0.9*len(mpaths)):][:10]
# impath = '/home/yam28/Documents/phdYoop/datasets/COCO/val2017'
impath = args.ims
nb_landmarks = int(l_to_m_model.split('_nbl')[1].split('/')[0])
SHAPE = 256
dataloader = mask_gen_hor_cs_kar_albedo(mpaths, impath, SHAPE, 8, 8)
def get_img_data(f, maxsize=(200, 200), first=True):
"""Generate image data using PIL
"""
img = Image.open(f)
img.thumbnail(maxsize)
if first: # tkinter is inactive the first time
bio = io.BytesIO()
img.save(bio, format="PNG")
del img
return bio.getvalue()
return ImageTk.PhotoImage(img)
def create_template(nr, r):
# r = np.reshape(np.squeeze(r) * SHAPE, [10, 2])
r = np.squeeze(r*SHAPE)
nr = nr*SHAPE
margin = 3
box = np.array([0, 0, 0, 0])
bbx = np.zeros([1, SHAPE, SHAPE, 1])
mins = np.min(nr, axis=0)
maxs = np.max(nr, axis=0)
box[1] = int(mins[1]) - margin
box[3] = int(maxs[1]) + margin
box[0] = int(mins[0]) - margin
box[2] = int(maxs[0]) + margin
if box[0] < 0: box[0] = 0
if box[1] < 0: box[1] = 0
if box[3] > SHAPE: box[3] = SHAPE - 1
if box[2] > SHAPE: box[2] = SHAPE - 1
if box[3] == box[1]:
box[3] += 1
if box[0] == box[2]:
box[2] += 1
# if box[0]>r[0]:
# box[0]=r[0]
# if box[1]>r[1]:
# box[1]=r[1]
# if box[2]<r[2]:
# box[2]=r[2]
# if box[3]<r[3]:
# box[3]=r[3]
bbx[:, box[0]:box[2], box[1]:box[3], :] = 1
box = np.reshape(box, [1,1,1,4])/SHAPE
return bbx, box
#taken from https://github.com/tomasjakab/imm/
def get_coord(x, other_axis, axis_size):
# get "x-y" coordinates:
g_c_prob = tf.reduce_mean(x, axis=other_axis) # B,W,NMAP
g_c_prob = tf.nn.softmax(g_c_prob, axis=1) # B,W,NMAP
coord_pt = tf.to_float(tf.linspace(-1.0, 1.0, axis_size)) # W
coord_pt = tf.reshape(coord_pt, [1, axis_size, 1])
g_c = tf.reduce_sum(g_c_prob * coord_pt, axis=1)
return g_c, g_c_prob
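# Note (added): get_coord is a differentiable "soft arg-max": g_c_prob is a softmax
# distribution over positions along one axis, and g_c is the expected coordinate in
# [-1, 1] under that distribution, so landmark locations remain differentiable.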
# taken from https://github.com/tomasjakab/imm/
def get_gaussian_maps(mu, shape_hw, inv_std, mode='ankush'):
"""
Generates [B,SHAPE_H,SHAPE_W,NMAPS] tensor of 2D gaussians,
given the gaussian centers: MU [B, NMAPS, 2] tensor.
  STD is the fixed standard deviation.
"""
with tf.name_scope(None, 'gauss_map', [mu]):
mu_y, mu_x = mu[:, :, 0:1], mu[:, :, 1:2]
y = np.linspace(0., 1.0, shape_hw[0])
x = np.linspace(0., 1.0, shape_hw[1])
if mode in ['rot', 'flat']:
mu_y, mu_x = np.expand_dims(mu_y, -1), np.expand_dims(mu_x, -1)
y = np.reshape(y, [1, 1, shape_hw[0], 1])
x = np.reshape(x, [1, 1, 1, shape_hw[1]])
g_y = np.square(y - mu_y)
g_x = np.square(x - mu_x)
dist = (g_y + g_x) * inv_std**2
if mode == 'rot':
g_yx = np.exp(-dist)
else:
g_yx = tf.exp(-tf.pow(dist + 1e-5, 0.25))
elif mode == 'ankush':
y = tf.reshape(y, [1, 1, shape_hw[0]])
x = tf.reshape(x, [1, 1, shape_hw[1]])
g_y = tf.exp(-tf.sqrt(1e-4 + tf.abs((mu_y - y) * inv_std)))
g_x = tf.exp(-tf.sqrt(1e-4 + tf.abs((mu_x - x) * inv_std)))
g_y = tf.expand_dims(g_y, axis=3)
g_x = tf.expand_dims(g_x, axis=2)
g_yx = tf.matmul(g_y, g_x) # [B, NMAPS, H, W]
else:
raise ValueError('Unknown mode: ' + str(mode))
g_yx = np.transpose(g_yx, [0, 2, 3, 1])
return g_yx
# get "maximally" different random colors:
# ref: https://gist.github.com/adewes/5884820
def get_random_color(pastel_factor = 0.5):
return [(x+pastel_factor)/(1.0+pastel_factor) for x in [np.random.uniform(0,1.0) for i in [1,2,3]]]
def color_distance(c1,c2):
return sum([abs(x[0]-x[1]) for x in zip(c1,c2)])
def generate_new_color(existing_colors,pastel_factor = 0.5):
max_distance = None
best_color = None
for i in range(0,100):
color = get_random_color(pastel_factor = pastel_factor)
if not existing_colors:
return color
best_distance = min([color_distance(color,c) for c in existing_colors])
if not max_distance or best_distance > max_distance:
max_distance = best_distance
best_color = color
return best_color
def get_n_colors(n, pastel_factor=0.9):
colors = []
for i in range(n):
colors.append(generate_new_color(colors,pastel_factor = 0.9))
return colors
COLORS = get_n_colors(nb_landmarks, pastel_factor=0.9)
def colorize_landmark_maps(maps):
"""
Given BxHxWxN maps of landmarks, returns an aggregated landmark map
in which each landmark is colored randomly. BxHxWxN
"""
n_maps = maps.shape[-1]
# get n colors:
# colors = get_n_colors(n_maps, pastel_factor=0.0)
hmaps = [np.expand_dims(maps[..., i], axis=3) * np.reshape(COLORS[i], [1, 1, 1, 3])
for i in range(n_maps)]
return np.max(hmaps, axis=0)
def get_landmarks(mus, sigma, rotx, roty, rotz, tx, ty, tz, focal=1.):
assert mus is not None
assert sigma is not None
count = 4
rotXval = rotx
rotYval = roty
rotZval = rotz
rotX = (rotXval) * np.pi / 180
rotY = (rotYval) * np.pi / 180
rotZ = (rotZval) * np.pi / 180
zr = tf.zeros_like(rotY)
ons = tf.ones_like(rotY)
RX = tf.stack([tf.stack([ons, zr, zr], axis=-1), tf.stack([zr, tf.cos(rotX), -tf.sin(rotX)], axis=-1),
tf.stack([zr, tf.sin(rotX), tf.cos(rotX)], axis=-1)], axis=-1)
RY = tf.stack([tf.stack([tf.cos(rotY), zr, tf.sin(rotY)], axis=-1), tf.stack([zr, ons, zr], axis=-1),
tf.stack([-tf.sin(rotY), zr, tf.cos(rotY)], axis=-1)], axis=-1)
RZ = tf.stack([tf.stack([tf.cos(rotZ), -tf.sin(rotZ), zr], axis=-1),
tf.stack([tf.sin(rotZ), tf.cos(rotZ), zr], axis=-1),
tf.stack([zr, zr, ons], axis=-1)], axis=-1)
# Composed rotation matrix with (RX,RY,RZ)
R = tf.matmul(tf.matmul(RX, RY), RZ)
# R = tf.stack([R] * nb_landmarks, axis=0)[None, :, :, :]
transvec = tf.constant(np.array([[tx, ty, tz]]), dtype=tf.float64)
transvec = tf.stack([transvec] * nb_landmarks, axis=1)
transvec = transvec[:, :, tf.newaxis, :]
px = tf.zeros([tf.shape(mus)[0], nb_landmarks])
py = tf.zeros([tf.shape(mus)[0], nb_landmarks])
fvs = tf.ones_like(px) * focal
zv = tf.zeros_like(px)
ov = tf.ones_like(px)
K = tf.stack([tf.stack([fvs, zv, zv], axis=-1), tf.stack([zv, fvs, zv], axis=-1),
tf.stack([px, py, ov], axis=-1)], axis=-1)
K = tf.cast(K, tf.float64)
K = tf.identity(K, name='K')
R = tf.cast(R, tf.float64) * tf.ones_like(sigma)
sigma = tf.linalg.matmul(tf.linalg.matmul(R, sigma), R, transpose_b=True)
invsigma = tf.linalg.inv(sigma)
mus = tf.cast(mus, tf.float64)
mus = tf.transpose(tf.linalg.matmul(R, tf.transpose(mus, [0, 1, 3, 2])), [0, 1, 3, 2]) + transvec
M0 = tf.matmul(invsigma, tf.matmul(mus, mus, transpose_a=True))
M0 = tf.matmul(M0, invsigma, transpose_b=True)
M1 = (tf.matmul(tf.matmul(mus, invsigma), mus, transpose_b=True) - 1)
M1 = M1 * invsigma
M = M0 - M1
Mtmp = tf.constant(np.array([[1, 1, 0], [1, 1, 0], [0, 0, 1]]), dtype=tf.float64)
M = -M + 2 * M * Mtmp[tf.newaxis, tf.newaxis, :, :]
M33 = tf.gather(tf.gather(M, [0, 1], axis=2), [0, 1], axis=3)
K33 = tf.gather(tf.gather(K, [0, 1], axis=2), [0, 1], axis=3)
M31 = tf.gather(tf.gather(M, [0, 1], axis=2), [1, 2], axis=3)
M23 = tf.gather(tf.gather(M, [0, 2], axis=2), [0, 1], axis=3)
det_m31 = tf.linalg.det(M31)
det_m23 = tf.linalg.det(M23)
det_m33 = tf.linalg.det(M33)
det_m = tf.linalg.det(M)
mup0 = tf.squeeze(tf.matmul(K33, tf.stack([det_m31, -det_m23], axis=-1)[:, :, :, tf.newaxis]), axis=-1) / (
det_m33[:, :, tf.newaxis])
mup1 = tf.stack([K[:, :, 0, 2], K[:, :, 1, 2]], axis=-1)
mup = mup0 + mup1
sigma_w = det_m / det_m33
sigma_w = sigma_w[:, :, None, None]
invm33 = tf.linalg.inv(M33)
sigmap = -sigma_w * invm33
gauss_xy_list = []
mup = tf.cast(mup, tf.float32)
sigmap = tf.cast(sigmap, tf.float32)
mup = tf.identity(mup, name='mu2d')
sigmap = tf.identity(sigmap, name='sigma2d')
gm2d = get_gaussian_maps_2d(mup, sigmap, [256, 256])
return mup, sigmap, gm2d
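# Note: get_landmarks applies the Euler-angle rotation R and the translation to
# the 3D Gaussian landmarks (mu, sigma), forms the camera matrix K from the
# focal length, and recovers the projected 2D means and covariances from
# sub-determinants of the matrix M (i.e. each 3D Gaussian is projected to a 2D
# Gaussian), returning them together with the rendered 2D Gaussian maps.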
def get_landmarks_(mus, sigma, rotx, roty, rotz, tx, ty, tz, focal=1.):
assert mus is not None
assert sigma is not None
count = 4
rotXval = rotx
rotYval = roty
rotZval = rotz
rotX = (rotXval) * np.pi / 180
rotY = (rotYval) * np.pi / 180
rotZ = (rotZval) * np.pi / 180
# Rotation matrices around the X,Y,Z axis
ons = tf.ones_like(rotY)
zr = tf.zeros_like(rotY)
RX = tf.stack([tf.stack([ons, zr, zr], axis=-1), tf.stack([zr, tf.cos(rotX), -tf.sin(rotX)], axis=-1),
tf.stack([zr, tf.sin(rotX), tf.cos(rotX)], axis=-1)], axis=-1)
RY = tf.stack([tf.stack([tf.cos(rotY), zr, tf.sin(rotY)], axis=-1), tf.stack([zr, ons, zr], axis=-1),
tf.stack([-tf.sin(rotY), zr, tf.cos(rotY)], axis=-1)], axis=-1)
RZ = tf.stack([tf.stack([tf.cos(rotZ), -tf.sin(rotZ), zr], axis=-1),
tf.stack([tf.sin(rotZ), tf.cos(rotZ), zr], axis=-1),
tf.stack([zr, zr, ons], axis=-1)], axis=-1)
# Composed rotation matrix with (RX,RY,RZ)
R = tf.matmul(tf.matmul(RX, RY), RZ)
# R = tf.stack([R] * nb_landmarks, axis=0)[None, :, :, :]
transvec = tf.constant(np.array([[tx, ty, tz]]), dtype=tf.float32)
transvec = tf.stack([transvec] * nb_landmarks, axis=1)
transvec = transvec[:, :, None, :]
px = tf.zeros([tf.shape(mus)[0], nb_landmarks])
py = tf.zeros([tf.shape(mus)[0], nb_landmarks])
fvs = tf.ones_like(px) * focal
zv = tf.zeros_like(px)
ov = tf.ones_like(px)
K = tf.stack([tf.stack([fvs, zv, zv], axis=-1), tf.stack([zv, fvs, zv], axis=-1),
tf.stack([px, py, ov], axis=-1)], axis=-1)
K = tf.identity(K, name='K')
R = R * tf.ones_like(sigma)
sigma = tf.linalg.matmul(tf.linalg.matmul(R, sigma), R, transpose_b=True)
# mus = tf.linalg.matmul(mus, tf.linalg.inv(R)) + transvec
mus = tf.transpose(tf.linalg.matmul(R, tf.transpose(mus, [0, 1, 3, 2])), [0, 1, 3, 2]) + transvec
invsigma = tf.linalg.inv(sigma)
M0 = tf.matmul(invsigma, tf.matmul(mus, mus, transpose_a=True))
M0 = tf.matmul(M0, invsigma, transpose_b=True)
M1 = (tf.matmul(tf.matmul(mus, invsigma), mus, transpose_b=True) - 1)
M1 = M1 * invsigma
M = M0 - M1
Mtmp = tf.constant(np.array([[1, 1, 0], [1, 1, 0], [0, 0, 1]]), dtype=tf.float32)
M = -M + 2 * M * Mtmp[None, None, :, :]
M33 = tf.gather(tf.gather(M, [0, 1], axis=2), [0, 1], axis=3)
K33 = tf.gather(tf.gather(K, [0, 1], axis=2), [0, 1], axis=3)
M31 = tf.gather(tf.gather(M, [0, 1], axis=2), [1, 2], axis=3)
M23 = tf.gather(tf.gather(M, [0, 2], axis=2), [0, 1], axis=3)
det_m31 = tf.linalg.det(M31)
det_m23 = tf.linalg.det(M23)
det_m33 = tf.linalg.det(M33)
det_m = tf.linalg.det(M)
mup0 = tf.squeeze(tf.matmul(K33, tf.stack([det_m31, -det_m23], axis=-1)[:, :, :, None]), axis=-1) / (
det_m33[:, :, None])
mup1 = tf.stack([K[:, :, 0, 2], K[:, :, 1, 2]], axis=-1)
mup = mup0 + mup1
sigma_w = det_m / det_m33
sigma_w = sigma_w[:, :, None, None]
invm33 = tf.linalg.inv(M33)
sigmap = -sigma_w * invm33
gm = get_gaussian_maps_2d(mup, sigmap, [256, 256])
return mup, sigmap, gm
def get_gaussian_maps_2d(mu, sigma, shape_hw, mode='rot'):
"""
Generates [B,SHAPE_H,SHAPE_W,NMAPS] tensor of 2D gaussians,
given the gaussian centers: MU [B, NMAPS, 2] tensor.
  STD is the fixed standard deviation.
"""
with tf.name_scope(None, 'gauss_map', [mu]):
y = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[0]))
x = tf.to_float(tf.linspace(-1.0, 1.0, shape_hw[1]))
[x,y] = tf.meshgrid(x,y)
xy = tf.stack([x, y], axis=-1)
xy = tf.stack([xy] * nb_landmarks, axis=0)
xy = xy[tf.newaxis, : ,:, :, :]
mu = mu[:,:,tf.newaxis, tf.newaxis,:]
invsigma = tf.linalg.inv(sigma)
invsigma = tf.cast(invsigma, tf.float32)
pp = tf.tile(invsigma[:, :, tf.newaxis, :, :], [1, 1, shape_hw[1], 1, 1])
X = xy-mu
dist = tf.matmul(X,pp)
dist = tf.reduce_sum((dist*X), axis=-1)
g_yx = tf.exp(-dist)
g_yx = tf.transpose(g_yx, perm=[0, 2, 3, 1])
return g_yx
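# Note: get_gaussian_maps_2d evaluates, for every landmark, the unnormalised
# full-covariance Gaussian exp(-(p - mu)^T Sigma^{-1} (p - mu)) on a
# [-1, 1] x [-1, 1] grid of size shape_hw, producing maps of shape
# [B, H, W, NMAPS].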
# im = cv2.imread(orig_im) # create PIL image from frame
# im = cv2.resize(im, (SHAPE,SHAPE))
with tf.gfile.GFile(m_to_l_model, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph_m_to_l:
tf.import_graph_def(graph_def, name='')
with tf.gfile.GFile(l_to_m_model, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph_l_to_m:
tf.import_graph_def(graph_def, name='')
# We access the input and output nodes
# if 'nozgm' not in args.modelpath:
# if 'nozgm' not in args.modelpath:
# mask_ltm = graph_l_to_m.get_tensor_by_name('mask:0')
mu2d_ltm = graph_l_to_m.get_tensor_by_name('gen/genlandmarks/mu2d:0')
sigma2d_ltm = graph_l_to_m.get_tensor_by_name('gen/genlandmarks/sigma2d:0')
genm = graph_l_to_m.get_tensor_by_name('gen/genmask/convlast/output:0')
if '_ete_' in args.modelpath:
input_mtl = graph_m_to_l.get_tensor_by_name('input:0')
mask_mtl = graph_m_to_l.get_tensor_by_name('mask:0')
# bbx = graph_m_to_l.get_tensor_by_name('bbx:0')
mu3d_mtl = graph_m_to_l.get_tensor_by_name('gen/genlandmarks/mu3d:0')
sigma3d_mtl = graph_m_to_l.get_tensor_by_name('gen/genlandmarks/sigma3d:0')
theta3d_mtl = graph_m_to_l.get_tensor_by_name('gen/genlandmarks/theta3d:0')
if args.fullrotation:
angle=360
eval_dir = os.path.join(os.path.dirname(m_to_l_model), 'evalm_360/ims')
os.makedirs(eval_dir, exist_ok=True)
else:
angle = int(args.modelpath.split('_ange')[1].split('_')[0])
eval_dir = os.path.join(os.path.dirname(m_to_l_model), 'evalm/ims')
os.makedirs(eval_dir, exist_ok=True)
html = HTML(os.path.dirname(eval_dir), 'show images')
html.add_header('%s' % ('3d gm perspective'))
fourcc = cv2.VideoWriter_fourcc(*'MP4V')
ims = []
texts = []
links = []
mus = []
sigmas = []
nb_frames = 60
# angle_range = list(np.linspace(-angle, 0., int(nb_frames/2))) + list(np.linspace(0, angle, int(nb_frames/2)))[1:]
angle_range = list(np.linspace(0., angle, nb_frames))
for its, (np_im, template, np_m, r, z_np, z2_np) in enumerate(dataloader):
ims = []
texts = []
links = []
with tf.Session(graph=graph_m_to_l, config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as sess_m_to_l:
if '_ete_' in args.modelpath:
mu3d, sig3d, thet3d = sess_m_to_l.run([mu3d_mtl, sigma3d_mtl, theta3d_mtl], feed_dict={mask_mtl: np_m,
input_mtl: np_im})
else:
mu3d, sig3d, thet3d = sess_m_to_l.run([mu3d_mtl, sigma3d_mtl, theta3d_mtl], feed_dict={mask_mtl: np_m})
sess_m_to_l.close()
cv2.imwrite(os.path.join(eval_dir, 'm%d.png'%its), 255-np_m.squeeze())
ims.append('./ims/m%d.png'%its)
texts.append('input m')
links.append('./ims/m%d.png'%its)
gmframes = []
mframes = []
np.save(os.path.join(eval_dir, 'm_%d.npy'%its), mu3d)
np.save(os.path.join(eval_dir, 'sig3d_%d.npy'%its), sig3d)
for x in angle_range:
x = np.array([[x]], dtype=np.float32)
zrs = tf.zeros_like(x)
mu2d, sigma2d, gm2d = get_landmarks(mu3d, sig3d, zrs, thet3d[0,0] + x*1., zrs, 0., 0., -2.)
with tf.Session(graph=graph_l_to_m, config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))) as sess_l_to_m:
m_final = sess_l_to_m.run(genm, feed_dict={mu2d_ltm: mu2d.numpy(),
sigma2d_ltm: sigma2d.numpy()})
m_final = 255 - (np.squeeze((m_final + 1) * 0.5 * 255)).astype(np.uint8)
sess_l_to_m.close()
mframes.append(m_final)
gmframes.append(255-(np.squeeze(colorize_landmark_maps(gm2d))*255).astype(np.uint8))
if x == 0:
gm2drgb = colorize_landmark_maps(gm2d)
cv2.imwrite(os.path.join(eval_dir, '2dgm_%d.png'%its), (gm2drgb.squeeze()[:,:,::-1]*255).astype(np.uint8))
ims.append('./ims/2dgm_%d.png'%its)
texts.append('2dgm')
links.append('./ims/2dgm_%d.png'%its)
cv2.imwrite(os.path.join(eval_dir, 'genm_%d.png'%its), m_final)
ims.append('./ims/genm_%d.png'%its)
texts.append('gen m')
links.append('./ims/genm_%d.png'%its)
vidgmpath = os.path.join(eval_dir, 'vid_gm%d.mp4' % its)
skvideo.io.vwrite(vidgmpath, gmframes)
vidgmpath_avi = os.path.join(eval_dir, 'vid_gm%d.avi' % its)
skvideo.io.vwrite(vidgmpath_avi, gmframes)
ims.append(os.path.join('./ims', os.path.basename(vidgmpath)))
texts.append('gmvid')
links.append(os.path.join('./ims', os.path.basename(vidgmpath)))
vidmpath = os.path.join(eval_dir, 'vid_m%d.mp4' % its)
skvideo.io.vwrite(vidmpath, mframes)
vidmpath_avi = os.path.join(eval_dir, 'vid_m%d.avi' % its)
skvideo.io.vwrite(vidmpath_avi, mframes)
ims.append(os.path.join('./ims', os.path.basename(vidmpath)))
texts.append('mvid')
links.append(os.path.join('./ims', os.path.basename(vidmpath)))
html.add_im_vid(ims, texts, links, width=256)
html.save()
|
[
"tensorflow.meshgrid",
"tensorflow.reduce_sum",
"numpy.random.seed",
"argparse.ArgumentParser",
"cv2.VideoWriter_fourcc",
"tensorflow.identity",
"tensorflow.reshape",
"tensorflow.zeros_like",
"tensorflow.matmul",
"tensorflow.linalg.det",
"tensorflow.linalg.inv",
"numpy.exp",
"tensorflow.GPUOptions",
"os.path.join",
"tensorflow.nn.softmax",
"tensorflow.abs",
"tensorflow.sin",
"tensorflow.gather",
"os.path.dirname",
"numpy.transpose",
"tensorflow.stack",
"tensorflow.cast",
"numpy.max",
"tensorflow.exp",
"dataloader.mask_gen_hor_cs_kar_albedo",
"numpy.linspace",
"numpy.reshape",
"tensorflow.GraphDef",
"tensorflow.name_scope",
"numpy.random.shuffle",
"io.BytesIO",
"os.path.basename",
"tensorflow.linspace",
"numpy.square",
"tensorflow.reduce_mean",
"tensorflow.ones_like",
"tensorflow.transpose",
"numpy.min",
"tensorflow.tile",
"tensorflow.gfile.GFile",
"tensorflow.Graph",
"tensorflow.enable_eager_execution",
"numpy.squeeze",
"tensorflow.import_graph_def",
"tensorflow.expand_dims",
"numpy.random.uniform",
"PIL.ImageTk.PhotoImage",
"os.makedirs",
"tensorflow.linalg.matmul",
"numpy.zeros",
"numpy.expand_dims",
"tensorflow.pow",
"PIL.Image.open",
"tensorflow.shape",
"numpy.array",
"tensorflow.cos"
] |
[((254, 281), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (279, 281), True, 'import tensorflow as tf\n'), ((357, 382), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (380, 382), False, 'import argparse\n'), ((783, 837), 'os.path.join', 'os.path.join', (['args.modelpath', '"""frozen_model_l_to_m.pb"""'], {}), "(args.modelpath, 'frozen_model_l_to_m.pb')\n", (795, 837), False, 'import os\n'), ((853, 907), 'os.path.join', 'os.path.join', (['args.modelpath', '"""frozen_model_z_to_l.pb"""'], {}), "(args.modelpath, 'frozen_model_z_to_l.pb')\n", (865, 907), False, 'import os\n'), ((989, 1007), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (1003, 1007), True, 'import numpy as np\n'), ((1008, 1033), 'numpy.random.shuffle', 'np.random.shuffle', (['mpaths'], {}), '(mpaths)\n', (1025, 1033), True, 'import numpy as np\n'), ((1269, 1324), 'dataloader.mask_gen_hor_cs_kar_albedo', 'mask_gen_hor_cs_kar_albedo', (['mpaths', 'impath', 'SHAPE', '(8)', '(8)'], {}), '(mpaths, impath, SHAPE, 8, 8)\n', (1295, 1324), False, 'from dataloader import mask_gen_cs_kar, mask_gen_hor_cs_kar_albedo\n'), ((15066, 15097), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'MP4V'"], {}), "(*'MP4V')\n", (15088, 15097), False, 'import cv2\n'), ((1434, 1447), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1444, 1447), False, 'from PIL import Image, ImageTk\n'), ((1666, 1689), 'PIL.ImageTk.PhotoImage', 'ImageTk.PhotoImage', (['img'], {}), '(img)\n', (1684, 1689), False, 'from PIL import Image, ImageTk\n'), ((1780, 1801), 'numpy.squeeze', 'np.squeeze', (['(r * SHAPE)'], {}), '(r * SHAPE)\n', (1790, 1801), True, 'import numpy as np\n'), ((1843, 1865), 'numpy.array', 'np.array', (['[0, 0, 0, 0]'], {}), '([0, 0, 0, 0])\n', (1851, 1865), True, 'import numpy as np\n'), ((1876, 1906), 'numpy.zeros', 'np.zeros', (['[1, SHAPE, SHAPE, 1]'], {}), '([1, SHAPE, SHAPE, 1])\n', (1884, 1906), True, 'import numpy as np\n'), ((1918, 1936), 'numpy.min', 'np.min', (['nr'], {'axis': '(0)'}), '(nr, axis=0)\n', (1924, 1936), True, 'import numpy as np\n'), ((1948, 1966), 'numpy.max', 'np.max', (['nr'], {'axis': '(0)'}), '(nr, axis=0)\n', (1954, 1966), True, 'import numpy as np\n'), ((2768, 2802), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['x'], {'axis': 'other_axis'}), '(x, axis=other_axis)\n', (2782, 2802), True, 'import tensorflow as tf\n'), ((2830, 2861), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['g_c_prob'], {'axis': '(1)'}), '(g_c_prob, axis=1)\n', (2843, 2861), True, 'import tensorflow as tf\n'), ((2956, 2995), 'tensorflow.reshape', 'tf.reshape', (['coord_pt', '[1, axis_size, 1]'], {}), '(coord_pt, [1, axis_size, 1])\n', (2966, 2995), True, 'import tensorflow as tf\n'), ((3006, 3048), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(g_c_prob * coord_pt)'], {'axis': '(1)'}), '(g_c_prob * coord_pt, axis=1)\n', (3019, 3048), True, 'import tensorflow as tf\n'), ((4362, 4394), 'numpy.transpose', 'np.transpose', (['g_yx', '[0, 2, 3, 1]'], {}), '(g_yx, [0, 2, 3, 1])\n', (4374, 4394), True, 'import numpy as np\n'), ((5784, 5805), 'numpy.max', 'np.max', (['hmaps'], {'axis': '(0)'}), '(hmaps, axis=0)\n', (5790, 5805), True, 'import numpy as np\n'), ((6119, 6138), 'tensorflow.zeros_like', 'tf.zeros_like', (['rotY'], {}), '(rotY)\n', (6132, 6138), True, 'import tensorflow as tf\n'), ((6149, 6167), 'tensorflow.ones_like', 'tf.ones_like', (['rotY'], {}), '(rotY)\n', (6161, 6167), True, 'import tensorflow as tf\n'), ((6993, 7036), 'tensorflow.stack', 'tf.stack', 
(['([transvec] * nb_landmarks)'], {'axis': '(1)'}), '([transvec] * nb_landmarks, axis=1)\n', (7001, 7036), True, 'import tensorflow as tf\n'), ((7232, 7249), 'tensorflow.zeros_like', 'tf.zeros_like', (['px'], {}), '(px)\n', (7245, 7249), True, 'import tensorflow as tf\n'), ((7259, 7275), 'tensorflow.ones_like', 'tf.ones_like', (['px'], {}), '(px)\n', (7271, 7275), True, 'import tensorflow as tf\n'), ((7431, 7453), 'tensorflow.cast', 'tf.cast', (['K', 'tf.float64'], {}), '(K, tf.float64)\n', (7438, 7453), True, 'import tensorflow as tf\n'), ((7462, 7486), 'tensorflow.identity', 'tf.identity', (['K'], {'name': '"""K"""'}), "(K, name='K')\n", (7473, 7486), True, 'import tensorflow as tf\n'), ((7634, 7654), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['sigma'], {}), '(sigma)\n', (7647, 7654), True, 'import tensorflow as tf\n'), ((7665, 7689), 'tensorflow.cast', 'tf.cast', (['mus', 'tf.float64'], {}), '(mus, tf.float64)\n', (7672, 7689), True, 'import tensorflow as tf\n'), ((7870, 7911), 'tensorflow.matmul', 'tf.matmul', (['M0', 'invsigma'], {'transpose_b': '(True)'}), '(M0, invsigma, transpose_b=True)\n', (7879, 7911), True, 'import tensorflow as tf\n'), ((8447, 8465), 'tensorflow.linalg.det', 'tf.linalg.det', (['M31'], {}), '(M31)\n', (8460, 8465), True, 'import tensorflow as tf\n'), ((8480, 8498), 'tensorflow.linalg.det', 'tf.linalg.det', (['M23'], {}), '(M23)\n', (8493, 8498), True, 'import tensorflow as tf\n'), ((8513, 8531), 'tensorflow.linalg.det', 'tf.linalg.det', (['M33'], {}), '(M33)\n', (8526, 8531), True, 'import tensorflow as tf\n'), ((8544, 8560), 'tensorflow.linalg.det', 'tf.linalg.det', (['M'], {}), '(M)\n', (8557, 8560), True, 'import tensorflow as tf\n'), ((8720, 8769), 'tensorflow.stack', 'tf.stack', (['[K[:, :, 0, 2], K[:, :, 1, 2]]'], {'axis': '(-1)'}), '([K[:, :, 0, 2], K[:, :, 1, 2]], axis=-1)\n', (8728, 8769), True, 'import tensorflow as tf\n'), ((8876, 8894), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['M33'], {}), '(M33)\n', (8889, 8894), True, 'import tensorflow as tf\n'), ((8960, 8984), 'tensorflow.cast', 'tf.cast', (['mup', 'tf.float32'], {}), '(mup, tf.float32)\n', (8967, 8984), True, 'import tensorflow as tf\n'), ((8998, 9025), 'tensorflow.cast', 'tf.cast', (['sigmap', 'tf.float32'], {}), '(sigmap, tf.float32)\n', (9005, 9025), True, 'import tensorflow as tf\n'), ((9036, 9065), 'tensorflow.identity', 'tf.identity', (['mup'], {'name': '"""mu2d"""'}), "(mup, name='mu2d')\n", (9047, 9065), True, 'import tensorflow as tf\n'), ((9079, 9114), 'tensorflow.identity', 'tf.identity', (['sigmap'], {'name': '"""sigma2d"""'}), "(sigmap, name='sigma2d')\n", (9090, 9114), True, 'import tensorflow as tf\n'), ((9563, 9581), 'tensorflow.ones_like', 'tf.ones_like', (['rotY'], {}), '(rotY)\n', (9575, 9581), True, 'import tensorflow as tf\n'), ((9591, 9610), 'tensorflow.zeros_like', 'tf.zeros_like', (['rotY'], {}), '(rotY)\n', (9604, 9610), True, 'import tensorflow as tf\n'), ((10434, 10477), 'tensorflow.stack', 'tf.stack', (['([transvec] * nb_landmarks)'], {'axis': '(1)'}), '([transvec] * nb_landmarks, axis=1)\n', (10442, 10477), True, 'import tensorflow as tf\n'), ((10667, 10684), 'tensorflow.zeros_like', 'tf.zeros_like', (['px'], {}), '(px)\n', (10680, 10684), True, 'import tensorflow as tf\n'), ((10694, 10710), 'tensorflow.ones_like', 'tf.ones_like', (['px'], {}), '(px)\n', (10706, 10710), True, 'import tensorflow as tf\n'), ((10866, 10890), 'tensorflow.identity', 'tf.identity', (['K'], {'name': '"""K"""'}), "(K, name='K')\n", (10877, 10890), True, 'import tensorflow as tf\n'), 
((11182, 11202), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['sigma'], {}), '(sigma)\n', (11195, 11202), True, 'import tensorflow as tf\n'), ((11281, 11322), 'tensorflow.matmul', 'tf.matmul', (['M0', 'invsigma'], {'transpose_b': '(True)'}), '(M0, invsigma, transpose_b=True)\n', (11290, 11322), True, 'import tensorflow as tf\n'), ((11846, 11864), 'tensorflow.linalg.det', 'tf.linalg.det', (['M31'], {}), '(M31)\n', (11859, 11864), True, 'import tensorflow as tf\n'), ((11879, 11897), 'tensorflow.linalg.det', 'tf.linalg.det', (['M23'], {}), '(M23)\n', (11892, 11897), True, 'import tensorflow as tf\n'), ((11912, 11930), 'tensorflow.linalg.det', 'tf.linalg.det', (['M33'], {}), '(M33)\n', (11925, 11930), True, 'import tensorflow as tf\n'), ((11943, 11959), 'tensorflow.linalg.det', 'tf.linalg.det', (['M'], {}), '(M)\n', (11956, 11959), True, 'import tensorflow as tf\n'), ((12107, 12156), 'tensorflow.stack', 'tf.stack', (['[K[:, :, 0, 2], K[:, :, 1, 2]]'], {'axis': '(-1)'}), '([K[:, :, 0, 2], K[:, :, 1, 2]], axis=-1)\n', (12115, 12156), True, 'import tensorflow as tf\n'), ((12263, 12281), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['M33'], {}), '(M33)\n', (12276, 12281), True, 'import tensorflow as tf\n'), ((13410, 13444), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['m_to_l_model', '"""rb"""'], {}), "(m_to_l_model, 'rb')\n", (13424, 13444), True, 'import tensorflow as tf\n'), ((13467, 13480), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (13478, 13480), True, 'import tensorflow as tf\n'), ((13571, 13610), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (13590, 13610), True, 'import tensorflow as tf\n'), ((13617, 13651), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['l_to_m_model', '"""rb"""'], {}), "(l_to_m_model, 'rb')\n", (13631, 13651), True, 'import tensorflow as tf\n'), ((13674, 13687), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (13685, 13687), True, 'import tensorflow as tf\n'), ((13778, 13817), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['graph_def'], {'name': '""""""'}), "(graph_def, name='')\n", (13797, 13817), True, 'import tensorflow as tf\n'), ((14736, 14772), 'os.makedirs', 'os.makedirs', (['eval_dir'], {'exist_ok': '(True)'}), '(eval_dir, exist_ok=True)\n', (14747, 14772), False, 'import os\n'), ((14919, 14955), 'os.makedirs', 'os.makedirs', (['eval_dir'], {'exist_ok': '(True)'}), '(eval_dir, exist_ok=True)\n', (14930, 14955), False, 'import os\n'), ((14969, 14994), 'os.path.dirname', 'os.path.dirname', (['eval_dir'], {}), '(eval_dir)\n', (14984, 14994), False, 'import os\n'), ((15301, 15335), 'numpy.linspace', 'np.linspace', (['(0.0)', 'angle', 'nb_frames'], {}), '(0.0, angle, nb_frames)\n', (15312, 15335), True, 'import numpy as np\n'), ((17632, 17676), 'os.path.join', 'os.path.join', (['eval_dir', "('vid_gm%d.mp4' % its)"], {}), "(eval_dir, 'vid_gm%d.mp4' % its)\n", (17644, 17676), False, 'import os\n'), ((17740, 17784), 'os.path.join', 'os.path.join', (['eval_dir', "('vid_gm%d.avi' % its)"], {}), "(eval_dir, 'vid_gm%d.avi' % its)\n", (17752, 17784), False, 'import os\n'), ((18011, 18054), 'os.path.join', 'os.path.join', (['eval_dir', "('vid_m%d.mp4' % its)"], {}), "(eval_dir, 'vid_m%d.mp4' % its)\n", (18023, 18054), False, 'import os\n'), ((18115, 18158), 'os.path.join', 'os.path.join', (['eval_dir', "('vid_m%d.avi' % its)"], {}), "(eval_dir, 'vid_m%d.avi' % its)\n", (18127, 18158), False, 'import os\n'), ((956, 986), 'os.path.join', 'os.path.join', (['orig_im', '"""*.png"""'], 
{}), "(orig_im, '*.png')\n", (968, 986), False, 'import os\n'), ((1560, 1572), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (1570, 1572), False, 'import io\n'), ((2581, 2610), 'numpy.reshape', 'np.reshape', (['box', '[1, 1, 1, 4]'], {}), '(box, [1, 1, 1, 4])\n', (2591, 2610), True, 'import numpy as np\n'), ((2901, 2934), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'axis_size'], {}), '(-1.0, 1.0, axis_size)\n', (2912, 2934), True, 'import tensorflow as tf\n'), ((3355, 3393), 'tensorflow.name_scope', 'tf.name_scope', (['None', '"""gauss_map"""', '[mu]'], {}), "(None, 'gauss_map', [mu])\n", (3368, 3393), True, 'import tensorflow as tf\n'), ((3450, 3484), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'shape_hw[0]'], {}), '(0.0, 1.0, shape_hw[0])\n', (3461, 3484), True, 'import numpy as np\n'), ((3492, 3526), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'shape_hw[1]'], {}), '(0.0, 1.0, shape_hw[1])\n', (3503, 3526), True, 'import numpy as np\n'), ((3634, 3671), 'numpy.reshape', 'np.reshape', (['y', '[1, 1, shape_hw[0], 1]'], {}), '(y, [1, 1, shape_hw[0], 1])\n', (3644, 3671), True, 'import numpy as np\n'), ((3680, 3717), 'numpy.reshape', 'np.reshape', (['x', '[1, 1, 1, shape_hw[1]]'], {}), '(x, [1, 1, 1, shape_hw[1]])\n', (3690, 3717), True, 'import numpy as np\n'), ((3729, 3748), 'numpy.square', 'np.square', (['(y - mu_y)'], {}), '(y - mu_y)\n', (3738, 3748), True, 'import numpy as np\n'), ((3759, 3778), 'numpy.square', 'np.square', (['(x - mu_x)'], {}), '(x - mu_x)\n', (3768, 3778), True, 'import numpy as np\n'), ((6821, 6838), 'tensorflow.matmul', 'tf.matmul', (['RX', 'RY'], {}), '(RX, RY)\n', (6830, 6838), True, 'import tensorflow as tf\n'), ((6934, 6958), 'numpy.array', 'np.array', (['[[tx, ty, tz]]'], {}), '([[tx, ty, tz]])\n', (6942, 6958), True, 'import numpy as np\n'), ((7198, 7214), 'tensorflow.ones_like', 'tf.ones_like', (['px'], {}), '(px)\n', (7210, 7214), True, 'import tensorflow as tf\n'), ((7496, 7518), 'tensorflow.cast', 'tf.cast', (['R', 'tf.float64'], {}), '(R, tf.float64)\n', (7503, 7518), True, 'import tensorflow as tf\n'), ((7521, 7540), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (7533, 7540), True, 'import tensorflow as tf\n'), ((7570, 7596), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['R', 'sigma'], {}), '(R, sigma)\n', (7586, 7596), True, 'import tensorflow as tf\n'), ((7822, 7859), 'tensorflow.matmul', 'tf.matmul', (['mus', 'mus'], {'transpose_a': '(True)'}), '(mus, mus, transpose_a=True)\n', (7831, 7859), True, 'import tensorflow as tf\n'), ((8050, 8093), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 1, 0], [0, 0, 1]]'], {}), '([[1, 1, 0], [1, 1, 0], [0, 0, 1]])\n', (8058, 8093), True, 'import numpy as np\n'), ((8189, 8217), 'tensorflow.gather', 'tf.gather', (['M', '[0, 1]'], {'axis': '(2)'}), '(M, [0, 1], axis=2)\n', (8198, 8217), True, 'import tensorflow as tf\n'), ((8255, 8283), 'tensorflow.gather', 'tf.gather', (['K', '[0, 1]'], {'axis': '(2)'}), '(K, [0, 1], axis=2)\n', (8264, 8283), True, 'import tensorflow as tf\n'), ((8321, 8349), 'tensorflow.gather', 'tf.gather', (['M', '[0, 1]'], {'axis': '(2)'}), '(M, [0, 1], axis=2)\n', (8330, 8349), True, 'import tensorflow as tf\n'), ((8387, 8415), 'tensorflow.gather', 'tf.gather', (['M', '[0, 2]'], {'axis': '(2)'}), '(M, [0, 2], axis=2)\n', (8396, 8415), True, 'import tensorflow as tf\n'), ((10262, 10279), 'tensorflow.matmul', 'tf.matmul', (['RX', 'RY'], {}), '(RX, RY)\n', (10271, 10279), True, 'import tensorflow as tf\n'), ((10375, 10399), 'numpy.array', 
'np.array', (['[[tx, ty, tz]]'], {}), '([[tx, ty, tz]])\n', (10383, 10399), True, 'import numpy as np\n'), ((10633, 10649), 'tensorflow.ones_like', 'tf.ones_like', (['px'], {}), '(px)\n', (10645, 10649), True, 'import tensorflow as tf\n'), ((10904, 10923), 'tensorflow.ones_like', 'tf.ones_like', (['sigma'], {}), '(sigma)\n', (10916, 10923), True, 'import tensorflow as tf\n'), ((10953, 10979), 'tensorflow.linalg.matmul', 'tf.linalg.matmul', (['R', 'sigma'], {}), '(R, sigma)\n', (10969, 10979), True, 'import tensorflow as tf\n'), ((11233, 11270), 'tensorflow.matmul', 'tf.matmul', (['mus', 'mus'], {'transpose_a': '(True)'}), '(mus, mus, transpose_a=True)\n', (11242, 11270), True, 'import tensorflow as tf\n'), ((11461, 11504), 'numpy.array', 'np.array', (['[[1, 1, 0], [1, 1, 0], [0, 0, 1]]'], {}), '([[1, 1, 0], [1, 1, 0], [0, 0, 1]])\n', (11469, 11504), True, 'import numpy as np\n'), ((11588, 11616), 'tensorflow.gather', 'tf.gather', (['M', '[0, 1]'], {'axis': '(2)'}), '(M, [0, 1], axis=2)\n', (11597, 11616), True, 'import tensorflow as tf\n'), ((11654, 11682), 'tensorflow.gather', 'tf.gather', (['K', '[0, 1]'], {'axis': '(2)'}), '(K, [0, 1], axis=2)\n', (11663, 11682), True, 'import tensorflow as tf\n'), ((11720, 11748), 'tensorflow.gather', 'tf.gather', (['M', '[0, 1]'], {'axis': '(2)'}), '(M, [0, 1], axis=2)\n', (11729, 11748), True, 'import tensorflow as tf\n'), ((11786, 11814), 'tensorflow.gather', 'tf.gather', (['M', '[0, 2]'], {'axis': '(2)'}), '(M, [0, 2], axis=2)\n', (11795, 11814), True, 'import tensorflow as tf\n'), ((12628, 12666), 'tensorflow.name_scope', 'tf.name_scope', (['None', '"""gauss_map"""', '[mu]'], {}), "(None, 'gauss_map', [mu])\n", (12641, 12666), True, 'import tensorflow as tf\n'), ((12796, 12813), 'tensorflow.meshgrid', 'tf.meshgrid', (['x', 'y'], {}), '(x, y)\n', (12807, 12813), True, 'import tensorflow as tf\n'), ((12822, 12847), 'tensorflow.stack', 'tf.stack', (['[x, y]'], {'axis': '(-1)'}), '([x, y], axis=-1)\n', (12830, 12847), True, 'import tensorflow as tf\n'), ((12857, 12894), 'tensorflow.stack', 'tf.stack', (['([xy] * nb_landmarks)'], {'axis': '(0)'}), '([xy] * nb_landmarks, axis=0)\n', (12865, 12894), True, 'import tensorflow as tf\n'), ((12988, 13008), 'tensorflow.linalg.inv', 'tf.linalg.inv', (['sigma'], {}), '(sigma)\n', (13001, 13008), True, 'import tensorflow as tf\n'), ((13024, 13053), 'tensorflow.cast', 'tf.cast', (['invsigma', 'tf.float32'], {}), '(invsigma, tf.float32)\n', (13031, 13053), True, 'import tensorflow as tf\n'), ((13063, 13131), 'tensorflow.tile', 'tf.tile', (['invsigma[:, :, tf.newaxis, :, :]', '[1, 1, shape_hw[1], 1, 1]'], {}), '(invsigma[:, :, tf.newaxis, :, :], [1, 1, shape_hw[1], 1, 1])\n', (13070, 13131), True, 'import tensorflow as tf\n'), ((13157, 13173), 'tensorflow.matmul', 'tf.matmul', (['X', 'pp'], {}), '(X, pp)\n', (13166, 13173), True, 'import tensorflow as tf\n'), ((13184, 13216), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(dist * X)'], {'axis': '(-1)'}), '(dist * X, axis=-1)\n', (13197, 13216), True, 'import tensorflow as tf\n'), ((13229, 13242), 'tensorflow.exp', 'tf.exp', (['(-dist)'], {}), '(-dist)\n', (13235, 13242), True, 'import tensorflow as tf\n'), ((13255, 13292), 'tensorflow.transpose', 'tf.transpose', (['g_yx'], {'perm': '[0, 2, 3, 1]'}), '(g_yx, perm=[0, 2, 3, 1])\n', (13267, 13292), True, 'import tensorflow as tf\n'), ((14684, 14713), 'os.path.dirname', 'os.path.dirname', (['m_to_l_model'], {}), '(m_to_l_model)\n', (14699, 14713), False, 'import os\n'), ((14871, 14900), 'os.path.dirname', 
'os.path.dirname', (['m_to_l_model'], {}), '(m_to_l_model)\n', (14886, 14900), False, 'import os\n'), ((16025, 16064), 'os.path.join', 'os.path.join', (['eval_dir', "('m%d.png' % its)"], {}), "(eval_dir, 'm%d.png' % its)\n", (16037, 16064), False, 'import os\n'), ((16233, 16273), 'os.path.join', 'os.path.join', (['eval_dir', "('m_%d.npy' % its)"], {}), "(eval_dir, 'm_%d.npy' % its)\n", (16245, 16273), False, 'import os\n'), ((16291, 16335), 'os.path.join', 'os.path.join', (['eval_dir', "('sig3d_%d.npy' % its)"], {}), "(eval_dir, 'sig3d_%d.npy' % its)\n", (16303, 16335), False, 'import os\n'), ((16380, 16413), 'numpy.array', 'np.array', (['[[x]]'], {'dtype': 'np.float32'}), '([[x]], dtype=np.float32)\n', (16388, 16413), True, 'import numpy as np\n'), ((16428, 16444), 'tensorflow.zeros_like', 'tf.zeros_like', (['x'], {}), '(x)\n', (16441, 16444), True, 'import tensorflow as tf\n'), ((3574, 3598), 'numpy.expand_dims', 'np.expand_dims', (['mu_y', '(-1)'], {}), '(mu_y, -1)\n', (3588, 3598), True, 'import numpy as np\n'), ((3600, 3624), 'numpy.expand_dims', 'np.expand_dims', (['mu_x', '(-1)'], {}), '(mu_x, -1)\n', (3614, 3624), True, 'import numpy as np\n'), ((3851, 3864), 'numpy.exp', 'np.exp', (['(-dist)'], {}), '(-dist)\n', (3857, 3864), True, 'import numpy as np\n'), ((3957, 3991), 'tensorflow.reshape', 'tf.reshape', (['y', '[1, 1, shape_hw[0]]'], {}), '(y, [1, 1, shape_hw[0]])\n', (3967, 3991), True, 'import tensorflow as tf\n'), ((4000, 4034), 'tensorflow.reshape', 'tf.reshape', (['x', '[1, 1, shape_hw[1]]'], {}), '(x, [1, 1, shape_hw[1]])\n', (4010, 4034), True, 'import tensorflow as tf\n'), ((4175, 4202), 'tensorflow.expand_dims', 'tf.expand_dims', (['g_y'], {'axis': '(3)'}), '(g_y, axis=3)\n', (4189, 4202), True, 'import tensorflow as tf\n'), ((4213, 4240), 'tensorflow.expand_dims', 'tf.expand_dims', (['g_x'], {'axis': '(2)'}), '(g_x, axis=2)\n', (4227, 4240), True, 'import tensorflow as tf\n'), ((4252, 4271), 'tensorflow.matmul', 'tf.matmul', (['g_y', 'g_x'], {}), '(g_y, g_x)\n', (4261, 4271), True, 'import tensorflow as tf\n'), ((5665, 5701), 'numpy.expand_dims', 'np.expand_dims', (['maps[..., i]'], {'axis': '(3)'}), '(maps[..., i], axis=3)\n', (5679, 5701), True, 'import numpy as np\n'), ((5704, 5739), 'numpy.reshape', 'np.reshape', (['COLORS[i]', '[1, 1, 1, 3]'], {}), '(COLORS[i], [1, 1, 1, 3])\n', (5714, 5739), True, 'import numpy as np\n'), ((6188, 6220), 'tensorflow.stack', 'tf.stack', (['[ons, zr, zr]'], {'axis': '(-1)'}), '([ons, zr, zr], axis=-1)\n', (6196, 6220), True, 'import tensorflow as tf\n'), ((6430, 6462), 'tensorflow.stack', 'tf.stack', (['[zr, ons, zr]'], {'axis': '(-1)'}), '([zr, ons, zr], axis=-1)\n', (6438, 6462), True, 'import tensorflow as tf\n'), ((6711, 6743), 'tensorflow.stack', 'tf.stack', (['[zr, zr, ons]'], {'axis': '(-1)'}), '([zr, zr, ons], axis=-1)\n', (6719, 6743), True, 'import tensorflow as tf\n'), ((7294, 7326), 'tensorflow.stack', 'tf.stack', (['[fvs, zv, zv]'], {'axis': '(-1)'}), '([fvs, zv, zv], axis=-1)\n', (7302, 7326), True, 'import tensorflow as tf\n'), ((7328, 7360), 'tensorflow.stack', 'tf.stack', (['[zv, fvs, zv]'], {'axis': '(-1)'}), '([zv, fvs, zv], axis=-1)\n', (7336, 7360), True, 'import tensorflow as tf\n'), ((7380, 7411), 'tensorflow.stack', 'tf.stack', (['[px, py, ov]'], {'axis': '(-1)'}), '([px, py, ov], axis=-1)\n', (7388, 7411), True, 'import tensorflow as tf\n'), ((7932, 7956), 'tensorflow.matmul', 'tf.matmul', (['mus', 'invsigma'], {}), '(mus, invsigma)\n', (7941, 7956), True, 'import tensorflow as tf\n'), ((9630, 9662), 
'tensorflow.stack', 'tf.stack', (['[ons, zr, zr]'], {'axis': '(-1)'}), '([ons, zr, zr], axis=-1)\n', (9638, 9662), True, 'import tensorflow as tf\n'), ((9872, 9904), 'tensorflow.stack', 'tf.stack', (['[zr, ons, zr]'], {'axis': '(-1)'}), '([zr, ons, zr], axis=-1)\n', (9880, 9904), True, 'import tensorflow as tf\n'), ((10153, 10185), 'tensorflow.stack', 'tf.stack', (['[zr, zr, ons]'], {'axis': '(-1)'}), '([zr, zr, ons], axis=-1)\n', (10161, 10185), True, 'import tensorflow as tf\n'), ((10729, 10761), 'tensorflow.stack', 'tf.stack', (['[fvs, zv, zv]'], {'axis': '(-1)'}), '([fvs, zv, zv], axis=-1)\n', (10737, 10761), True, 'import tensorflow as tf\n'), ((10763, 10795), 'tensorflow.stack', 'tf.stack', (['[zv, fvs, zv]'], {'axis': '(-1)'}), '([zv, fvs, zv], axis=-1)\n', (10771, 10795), True, 'import tensorflow as tf\n'), ((10815, 10846), 'tensorflow.stack', 'tf.stack', (['[px, py, ov]'], {'axis': '(-1)'}), '([px, py, ov], axis=-1)\n', (10823, 10846), True, 'import tensorflow as tf\n'), ((11343, 11367), 'tensorflow.matmul', 'tf.matmul', (['mus', 'invsigma'], {}), '(mus, invsigma)\n', (11352, 11367), True, 'import tensorflow as tf\n'), ((12689, 12724), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'shape_hw[0]'], {}), '(-1.0, 1.0, shape_hw[0])\n', (12700, 12724), True, 'import tensorflow as tf\n'), ((12747, 12782), 'tensorflow.linspace', 'tf.linspace', (['(-1.0)', '(1.0)', 'shape_hw[1]'], {}), '(-1.0, 1.0, shape_hw[1])\n', (12758, 12782), True, 'import tensorflow as tf\n'), ((13526, 13536), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13534, 13536), True, 'import tensorflow as tf\n'), ((13733, 13743), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (13741, 13743), True, 'import tensorflow as tf\n'), ((17869, 17896), 'os.path.basename', 'os.path.basename', (['vidgmpath'], {}), '(vidgmpath)\n', (17885, 17896), False, 'import os\n'), ((17964, 17991), 'os.path.basename', 'os.path.basename', (['vidgmpath'], {}), '(vidgmpath)\n', (17980, 17991), False, 'import os\n'), ((18241, 18267), 'os.path.basename', 'os.path.basename', (['vidmpath'], {}), '(vidmpath)\n', (18257, 18267), False, 'import os\n'), ((18334, 18360), 'os.path.basename', 'os.path.basename', (['vidmpath'], {}), '(vidmpath)\n', (18350, 18360), False, 'import os\n'), ((4601, 4626), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1.0)'], {}), '(0, 1.0)\n', (4618, 4626), True, 'import numpy as np\n'), ((7103, 7116), 'tensorflow.shape', 'tf.shape', (['mus'], {}), '(mus)\n', (7111, 7116), True, 'import tensorflow as tf\n'), ((7155, 7168), 'tensorflow.shape', 'tf.shape', (['mus'], {}), '(mus)\n', (7163, 7168), True, 'import tensorflow as tf\n'), ((7733, 7764), 'tensorflow.transpose', 'tf.transpose', (['mus', '[0, 1, 3, 2]'], {}), '(mus, [0, 1, 3, 2])\n', (7745, 7764), True, 'import tensorflow as tf\n'), ((10538, 10551), 'tensorflow.shape', 'tf.shape', (['mus'], {}), '(mus)\n', (10546, 10551), True, 'import tensorflow as tf\n'), ((10590, 10603), 'tensorflow.shape', 'tf.shape', (['mus'], {}), '(mus)\n', (10598, 10603), True, 'import tensorflow as tf\n'), ((11108, 11139), 'tensorflow.transpose', 'tf.transpose', (['mus', '[0, 1, 3, 2]'], {}), '(mus, [0, 1, 3, 2])\n', (11120, 11139), True, 'import tensorflow as tf\n'), ((17180, 17223), 'os.path.join', 'os.path.join', (['eval_dir', "('2dgm_%d.png' % its)"], {}), "(eval_dir, '2dgm_%d.png' % its)\n", (17192, 17223), False, 'import os\n'), ((17431, 17474), 'os.path.join', 'os.path.join', (['eval_dir', "('genm_%d.png' % its)"], {}), "(eval_dir, 'genm_%d.png' % its)\n", (17443, 
17474), False, 'import os\n'), ((3896, 3922), 'tensorflow.pow', 'tf.pow', (['(dist + 1e-05)', '(0.25)'], {}), '(dist + 1e-05, 0.25)\n', (3902, 3922), True, 'import tensorflow as tf\n'), ((6236, 6248), 'tensorflow.cos', 'tf.cos', (['rotX'], {}), '(rotX)\n', (6242, 6248), True, 'import tensorflow as tf\n'), ((6309, 6321), 'tensorflow.sin', 'tf.sin', (['rotX'], {}), '(rotX)\n', (6315, 6321), True, 'import tensorflow as tf\n'), ((6323, 6335), 'tensorflow.cos', 'tf.cos', (['rotX'], {}), '(rotX)\n', (6329, 6335), True, 'import tensorflow as tf\n'), ((6387, 6399), 'tensorflow.cos', 'tf.cos', (['rotY'], {}), '(rotY)\n', (6393, 6399), True, 'import tensorflow as tf\n'), ((6405, 6417), 'tensorflow.sin', 'tf.sin', (['rotY'], {}), '(rotY)\n', (6411, 6417), True, 'import tensorflow as tf\n'), ((6512, 6524), 'tensorflow.cos', 'tf.cos', (['rotY'], {}), '(rotY)\n', (6518, 6524), True, 'import tensorflow as tf\n'), ((6576, 6588), 'tensorflow.cos', 'tf.cos', (['rotZ'], {}), '(rotZ)\n', (6582, 6588), True, 'import tensorflow as tf\n'), ((6649, 6661), 'tensorflow.sin', 'tf.sin', (['rotZ'], {}), '(rotZ)\n', (6655, 6661), True, 'import tensorflow as tf\n'), ((6663, 6675), 'tensorflow.cos', 'tf.cos', (['rotZ'], {}), '(rotZ)\n', (6669, 6675), True, 'import tensorflow as tf\n'), ((8599, 8637), 'tensorflow.stack', 'tf.stack', (['[det_m31, -det_m23]'], {'axis': '(-1)'}), '([det_m31, -det_m23], axis=-1)\n', (8607, 8637), True, 'import tensorflow as tf\n'), ((9678, 9690), 'tensorflow.cos', 'tf.cos', (['rotX'], {}), '(rotX)\n', (9684, 9690), True, 'import tensorflow as tf\n'), ((9751, 9763), 'tensorflow.sin', 'tf.sin', (['rotX'], {}), '(rotX)\n', (9757, 9763), True, 'import tensorflow as tf\n'), ((9765, 9777), 'tensorflow.cos', 'tf.cos', (['rotX'], {}), '(rotX)\n', (9771, 9777), True, 'import tensorflow as tf\n'), ((9829, 9841), 'tensorflow.cos', 'tf.cos', (['rotY'], {}), '(rotY)\n', (9835, 9841), True, 'import tensorflow as tf\n'), ((9847, 9859), 'tensorflow.sin', 'tf.sin', (['rotY'], {}), '(rotY)\n', (9853, 9859), True, 'import tensorflow as tf\n'), ((9954, 9966), 'tensorflow.cos', 'tf.cos', (['rotY'], {}), '(rotY)\n', (9960, 9966), True, 'import tensorflow as tf\n'), ((10018, 10030), 'tensorflow.cos', 'tf.cos', (['rotZ'], {}), '(rotZ)\n', (10024, 10030), True, 'import tensorflow as tf\n'), ((10091, 10103), 'tensorflow.sin', 'tf.sin', (['rotZ'], {}), '(rotZ)\n', (10097, 10103), True, 'import tensorflow as tf\n'), ((10105, 10117), 'tensorflow.cos', 'tf.cos', (['rotZ'], {}), '(rotZ)\n', (10111, 10117), True, 'import tensorflow as tf\n'), ((11998, 12036), 'tensorflow.stack', 'tf.stack', (['[det_m31, -det_m23]'], {'axis': '(-1)'}), '([det_m31, -det_m23], axis=-1)\n', (12006, 12036), True, 'import tensorflow as tf\n'), ((6251, 6263), 'tensorflow.sin', 'tf.sin', (['rotX'], {}), '(rotX)\n', (6257, 6263), True, 'import tensorflow as tf\n'), ((6494, 6506), 'tensorflow.sin', 'tf.sin', (['rotY'], {}), '(rotY)\n', (6500, 6506), True, 'import tensorflow as tf\n'), ((6591, 6603), 'tensorflow.sin', 'tf.sin', (['rotZ'], {}), '(rotZ)\n', (6597, 6603), True, 'import tensorflow as tf\n'), ((9693, 9705), 'tensorflow.sin', 'tf.sin', (['rotX'], {}), '(rotX)\n', (9699, 9705), True, 'import tensorflow as tf\n'), ((9936, 9948), 'tensorflow.sin', 'tf.sin', (['rotY'], {}), '(rotY)\n', (9942, 9948), True, 'import tensorflow as tf\n'), ((10033, 10045), 'tensorflow.sin', 'tf.sin', (['rotZ'], {}), '(rotZ)\n', (10039, 10045), True, 'import tensorflow as tf\n'), ((15528, 15560), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': 
'(True)'}), '(allow_growth=True)\n', (15541, 15560), True, 'import tensorflow as tf\n'), ((16872, 16909), 'numpy.squeeze', 'np.squeeze', (['((m_final + 1) * 0.5 * 255)'], {}), '((m_final + 1) * 0.5 * 255)\n', (16882, 16909), True, 'import numpy as np\n'), ((4069, 4097), 'tensorflow.abs', 'tf.abs', (['((mu_y - y) * inv_std)'], {}), '((mu_y - y) * inv_std)\n', (4075, 4097), True, 'import tensorflow as tf\n'), ((4133, 4161), 'tensorflow.abs', 'tf.abs', (['((mu_x - x) * inv_std)'], {}), '((mu_x - x) * inv_std)\n', (4139, 4161), True, 'import tensorflow as tf\n'), ((16623, 16655), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'allow_growth': '(True)'}), '(allow_growth=True)\n', (16636, 16655), True, 'import tensorflow as tf\n')]
|
import json
from seesaw.query_interface import AccessMethod
import numpy as np
import pandas as pd
from .dataset_manager import GlobalDataManager, SeesawDatasetManager
import os
import time
import sklearn.metrics
import math
import pyroaring as pr
from dataclasses import dataclass, field
def get_image_paths(image_root, path_array, idxs):
return [
os.path.normpath(f"{image_root}/{path_array[int(i)]}").replace("//", "/")
for i in idxs
]
from .basic_types import *
from .search_loop_models import *
from .search_loop_tools import *
from .dataset_tools import *
from .fine_grained_embedding import *
from .search_loop_models import adjust_vec, adjust_vec2
from .util import *
from .pairwise_rank_loss import VecState
from .query_interface import *
from .textual_feedback_box import OnlineModel, join_vecs2annotations
@dataclass
class LoopState:
curr_str: str = None
tvec: np.ndarray = None
    tmode: str = None
latency_profile: list = field(default_factory=list)
vec_state: VecState = None
model: OnlineModel = None
class SeesawLoop:
q: InteractiveQuery
params: SessionParams
state: LoopState
def __init__(
self, gdm: GlobalDataManager, q: InteractiveQuery, params: SessionParams
):
self.params = params
self.state = LoopState()
self.q = q
if self.params.interactive in ["textual"]:
param_dict = gdm.global_cache.read_state_dict(
"/home/gridsan/groups/fastai/omoll/seesaw_root/models/clip/ViT-B-32.pt",
jit=True,
)
self.state.model = OnlineModel(param_dict, self.params.method_config)
lp = {
"n_images": None,
"n_posvecs": None,
"n_negvecs": None,
"lookup": None,
"label": None,
"refine": None,
}
s = self.state
## ensure non-empty
s.latency_profile.append(lp)
def next_batch(self):
"""
        Gets the next batch of image indices based on the current vector.
"""
start_lookup = time.time()
s = self.state
p = self.params
lp = {
"n_images": None,
"n_posvecs": None,
"n_negvecs": None,
"lookup": None,
"label": None,
"refine": None,
}
s.latency_profile.append(lp)
if p.interactive == "textual":
if p.method_config["mode"] == "finetune":
vec = s.model.encode_string(s.curr_str)
rescore_m = lambda vecs: vecs @ vec.reshape(-1, 1)
elif p.method_config["mode"] == "linear":
if len(s.model.linear_scorer.scorers) == 0: ## first time
vec = s.model.encode_string(s.curr_str)
s.model.linear_scorer.add_scorer(
s.curr_str, torch.from_numpy(vec.reshape(-1))
)
rescore_m = self.state.model.score_vecs
vec = self.state.model.get_lookup_vec(s.curr_str)
else:
vec = s.tvec
rescore_m = lambda vecs: vecs @ vec.reshape(-1, 1)
b = self.q.query_stateful(
mode=s.tmode,
vector=vec,
batch_size=p.batch_size,
shortlist_size=p.shortlist_size,
agg_method=p.agg_method,
rescore_method=rescore_m,
)
idxbatch = b["dbidxs"]
lp["n_images"] = idxbatch.shape[0]
lp["lookup"] = time.time() - start_lookup
return b
    def compute_image_activations(self, dbidx, annotations):
pass
def refine(self):
"""
        Update the query vector / model from the labels collected so far. The box dict will have an entry for every index in the batch, including empty dataframes.
"""
start_refine = time.time()
p = self.params
s = self.state
lp = s.latency_profile[-1]
lp["label"] = start_refine - lp["lookup"]
if p.interactive == "plain":
return
elif p.interactive in ["textual"]:
# get vectors and string corresponding to current annotations
# run the updater.
print("textual update")
if (
"image_vector_strategy" not in p.dict()
or p.image_vector_strategy == None
or p.image_vector_strategy == "matched"
):
vecs = []
strs = []
acc = []
for dbidx in self.q.label_db.get_seen():
annot = self.q.label_db.get(dbidx, format="box")
assert annot is not None
if len(annot) == 0:
continue
dfvec, dfbox = join_vecs2annotations(self.q.index, dbidx, annot)
# best_box_iou, best_box_idx
## vectors with overlap
df = dfbox # use boxes as guide for now
mask_boxes = df.best_box_iou > p.method_config["vector_box_min_iou"]
df = df[mask_boxes]
if df.shape[0] > 0:
vecs.append(df.vectors.values)
strs.append(df.descriptions.values)
acc.append(df.marked_accepted.values)
if len(vecs) == 0:
print("no annotations for update... skipping")
return
all_vecs = np.concatenate(vecs)
all_strs = np.concatenate(strs)
marked_accepted = np.concatenate(acc)
elif p.image_vector_strategy == "computed":
vecs = []
strs = []
acc = []
# annot = self.q.label_db.get(dbidx, format='box')
for dbidx in self.q.label_db.get_seen():
annot = self.q.label_db.get(dbidx, format="box")
if len(annot) == 0:
continue
vecs.append(self.compute_image_activations(dbidx, annot))
strs.append()
pass
else:
assert False, "unknown image vec strategy"
losses = s.model.update(all_vecs, marked_accepted, all_strs, s.curr_str)
print("done with update", losses)
else:
Xt, yt = self.q.getXy()
lp["n_posvecs"] = (yt == 1).sum() # .shape[0]
lp["n_negvecs"] = (yt != 1).sum()
if (yt.shape[0] > 0) and (yt.max() > yt.min()):
s.tmode = "dot"
if p.interactive == "sklearn":
lr = sklearn.linear_model.LogisticRegression(
class_weight="balanced"
)
lr.fit(Xt, yt)
s.tvec = lr.coef_.reshape(1, -1)
elif p.interactive == "pytorch":
prob = yt.sum() / yt.shape[0]
w = np.clip((1 - prob) / prob, 0.1, 10.0)
cfg = p.method_config
if cfg["model_type"] == "logistic":
mod = PTLogisiticRegression(
Xt.shape[1],
learning_ratep=p.learning_rate,
C=0,
positive_weight=w,
)
if cfg["warm_start"] == "warm":
iv = torch.from_numpy(s.tvec)
iv = iv / iv.norm()
mod.linear.weight.data = iv.type(mod.linear.weight.dtype)
elif cfg["warm_start"] == "default":
pass
fit_reg(
mod=mod,
X=Xt.astype("float32"),
y=yt.astype("float"),
batch_size=p.minibatch_size,
)
s.tvec = mod.linear.weight.detach().numpy().reshape(1, -1)
elif cfg["model_type"] in ["cosine", "multirank"]:
for i in range(cfg["num_epochs"]):
s.tvec = adjust_vec(
s.tvec,
Xt,
yt,
learning_rate=cfg["learning_rate"],
max_examples=cfg["max_examples"],
minibatch_size=cfg["minibatch_size"],
loss_margin=cfg["loss_margin"],
)
elif cfg["model_type"] in ["multirank2"]:
npairs = yt.sum() * (1 - yt).sum()
max_iters = (
math.ceil(
min(npairs, cfg["max_examples"])
// cfg["minibatch_size"]
)
* cfg["num_epochs"]
)
print("max iters this round would have been", max_iters)
# print(s.vec_state.)
# vecs * niters = number of vector seen.
# n vec seen <= 10000
# niters <= 10000/vecs
max_vec_seen = 10000
n_iters = math.ceil(max_vec_seen / Xt.shape[0])
n_steps = np.clip(n_iters, 20, 200)
# print(f'steps for this iteration {n_steps}. num vecs: {Xt.shape[0]} ')
# want iters * vecs to be const..
# eg. dota. 1000*100*30
for _ in range(n_steps):
loss = s.vec_state.update(Xt, yt)
if loss == 0: # gradient is 0 when loss is 0.
print("loss is 0, breaking early")
break
s.tvec = s.vec_state.get_vec()
elif cfg["model_type"] == "solver":
s.tvec = adjust_vec2(s.tvec, Xt, yt, **p.solver_opts)
else:
assert False, "model type"
else:
assert False
else:
# print('missing positives or negatives to do any training', yt.shape, yt.max(), yt.min())
pass
class Session:
current_dataset: str
current_index: str
loop: SeesawLoop
acc_indices: list
image_timing: dict
acc_activations: list
total_results: int
timing: list
seen: pr.BitMap
accepted: pr.BitMap
q: InteractiveQuery
index: AccessMethod
def __init__(
self,
gdm: GlobalDataManager,
dataset: SeesawDatasetManager,
hdb: AccessMethod,
params: SessionParams,
):
self.gdm = gdm
self.dataset = dataset
self.acc_indices = []
self.acc_activations = []
self.seen = pr.BitMap([])
self.accepted = pr.BitMap([])
self.params = params
self.init_q = None
self.timing = []
self.image_timing = {}
self.index = hdb
self.q = hdb.new_query()
self.loop = SeesawLoop(self.gdm, self.q, params=self.params)
self.action_log = []
self._log("init")
def get_totals(self):
return {"seen": len(self.seen), "accepted": len(self.accepted)}
def _log(self, message: str):
self.action_log.append(
{
"logger": "server",
"time": time.time(),
"message": message,
"seen": len(self.seen),
"accepted": len(self.accepted),
}
)
def next(self):
self._log("next.start")
start = time.time()
r = self.loop.next_batch()
delta = time.time() - start
self.acc_indices.append(r["dbidxs"])
self.acc_activations.append(r["activations"])
self.timing.append(delta)
self._log("next.end")
return r["dbidxs"]
def set_text(self, key):
self._log("set_text")
self.init_q = key
p = self.loop.params
s = self.loop.state
s.curr_str = key
s.tvec = self.index.string2vec(string=key)
s.tmode = "dot"
if p.method_config.get("model_type", None) == "multirank2":
s.vec_state = VecState(
s.tvec,
margin=p.loss_margin,
opt_class=torch.optim.SGD,
opt_params={"lr": p.learning_rate},
)
def update_state(self, state: SessionState):
self._update_labeldb(state)
self._log(
"update_state.end"
) # log this after updating so that it includes all new information
def refine(self):
self._log("refine.start")
self.loop.refine()
self._log("refine.end")
def get_state(self) -> SessionState:
gdata = []
for indices, accs in zip(self.acc_indices, self.acc_activations):
imdata = self.get_panel_data(idxbatch=indices, activation_batch=accs)
gdata.append(imdata)
dat = {}
dat["action_log"] = self.action_log
dat["gdata"] = gdata
dat["timing"] = self.timing
dat["reference_categories"] = []
dat["params"] = self.params
dat["query_string"] = self.loop.state.curr_str
return SessionState(**dat)
def get_panel_data(self, *, idxbatch, activation_batch=None):
reslabs = []
urls = get_image_paths(self.dataset.image_root, self.dataset.paths, idxbatch)
for i, (url, dbidx) in enumerate(zip(urls, idxbatch)):
dbidx = int(dbidx)
boxes = self.q.label_db.get(
dbidx, format="box"
) # None means no annotations yet (undef), empty means no boxes.
if activation_batch is None:
activations = None
else:
activations = []
for row in activation_batch[i].to_dict(orient="records"):
score = row["score"]
del row["score"]
activations.append(ActivationData(box=Box(**row), score=score))
elt = Imdata(
url=url,
dbidx=dbidx,
boxes=boxes,
activations=activations,
timing=self.image_timing.get(dbidx, []),
)
reslabs.append(elt)
return reslabs
def _update_labeldb(self, state: SessionState):
## clear bitmap and reconstruct bc user may have erased previously accepted images
self.action_log = state.action_log # just replace the log
gdata = state.gdata
self.accepted.clear()
self.seen.clear()
for ldata in gdata:
for imdata in ldata:
self.image_timing[imdata.dbidx] = imdata.timing
self.seen.add(imdata.dbidx)
if is_image_accepted(imdata):
self.accepted.add(imdata.dbidx)
self.q.label_db.put(imdata.dbidx, imdata.boxes)
def prep_data(ds, p: SessionParams):
box_data, qgt = ds.load_ground_truth()
catgt = qgt[p.index_spec.c_name]
positive_box_data = box_data[box_data.category == p.index_spec.c_name]
present = pr.FrozenBitMap(catgt[~catgt.isna()].index.values)
positive = pr.FrozenBitMap(positive_box_data.dbidx.values)
assert positive.intersection(present) == positive
return box_data, present, positive
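# Note: prep_data loads the ground-truth annotations for the requested category
# and returns the full box annotations, the subset of image indices for which
# the category's ground truth is defined, and the indices of images containing
# at least one positive box (asserted to be a subset of the former).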
def make_session(gdm, p: SessionParams):
ds = gdm.get_dataset(p.index_spec.d_name)
hdb = gdm.load_index(p.index_spec.d_name, p.index_spec.i_name)
print("index loaded")
box_data = None
subset = None
positive = None
if p.index_spec.c_name is not None:
print("prepping data....")
box_data, subset, positive = prep_data(ds, p)
assert len(positive) != 0
hdb = hdb.subset(subset)
print("about to construct session...")
session = Session(gdm, ds, hdb, p)
print("session constructed...")
return {
"session": session,
"box_data": box_data,
"subset": subset,
"positive": positive,
}
|
[
"math.ceil",
"pyroaring.FrozenBitMap",
"numpy.clip",
"time.time",
"dataclasses.field",
"pyroaring.BitMap",
"numpy.concatenate"
] |
[((997, 1024), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1002, 1024), False, 'from dataclasses import dataclass, field\n'), ((15548, 15595), 'pyroaring.FrozenBitMap', 'pr.FrozenBitMap', (['positive_box_data.dbidx.values'], {}), '(positive_box_data.dbidx.values)\n', (15563, 15595), True, 'import pyroaring as pr\n'), ((2116, 2127), 'time.time', 'time.time', ([], {}), '()\n', (2125, 2127), False, 'import time\n'), ((3828, 3839), 'time.time', 'time.time', ([], {}), '()\n', (3837, 3839), False, 'import time\n'), ((11100, 11113), 'pyroaring.BitMap', 'pr.BitMap', (['[]'], {}), '([])\n', (11109, 11113), True, 'import pyroaring as pr\n'), ((11138, 11151), 'pyroaring.BitMap', 'pr.BitMap', (['[]'], {}), '([])\n', (11147, 11151), True, 'import pyroaring as pr\n'), ((11917, 11928), 'time.time', 'time.time', ([], {}), '()\n', (11926, 11928), False, 'import time\n'), ((3540, 3551), 'time.time', 'time.time', ([], {}), '()\n', (3549, 3551), False, 'import time\n'), ((11981, 11992), 'time.time', 'time.time', ([], {}), '()\n', (11990, 11992), False, 'import time\n'), ((11686, 11697), 'time.time', 'time.time', ([], {}), '()\n', (11695, 11697), False, 'import time\n'), ((5477, 5497), 'numpy.concatenate', 'np.concatenate', (['vecs'], {}), '(vecs)\n', (5491, 5497), True, 'import numpy as np\n'), ((5525, 5545), 'numpy.concatenate', 'np.concatenate', (['strs'], {}), '(strs)\n', (5539, 5545), True, 'import numpy as np\n'), ((5580, 5599), 'numpy.concatenate', 'np.concatenate', (['acc'], {}), '(acc)\n', (5594, 5599), True, 'import numpy as np\n'), ((6985, 7022), 'numpy.clip', 'np.clip', (['((1 - prob) / prob)', '(0.1)', '(10.0)'], {}), '((1 - prob) / prob, 0.1, 10.0)\n', (6992, 7022), True, 'import numpy as np\n'), ((9442, 9479), 'math.ceil', 'math.ceil', (['(max_vec_seen / Xt.shape[0])'], {}), '(max_vec_seen / Xt.shape[0])\n', (9451, 9479), False, 'import math\n'), ((9514, 9539), 'numpy.clip', 'np.clip', (['n_iters', '(20)', '(200)'], {}), '(n_iters, 20, 200)\n', (9521, 9539), True, 'import numpy as np\n')]
|
#
# Plot convergence of reduced models as the non-dimensional conductivity is
# increased. Here "bar" refers to the averaged through-cell model (i.e. DFNCC)
#
import pybamm
import sys
import pickle
import shared
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
# set style
matplotlib.rc_file("_matplotlibrc", use_default_template=True)
# increase recursion limit for large expression trees
sys.setrecursionlimit(100000)
pybamm.set_logging_level("INFO")
# choose values to loop over and provide filenames
values = np.array([1e5, 1e6, 1e7, 1e8, 1e9]) / 4.758 # sets non-dim sigma to 1e5 etc.
filenames = [
"comsol_data/comsol_1plus1D_sigma_1e5.pickle",
"comsol_data/comsol_1plus1D_sigma_1e6.pickle",
"comsol_data/comsol_1plus1D_sigma_1e7.pickle",
"comsol_data/comsol_1plus1D_sigma_1e8.pickle",
"comsol_data/comsol_1plus1D_sigma_1e9.pickle",
]
# load current collector and DFN models
cc_model = pybamm.current_collector.EffectiveResistance1D()
dfn_av = pybamm.lithium_ion.DFN({"thermal": "x-lumped"}, name="Average DFN")
dfn = pybamm.lithium_ion.DFN(
{"current collector": "potential pair", "dimensionality": 1, "thermal": "x-lumped"},
name="1+1D DFN",
)
models = {"Current collector": cc_model, "Average DFN": dfn_av, "1+1D DFN": dfn}
# parameters
param = dfn.default_parameter_values
# process model and geometry, and discretise
meshes = {}
discs = {}
for name, model in models.items():
param.process_model(model)
geometry = model.default_geometry
param.process_geometry(geometry)
# set mesh
var = pybamm.standard_spatial_vars
submesh_types = model.default_submesh_types
# set npts
var = pybamm.standard_spatial_vars
npts = 16
var_pts = {
var.x_n: npts,
var.x_s: npts,
var.x_p: npts,
var.r_n: npts,
var.r_p: npts,
var.z: npts,
}
meshes[name] = pybamm.Mesh(geometry, submesh_types, var_pts)
discs[name] = pybamm.Discretisation(meshes[name], model.default_spatial_methods)
discs[name].process_model(model, check_model=False)
# solve models. Then compute "error"
errors = {
"Negative current collector potential [V]": [None] * len(values),
"Positive current collector potential [V]": [None] * len(values),
"X-averaged negative particle surface concentration [mol.m-3]": [None]
* len(values),
"X-averaged positive particle surface concentration [mol.m-3]": [None]
* len(values),
"Current collector current density [A.m-2]": [None] * len(values),
"X-averaged cell temperature [K]": [None] * len(values),
"Terminal voltage [V]": [None] * len(values),
}
errors_bar = {
"Negative current collector potential [V]": [None] * len(values),
"Positive current collector potential [V]": [None] * len(values),
"X-averaged negative particle surface concentration [mol.m-3]": [None]
* len(values),
"X-averaged positive particle surface concentration [mol.m-3]": [None]
* len(values),
"Current collector current density [A.m-2]": [None] * len(values),
"X-averaged cell temperature [K]": [None] * len(values),
"Terminal voltage [V]": [None] * len(values),
}
sigmas = [None] * len(values)
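# For each conductivity value: solve the current collector problem, the averaged DFN
# and the full 1+1D DFN, then record the relative error of the 1+1D DFN ("errors")
# and of the DFNCC reconstruction ("errors_bar") against the COMSOL reference.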
for i, val in enumerate(values):
comsol_variables = pickle.load(open(filenames[i], "rb"))
comsol_t = comsol_variables["time"]
# update values
param.update(
{
"Negative current collector conductivity [S.m-1]": val,
"Positive current collector conductivity [S.m-1]": val,
}
)
for name, model in models.items():
param.update_model(model, discs[name])
# solve
tau = param.evaluate(pybamm.standard_parameters_lithium_ion.tau_discharge)
time = comsol_t / tau
solutions = {}
for name, model in models.items():
if name == "Current collector":
solver = pybamm.AlgebraicSolver(tol=1e-6)
solutions[name] = solver.solve(model)
else:
# solver
solver = pybamm.CasadiSolver(
atol=1e-6, rtol=1e-6, root_tol=1e-3, root_method="hybr", mode="fast"
)
solutions[name] = solver.solve(model, time)
mesh = meshes["1+1D DFN"]
cc_mesh = meshes["Current collector"]
solution = solutions["1+1D DFN"]
solution_1D = solutions["Average DFN"]
cc_solution = solutions["Current collector"]
# create comsol vars interpolated onto pybamm mesh to compare errors
comsol_model = shared.make_comsol_model(comsol_variables, mesh, param, thermal=True)
# compute "error" using times up to voltage cut off
t = solutions["1+1D DFN"].t
    # Note: casadi doesn't support events, so we find this time after the solve
if isinstance(solver, pybamm.CasadiSolver):
V_cutoff = param.evaluate(
pybamm.standard_parameters_lithium_ion.voltage_low_cut_dimensional
)
voltage = pybamm.ProcessedVariable(
models["1+1D DFN"].variables["Terminal voltage [V]"],
solution.t,
solution.y,
mesh=mesh,
)(time)
# only use times up to the voltage cutoff
voltage_OK = voltage[voltage > V_cutoff]
t = t[0 : len(voltage_OK)]
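    # Relative RMS difference between the 1+1D DFN solution and the COMSOL reference
    # for a given variable, evaluated on the current collector nodes where applicable.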
def compute_error(variable_name):
domain = comsol_model.variables[variable_name].domain
if domain == []:
comsol_var = pybamm.ProcessedVariable(
comsol_model.variables[variable_name], solution.t, solution.y, mesh=mesh
)(t=t)
pybamm_var = pybamm.ProcessedVariable(
models["1+1D DFN"].variables[variable_name],
solution.t,
solution.y,
mesh=mesh,
)(t=t)
else:
z = mesh["current collector"][0].nodes
comsol_var = pybamm.ProcessedVariable(
comsol_model.variables[variable_name], solution.t, solution.y, mesh=mesh
)(z=z, t=t)
pybamm_var = pybamm.ProcessedVariable(
models["1+1D DFN"].variables[variable_name],
solution.t,
solution.y,
mesh=mesh,
)(z=z, t=t)
# Compute error in positive potential with respect to the voltage
if variable_name == "Positive current collector potential [V]":
comsol_var = comsol_var - pybamm.ProcessedVariable(
comsol_model.variables["Terminal voltage [V]"],
solution.t,
solution.y,
mesh=mesh,
)(t=t)
pybamm_var = pybamm_var - pybamm.ProcessedVariable(
models["1+1D DFN"].variables["Terminal voltage [V]"],
solution.t,
solution.y,
mesh=mesh,
)(t=t)
# compute RMS difference divided by RMS of comsol_var
error = np.sqrt(np.nanmean((pybamm_var - comsol_var) ** 2)) / np.sqrt(
np.nanmean((comsol_var) ** 2)
)
return error
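    # Same relative RMS measure for the DFNCC ("bar") reconstruction, which combines
    # the averaged DFN solution with the separately solved current collector problem.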
def compute_error_bar(variable_name):
domain = comsol_model.variables[variable_name].domain
if domain == []:
comsol_var = pybamm.ProcessedVariable(
comsol_model.variables[variable_name],
solution.t,
solution.y,
mesh=cc_mesh,
)(t=t)
else:
z = cc_mesh["current collector"][0].nodes
comsol_var = pybamm.ProcessedVariable(
comsol_model.variables[variable_name],
solution.t,
solution.y,
mesh=cc_mesh,
)(z=z, t=t)
# Compute error in positive potential with respect to the voltage
if variable_name == "Positive current collector potential [V]":
comsol_var = comsol_var - pybamm.ProcessedVariable(
comsol_model.variables["Terminal voltage [V]"],
solution.t,
solution.y,
mesh=mesh,
)(t=t)
# compute pybamm vars for 1+1D bar model
R_cc = param.process_symbol(
cc_model.variables["Effective current collector resistance"]
).evaluate(t=cc_solution.t, y=cc_solution.y)[0][0]
V_av_1D = pybamm.ProcessedVariable(
models["Average DFN"].variables["Terminal voltage"],
solution_1D.t,
solution_1D.y,
mesh=mesh,
)
I_av = pybamm.ProcessedVariable(
models["Average DFN"].variables["Total current density"],
solution_1D.t,
solution_1D.y,
mesh=mesh,
)
def V_av(t):
return V_av_1D(t) - I_av(t) * R_cc
pot_scale = param.evaluate(
pybamm.standard_parameters_lithium_ion.potential_scale
)
U_ref = param.evaluate(
pybamm.standard_parameters_lithium_ion.U_p_ref
) - param.evaluate(pybamm.standard_parameters_lithium_ion.U_n_ref)
def V_av_dim(t):
return U_ref + V_av(t) * pot_scale
if variable_name == "Negative current collector potential [V]":
potentials = cc_model.get_processed_potentials(
cc_solution, cc_mesh, param, V_av, I_av
)
pybamm_var = potentials[variable_name](t, z)
elif variable_name == "Positive current collector potential [V]":
potentials = cc_model.get_processed_potentials(
cc_solution, cc_mesh, param, V_av, I_av
)
pybamm_var = potentials[variable_name](t, z) - V_av_dim(t)
elif variable_name == "Terminal voltage [V]":
pybamm_var = V_av_dim(t)
else:
pybamm_var_1D = pybamm.ProcessedVariable(
models["Average DFN"].variables[variable_name],
solution_1D.t,
solution_1D.y,
mesh=mesh,
)
pybamm_var = np.transpose(
np.repeat(pybamm_var_1D(t)[:, np.newaxis], len(z), axis=1)
)
# compute RMS difference divided by RMS of comsol_var
error = np.sqrt(np.nanmean((pybamm_var - comsol_var) ** 2)) / np.sqrt(
np.nanmean((comsol_var) ** 2)
)
return error
# compute non-dim sigma (note sigma_cn=sigma_cp)
sigmas[i] = param.evaluate(pybamm.standard_parameters_lithium_ion.sigma_cn)
# compute errors
for variable in errors.keys():
try:
errors[variable][i] = compute_error(variable)
except KeyError:
pass
try:
errors_bar[variable][i] = compute_error_bar(variable)
except KeyError:
pass
# set up figure
fig, ax = plt.subplots(1, 2, figsize=(6.4, 4))
fig.subplots_adjust(left=0.1, bottom=0.1, right=0.8, top=0.93, wspace=0.33, hspace=0.5)
labels = {
"Negative current collector potential [V]": r"$\phi^*_{\mathrm{s,cn}}$",
"Positive current collector potential [V]": r"$\phi^*_{\mathrm{s,cp}} - V^*$",
"X-averaged negative particle surface concentration [mol.m-3]": r"$\bar{c}_{\mathrm{s,n,surf}}^*$",
"X-averaged positive particle surface concentration [mol.m-3]": r"$\bar{c}_{\mathrm{s,p,surf}}^*$",
"Current collector current density [A.m-2]": r"$\mathcal{I}^*$",
"X-averaged cell temperature [K]": r"$\bar{T}^*$",
"Terminal voltage [V]": r"$V^*$",
}
# loop of vals to plot
delta = param.evaluate(pybamm.standard_parameters_lithium_ion.delta)
sigmas = np.array(sigmas)
counter = 0
for variable in [
"Negative current collector potential [V]",
"Positive current collector potential [V]",
"X-averaged negative particle surface concentration [mol.m-3]",
"X-averaged positive particle surface concentration [mol.m-3]",
]:
counter += 1
# dummy points for colors to add to legend
ax[1].plot(np.nan, np.nan, "o", color="C{}".format(counter), label=labels[variable])
try:
ax[0].plot(
sigmas * delta ** 2,
errors[variable],
marker="o",
linestyle="solid",
markersize=7,
fillstyle="none",
color="C{}".format(counter),
)
except KeyError:
pass
try:
ax[0].plot(
sigmas * delta ** 2,
errors_bar[variable],
marker="x",
linestyle="dotted",
markersize=7,
color="C{}".format(counter),
)
except KeyError:
pass
for variable in [
"Current collector current density [A.m-2]",
"X-averaged cell temperature [K]",
"Terminal voltage [V]",
]:
counter += 1
# dummy points for colors to add to legend
ax[1].plot(np.nan, np.nan, "o", color="C{}".format(counter), label=labels[variable])
try:
ax[1].plot(
sigmas * delta ** 2,
errors[variable],
marker="o",
linestyle="solid",
markersize=7,
fillstyle="none",
color="C{}".format(counter),
)
except KeyError:
pass
try:
ax[1].plot(
sigmas * delta ** 2,
errors_bar[variable],
marker="x",
linestyle="dotted",
markersize=7,
color="C{}".format(counter),
)
except KeyError:
pass
# labels and legend
ax[0].set_xlabel(r"$\sigma' = \delta^2 \sigma$")
ax[0].set_ylabel("RMS Error")
ax[0].set_xscale("log")
ax[0].set_yscale("log")
ax[1].set_xlabel(r"$\sigma' = \delta^2 \sigma$")
ax[1].set_ylabel("RMS Error")
ax[1].set_xscale("log")
ax[1].set_yscale("log")
ax[0].set_xlim([1e-1, 1e4])
ax[0].set_ylim([1e-4, 1])
ax[0].set_xticks([1, 1e1, 1e2, 1e3, 1e4])
ax[0].set_yticks([1e-4, 1e-3, 1e-2, 1e-1, 1])
ax[1].set_xlim([1e-1, 1e4])
ax[1].set_ylim([1e-6, 1])
ax[1].set_xticks([1, 1e1, 1e2, 1e3, 1e4])
ax[1].set_yticks([1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, 1])
leg1 = ax[1].legend(loc="lower left", bbox_to_anchor=(1.05, 0.1), borderaxespad=0.0)
# add dummy points for legend of styles
m_1plus1D, = ax[1].plot(np.nan, np.nan, "ko-", fillstyle="none")
m_DFNCC, = ax[1].plot(np.nan, np.nan, "kx:")
leg2 = ax[1].legend(
[m_1plus1D, m_DFNCC],
[r"$1+1$D", "DFNCC"],
loc="lower left",
bbox_to_anchor=(1.05, 0.8),
borderaxespad=0.0,
)
ax[1].add_artist(leg1)
plt.show()
|
[
"pybamm.CasadiSolver",
"pybamm.current_collector.EffectiveResistance1D",
"matplotlib.pyplot.show",
"pybamm.Discretisation",
"pybamm.AlgebraicSolver",
"pybamm.Mesh",
"pybamm.set_logging_level",
"shared.make_comsol_model",
"matplotlib.rc_file",
"matplotlib.pyplot.subplots",
"numpy.array",
"sys.setrecursionlimit",
"pybamm.lithium_ion.DFN",
"pybamm.ProcessedVariable",
"numpy.nanmean"
] |
[((295, 357), 'matplotlib.rc_file', 'matplotlib.rc_file', (['"""_matplotlibrc"""'], {'use_default_template': '(True)'}), "('_matplotlibrc', use_default_template=True)\n", (313, 357), False, 'import matplotlib\n'), ((413, 442), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(100000)'], {}), '(100000)\n', (434, 442), False, 'import sys\n'), ((444, 476), 'pybamm.set_logging_level', 'pybamm.set_logging_level', (['"""INFO"""'], {}), "('INFO')\n", (468, 476), False, 'import pybamm\n'), ((939, 987), 'pybamm.current_collector.EffectiveResistance1D', 'pybamm.current_collector.EffectiveResistance1D', ([], {}), '()\n', (985, 987), False, 'import pybamm\n'), ((997, 1064), 'pybamm.lithium_ion.DFN', 'pybamm.lithium_ion.DFN', (["{'thermal': 'x-lumped'}"], {'name': '"""Average DFN"""'}), "({'thermal': 'x-lumped'}, name='Average DFN')\n", (1019, 1064), False, 'import pybamm\n'), ((1071, 1199), 'pybamm.lithium_ion.DFN', 'pybamm.lithium_ion.DFN', (["{'current collector': 'potential pair', 'dimensionality': 1, 'thermal':\n 'x-lumped'}"], {'name': '"""1+1D DFN"""'}), "({'current collector': 'potential pair',\n 'dimensionality': 1, 'thermal': 'x-lumped'}, name='1+1D DFN')\n", (1093, 1199), False, 'import pybamm\n'), ((10686, 10722), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(6.4, 4)'}), '(1, 2, figsize=(6.4, 4))\n', (10698, 10722), True, 'import matplotlib.pyplot as plt\n'), ((11456, 11472), 'numpy.array', 'np.array', (['sigmas'], {}), '(sigmas)\n', (11464, 11472), True, 'import numpy as np\n'), ((14295, 14305), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (14303, 14305), True, 'import matplotlib.pyplot as plt\n'), ((538, 608), 'numpy.array', 'np.array', (['[100000.0, 1000000.0, 10000000.0, 100000000.0, 1000000000.0]'], {}), '([100000.0, 1000000.0, 10000000.0, 100000000.0, 1000000000.0])\n', (546, 608), True, 'import numpy as np\n'), ((1897, 1942), 'pybamm.Mesh', 'pybamm.Mesh', (['geometry', 'submesh_types', 'var_pts'], {}), '(geometry, submesh_types, var_pts)\n', (1908, 1942), False, 'import pybamm\n'), ((1961, 2027), 'pybamm.Discretisation', 'pybamm.Discretisation', (['meshes[name]', 'model.default_spatial_methods'], {}), '(meshes[name], model.default_spatial_methods)\n', (1982, 2027), False, 'import pybamm\n'), ((4473, 4542), 'shared.make_comsol_model', 'shared.make_comsol_model', (['comsol_variables', 'mesh', 'param'], {'thermal': '(True)'}), '(comsol_variables, mesh, param, thermal=True)\n', (4497, 4542), False, 'import shared\n'), ((8230, 8353), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['Average DFN'].variables['Terminal voltage']", 'solution_1D.t', 'solution_1D.y'], {'mesh': 'mesh'}), "(models['Average DFN'].variables['Terminal voltage'\n ], solution_1D.t, solution_1D.y, mesh=mesh)\n", (8254, 8353), False, 'import pybamm\n'), ((8423, 8551), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['Average DFN'].variables['Total current density']", 'solution_1D.t', 'solution_1D.y'], {'mesh': 'mesh'}), "(models['Average DFN'].variables[\n 'Total current density'], solution_1D.t, solution_1D.y, mesh=mesh)\n", (8447, 8551), False, 'import pybamm\n'), ((3863, 3896), 'pybamm.AlgebraicSolver', 'pybamm.AlgebraicSolver', ([], {'tol': '(1e-06)'}), '(tol=1e-06)\n', (3885, 3896), False, 'import pybamm\n'), ((4002, 4099), 'pybamm.CasadiSolver', 'pybamm.CasadiSolver', ([], {'atol': '(1e-06)', 'rtol': '(1e-06)', 'root_tol': '(0.001)', 'root_method': '"""hybr"""', 'mode': '"""fast"""'}), "(atol=1e-06, rtol=1e-06, root_tol=0.001, 
root_method=\n 'hybr', mode='fast')\n", (4021, 4099), False, 'import pybamm\n'), ((4900, 5018), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['1+1D DFN'].variables['Terminal voltage [V]']", 'solution.t', 'solution.y'], {'mesh': 'mesh'}), "(models['1+1D DFN'].variables[\n 'Terminal voltage [V]'], solution.t, solution.y, mesh=mesh)\n", (4924, 5018), False, 'import pybamm\n'), ((5365, 5467), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (['comsol_model.variables[variable_name]', 'solution.t', 'solution.y'], {'mesh': 'mesh'}), '(comsol_model.variables[variable_name], solution.t,\n solution.y, mesh=mesh)\n', (5389, 5467), False, 'import pybamm\n'), ((5524, 5632), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['1+1D DFN'].variables[variable_name]", 'solution.t', 'solution.y'], {'mesh': 'mesh'}), "(models['1+1D DFN'].variables[variable_name],\n solution.t, solution.y, mesh=mesh)\n", (5548, 5632), False, 'import pybamm\n'), ((5803, 5905), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (['comsol_model.variables[variable_name]', 'solution.t', 'solution.y'], {'mesh': 'mesh'}), '(comsol_model.variables[variable_name], solution.t,\n solution.y, mesh=mesh)\n', (5827, 5905), False, 'import pybamm\n'), ((5967, 6075), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['1+1D DFN'].variables[variable_name]", 'solution.t', 'solution.y'], {'mesh': 'mesh'}), "(models['1+1D DFN'].variables[variable_name],\n solution.t, solution.y, mesh=mesh)\n", (5991, 6075), False, 'import pybamm\n'), ((6861, 6903), 'numpy.nanmean', 'np.nanmean', (['((pybamm_var - comsol_var) ** 2)'], {}), '((pybamm_var - comsol_var) ** 2)\n', (6871, 6903), True, 'import numpy as np\n'), ((6928, 6955), 'numpy.nanmean', 'np.nanmean', (['(comsol_var ** 2)'], {}), '(comsol_var ** 2)\n', (6938, 6955), True, 'import numpy as np\n'), ((7145, 7250), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (['comsol_model.variables[variable_name]', 'solution.t', 'solution.y'], {'mesh': 'cc_mesh'}), '(comsol_model.variables[variable_name], solution.t,\n solution.y, mesh=cc_mesh)\n', (7169, 7250), False, 'import pybamm\n'), ((7424, 7529), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (['comsol_model.variables[variable_name]', 'solution.t', 'solution.y'], {'mesh': 'cc_mesh'}), '(comsol_model.variables[variable_name], solution.t,\n solution.y, mesh=cc_mesh)\n', (7448, 7529), False, 'import pybamm\n'), ((10105, 10147), 'numpy.nanmean', 'np.nanmean', (['((pybamm_var - comsol_var) ** 2)'], {}), '((pybamm_var - comsol_var) ** 2)\n', (10115, 10147), True, 'import numpy as np\n'), ((10172, 10199), 'numpy.nanmean', 'np.nanmean', (['(comsol_var ** 2)'], {}), '(comsol_var ** 2)\n', (10182, 10199), True, 'import numpy as np\n'), ((6346, 6457), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["comsol_model.variables['Terminal voltage [V]']", 'solution.t', 'solution.y'], {'mesh': 'mesh'}), "(comsol_model.variables['Terminal voltage [V]'],\n solution.t, solution.y, mesh=mesh)\n", (6370, 6457), False, 'import pybamm\n'), ((6576, 6694), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['1+1D DFN'].variables['Terminal voltage [V]']", 'solution.t', 'solution.y'], {'mesh': 'mesh'}), "(models['1+1D DFN'].variables[\n 'Terminal voltage [V]'], solution.t, solution.y, mesh=mesh)\n", (6600, 6694), False, 'import pybamm\n'), ((7800, 7911), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["comsol_model.variables['Terminal voltage [V]']", 'solution.t', 'solution.y'], 
{'mesh': 'mesh'}), "(comsol_model.variables['Terminal voltage [V]'],\n solution.t, solution.y, mesh=mesh)\n", (7824, 7911), False, 'import pybamm\n'), ((9696, 9813), 'pybamm.ProcessedVariable', 'pybamm.ProcessedVariable', (["models['Average DFN'].variables[variable_name]", 'solution_1D.t', 'solution_1D.y'], {'mesh': 'mesh'}), "(models['Average DFN'].variables[variable_name],\n solution_1D.t, solution_1D.y, mesh=mesh)\n", (9720, 9813), False, 'import pybamm\n')]
|
from numpy import mat
from math import sin, cos, radians
def rot_y(de):
t = mat([
[ cos(de), 0, sin(de)],
[ 0, 1, 0],
[-sin(de), 0, cos(de)]
])
return t
def rot_z(de):
t = mat([
[cos(de), -sin(de), 0],
[sin(de), cos(de), 0],
[ 0, 0, 1]
])
return t
def transf(x, y, z, a_degree, b_degree):
a, b = radians(a_degree), radians(b_degree)
nozzle_length = 3.63
nozzle_offset = 7.63
table_x_offset = 30
table_z_offset = 54.375
point_init = mat([nozzle_length, 0 , -nozzle_offset]).T
point_after = rot_z(b) * rot_y(a) * point_init\
+ mat([x, y, -z]).T
axis_trans = mat([-table_x_offset, 0, table_z_offset]).T
point_table = point_after + axis_trans
#print(point_table)
vector = rot_z(b) * rot_y(a) * mat([1, 0, 0]).T
#print(vector)
print(x, y, z, a_degree, b_degree)
return point_table[0,0], point_table[1,0], point_table[2,0],\
vector[0,0], vector[1,0], vector[2,0]
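# Example usage (illustrative sketch, not part of the original module): with zero
# translation and rotation the nozzle tip is only shifted by the fixed table offsets
# and the tool direction stays along the x-axis:
#   px, py, pz, vx, vy, vz = transf(0, 0, 0, 0, 0)
#   # px = 3.63 - 30 = -26.37, py = 0, pz = -7.63 + 54.375 = 46.745, (vx, vy, vz) = (1, 0, 0)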
|
[
"math.radians",
"numpy.mat",
"math.cos",
"math.sin"
] |
[((523, 540), 'math.radians', 'radians', (['a_degree'], {}), '(a_degree)\n', (530, 540), False, 'from math import sin, cos, radians\n'), ((542, 559), 'math.radians', 'radians', (['b_degree'], {}), '(b_degree)\n', (549, 559), False, 'from math import sin, cos, radians\n'), ((680, 719), 'numpy.mat', 'mat', (['[nozzle_length, 0, -nozzle_offset]'], {}), '([nozzle_length, 0, -nozzle_offset])\n', (683, 719), False, 'from numpy import mat\n'), ((834, 875), 'numpy.mat', 'mat', (['[-table_x_offset, 0, table_z_offset]'], {}), '([-table_x_offset, 0, table_z_offset])\n', (837, 875), False, 'from numpy import mat\n'), ((798, 813), 'numpy.mat', 'mat', (['[x, y, -z]'], {}), '([x, y, -z])\n', (801, 813), False, 'from numpy import mat\n'), ((984, 998), 'numpy.mat', 'mat', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (987, 998), False, 'from numpy import mat\n'), ((105, 112), 'math.cos', 'cos', (['de'], {}), '(de)\n', (108, 112), False, 'from math import sin, cos, radians\n'), ((117, 124), 'math.sin', 'sin', (['de'], {}), '(de)\n', (120, 124), False, 'from math import sin, cos, radians\n'), ((197, 204), 'math.cos', 'cos', (['de'], {}), '(de)\n', (200, 204), False, 'from math import sin, cos, radians\n'), ((310, 317), 'math.cos', 'cos', (['de'], {}), '(de)\n', (313, 317), False, 'from math import sin, cos, radians\n'), ((350, 357), 'math.sin', 'sin', (['de'], {}), '(de)\n', (353, 357), False, 'from math import sin, cos, radians\n'), ((360, 367), 'math.cos', 'cos', (['de'], {}), '(de)\n', (363, 367), False, 'from math import sin, cos, radians\n'), ((185, 192), 'math.sin', 'sin', (['de'], {}), '(de)\n', (188, 192), False, 'from math import sin, cos, radians\n'), ((320, 327), 'math.sin', 'sin', (['de'], {}), '(de)\n', (323, 327), False, 'from math import sin, cos, radians\n')]
|
"""resample converts audio samples from one sampling rate to another.
This module contains no actual resampling code; it simply tries a series of
options in descending order of preference, using the best one available.
"""
import numpy as np
import signal
@signal.processor
def resample(clip, new_rate):
# this would be a good place to make sure that the sampling rate requested
# is reasonable.
data = _engine(clip, clip.sample_rate, new_rate)
return signal.Clip(data, new_rate)
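# Example (sketch): assuming `clip` is a signal.Clip with a `sample_rate` attribute,
# this returns a new Clip resampled to 16 kHz using whichever backend is selected below.
#   downsampled = resample(clip, 16000)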
def _scikits(data, old_rate, new_rate):
ratio = float(new_rate) / float(old_rate)
# samples must run along axis 0 and there is no option to change
# that, so we'll have to transpose our array in each direction.
return scikits.samplerate.resample(data.T, ratio).T
def _samplerate(data, old_rate, new_rate):
ratio = float(new_rate) / float(old_rate)
# samplerate expects data to be in [samples, channels] order, with no
# option to specify the axis.
return samplerate.resample(data.T, ratio).T
def _resampy(data, old_rate, new_rate):
# resampy assumes [channels, samples] unless you tell it otherwise
return resampy.resample(data, old_rate, new_rate)
def _nnresample(data, old_rate, new_rate):
# nnresample copies scipy.signal's API, so it assumes time is axis 0
# unless you specify otherwise
return nnresample.resample(data, new_rate, old_rate, axis=-1)
def _scipy_signal(data, old_rate, new_rate):
ratio = float(new_rate) / float(old_rate)
new_length = int(np.ceil(data.shape[-1] * ratio))
# scipy.signal assumes [samples, channels] unless you specify axis
return scipy.signal.resample(data, new_length, axis=-1)
def _audioop(data, old_rate, new_rate):
nchannels = data.shape[0] if data.ndim > 1 else 1
orig_type = data.dtype
if data.dtype.kind == "f":
# ratecv only works on integer arrays, so we'll have to convert floats.
data = (data * np.iinfo(np.int16).max).astype(np.int16)
width = data.dtype.itemsize
buf, _ = audioop.ratecv(data, width, nchannels, old_rate, new_rate, None)
out = np.frombuffer(buf, dtype=data.dtype)
if orig_type.kind == "f":
out = out.astype(orig_type) / float(np.iinfo(np.int16).max)
return out
# On initialization, try different options until we find a resampling library.
_engine = None
# scikits.samplerate is fast and good, but it is based on libsamplerate
# so it won't load if the library is not installed; it also hasn't been
# updated for python 3, or in a while at all.
if not _engine:
try:
import scikits.samplerate
_engine = _scikits
except ImportError:
pass
# samplerate is a separate fork of scikits.samplerate, also based on
# libsamplerate, with the same interface.
if not _engine:
try:
import samplerate
_engine = _samplerate
except ImportError:
pass
# resampy is fast and good.
if not _engine:
try:
import resampy
_engine = _resampy
except ImportError:
pass
# nnresample is a better wrapper around scipy.signal.resample
if not _engine:
try:
import nnresample
_engine = _nnresample
except ImportError:
pass
# scipy.signal.resample works but the audio quality is poor
if not _engine:
try:
import scipy.signal
_engine = _scipy_signal
except ImportError:
pass
# audioop.ratecv isn't good, but it's built in.
if not _engine:
import audioop
_engine = _audioop
|
[
"nnresample.resample",
"samplerate.resample",
"numpy.ceil",
"signal.Clip",
"numpy.frombuffer",
"resampy.resample",
"numpy.iinfo",
"audioop.ratecv"
] |
[((472, 499), 'signal.Clip', 'signal.Clip', (['data', 'new_rate'], {}), '(data, new_rate)\n', (483, 499), False, 'import signal\n'), ((1152, 1194), 'resampy.resample', 'resampy.resample', (['data', 'old_rate', 'new_rate'], {}), '(data, old_rate, new_rate)\n', (1168, 1194), False, 'import resampy\n'), ((1359, 1413), 'nnresample.resample', 'nnresample.resample', (['data', 'new_rate', 'old_rate'], {'axis': '(-1)'}), '(data, new_rate, old_rate, axis=-1)\n', (1378, 1413), False, 'import nnresample\n'), ((2035, 2099), 'audioop.ratecv', 'audioop.ratecv', (['data', 'width', 'nchannels', 'old_rate', 'new_rate', 'None'], {}), '(data, width, nchannels, old_rate, new_rate, None)\n', (2049, 2099), False, 'import audioop\n'), ((2110, 2146), 'numpy.frombuffer', 'np.frombuffer', (['buf'], {'dtype': 'data.dtype'}), '(buf, dtype=data.dtype)\n', (2123, 2146), True, 'import numpy as np\n'), ((991, 1025), 'samplerate.resample', 'samplerate.resample', (['data.T', 'ratio'], {}), '(data.T, ratio)\n', (1010, 1025), False, 'import samplerate\n'), ((1528, 1559), 'numpy.ceil', 'np.ceil', (['(data.shape[-1] * ratio)'], {}), '(data.shape[-1] * ratio)\n', (1535, 1559), True, 'import numpy as np\n'), ((2221, 2239), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (2229, 2239), True, 'import numpy as np\n'), ((1949, 1967), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (1957, 1967), True, 'import numpy as np\n')]
|
import torch
import numpy as np
from .energy.base import Energy
from .sampling.base import Sampler
from .distribution import CustomDistribution
__all__ = ["ProductEnergy", "ProductSampler", "ProductDistribution"]
class ProductEnergy(Energy):
"""Stack multiple energies together to form an energy on the product space.
The energy on the product space is the sum of its independent components.
Parameters
----------
components : Sequence[Energy]
The individual energies that form the direct product.
cat_dim : int or None
If None, the .energy function takes multiple tensors (one for each component).
Otherwise, it expects one tensor that is then split along dimension `cat_dim`.
Notes
-----
The underlying components have to be single-event energies.
"""
def __init__(self, components, cat_dim=None, **kwargs):
event_shapes, lengths = _stacked_event_shapes([c.event_shape for c in components], cat_dim)
super().__init__(dim=event_shapes, **kwargs)
self._components = torch.nn.ModuleList(components)
self._cat_dim = cat_dim
self._lengths = lengths
def _energy(self, *xs):
if self._cat_dim is None:
assert len(xs) == len(self._components)
energies = [dist.energy(x) for dist, x in zip(self._components, xs)]
else:
assert len(xs) == 1
xs = xs[0].split(self._lengths, dim=self._cat_dim)
energies = [dist.energy(x) for x, dist in zip(xs, self._components)]
return torch.sum(torch.stack(energies, dim=-1), dim=-1)
def __getitem__(self, index):
return self._components[index]
def __iter__(self):
return self._components.__iter__()
def __len__(self):
return self._components.__len__()
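# Example (sketch): with cat_dim=-1 the energy receives one concatenated tensor and
# splits it into the component event sizes along the last dimension; with
# cat_dim=None it expects one tensor per component instead.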
class ProductSampler(Sampler):
"""Sampler on the product space.
Parameters
----------
components : Sequence[Sampler]
The individual samplers that form the direct product.
cat_dim : int or None
If None, the .sample function generates multiple tensors (one for each component).
Otherwise, it returns one tensor that is concatenated along dimension `cat_dim`.
"""
def __init__(self, components, cat_dim=None, **kwargs):
super().__init__(**kwargs)
self._components = torch.nn.ModuleList(components)
self._cat_dim = cat_dim
def _sample(self, n_samples):
samples = tuple(dist._sample(n_samples) for dist in self._components)
if self._cat_dim is None:
return samples
else:
return torch.cat(samples, dim=self._cat_dim)
def _sample_with_temperature(self, n_samples, temperature=1.0):
samples = tuple(dist._sample_with_temperature(n_samples, temperature) for dist in self._components)
if self._cat_dim is None:
return samples
else:
return torch.cat(samples, dim=self._cat_dim)
def __getitem__(self, index):
return self._components[index]
def __iter__(self):
return self._components.__iter__()
def __len__(self):
return self._components.__len__()
class ProductDistribution(CustomDistribution):
"""Distribution on a product space.
Encapsulate multiple distributions in one object.
Parameters
----------
components : Iterable
List of distributions.
cat_dim : int or None
The dimension along which samples from the individual components are concatenated.
If None, don't concatenate.
Notes
-----
The underlying components have to be single-event distributions.
"""
def __init__(self, components, cat_dim=None):
super().__init__(
energy=ProductEnergy(components=components, cat_dim=cat_dim),
sampler=ProductSampler(components=components, cat_dim=cat_dim)
)
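# Example (sketch): assuming `dist_a` and `dist_b` are single-event distributions from
# this package (each providing an energy and a sampler), and that CustomDistribution
# exposes the usual `sample`/`energy` interface, the product distribution concatenates
# samples along `cat_dim` and sums the component energies:
#   joint = ProductDistribution([dist_a, dist_b], cat_dim=-1)
#   samples = joint.sample(10)
#   energies = joint.energy(samples)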
def _stacked_event_shapes(event_shapes, cat_dim):
if cat_dim is None:
return event_shapes, None
else:
lengths = [e[cat_dim] for e in event_shapes]
shape = np.array(event_shapes[0])
# assert that shapes are consistent
for e in event_shapes:
assert len(e) == len(shape)
assert _shapes_consistent(e, shape, cat_dim)
# concatenate events along dimensions `cat_dim`
shape[cat_dim] = sum(s[cat_dim] for s in event_shapes)
event_shapes = torch.Size(shape.tolist())
return event_shapes, lengths
def _shapes_consistent(shape1, shape2, cat_dim):
"""check if shapes are the same in all dimensions but `cat_dim`"""
diff = np.abs(np.array(shape1) - shape2)
return diff.sum() == diff[cat_dim]
|
[
"torch.cat",
"numpy.array",
"torch.stack",
"torch.nn.ModuleList"
] |
[((1066, 1097), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['components'], {}), '(components)\n', (1085, 1097), False, 'import torch\n'), ((2354, 2385), 'torch.nn.ModuleList', 'torch.nn.ModuleList', (['components'], {}), '(components)\n', (2373, 2385), False, 'import torch\n'), ((4087, 4112), 'numpy.array', 'np.array', (['event_shapes[0]'], {}), '(event_shapes[0])\n', (4095, 4112), True, 'import numpy as np\n'), ((1573, 1602), 'torch.stack', 'torch.stack', (['energies'], {'dim': '(-1)'}), '(energies, dim=-1)\n', (1584, 1602), False, 'import torch\n'), ((2625, 2662), 'torch.cat', 'torch.cat', (['samples'], {'dim': 'self._cat_dim'}), '(samples, dim=self._cat_dim)\n', (2634, 2662), False, 'import torch\n'), ((2934, 2971), 'torch.cat', 'torch.cat', (['samples'], {'dim': 'self._cat_dim'}), '(samples, dim=self._cat_dim)\n', (2943, 2971), False, 'import torch\n'), ((4631, 4647), 'numpy.array', 'np.array', (['shape1'], {}), '(shape1)\n', (4639, 4647), True, 'import numpy as np\n')]
|
"""
for ssl, use nginx, and get cert as per
https://www.nginx.com/blog/using-free-ssltls-certificates-from-lets-encrypt-with-nginx/
example config
server {
server_name <url to server here>;
client_max_body_size 200M;
## Main site location.
location / {
proxy_pass http://127.0.0.1:5000;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-Host $server_name;
proxy_set_header X-Real-IP $remote_addr;
}
listen 443 ssl; # managed by Certbot
ssl_certificate /etc/letsencrypt/live/<url to server here>/fullchain.pem; # managed by Certbot
ssl_certificate_key /etc/letsencrypt/live/<url to server here>/privkey.pem; # managed by Certbot
include /etc/letsencrypt/options-ssl-nginx.conf; # managed by Certbot
ssl_dhparam /etc/letsencrypt/ssl-dhparams.pem; # managed by Certbot
}
server {
if ($host = <url to server here>) {
return 301 https://$host$request_uri;
} # managed by Certbot
listen 80 default_server;
server_name <url to server here>;
return 404; # managed by Certbot
}
"""
import datetime
import argparse
import uuid
import random
from typing import List, Optional
import os
from aiohttp import web
from aiohttp_cors.resource_options import ResourceOptions
from ruamel import yaml
import sqlalchemy
from sqlalchemy.orm import sessionmaker
import aiohttp_cors
import numpy as np
from mll.turk.webservice.task_creator import TaskCreator
from mll.turk.webservice import tables, datetime_utils
num_examples_per_game = 50
def get_unique_string():
return uuid.uuid4().hex
def meaning_to_str(meaning: List[int]):
return ','.join([str(v) for v in meaning])
def create_game_instance(
remote, requester_id: str, task_id: str, task_type: str, grammar: str, num_holdout: int, config):
seed = r.randint(1000000)
task_creator = TaskCreator(
task_type=task_type, seed=seed, grammar=grammar,
num_examples=num_examples_per_game)
game_instance = tables.GameInstance(
requester_id=requester_id, task_id=task_id, example_idx=0, seed=seed, num_steps=num_examples_per_game,
start_datetime=datetime_utils.datetime_to_str(datetime.datetime.now()), status='STARTED',
completion_code=get_unique_string(), max_cards=task_creator.max_cards, remote=remote, num_cards=2,
num_holdout=num_holdout, num_holdout_correct=0, cents_per_ten=config.cents_per_ten)
return game_instance
def get_config(task_id: str):
with open(f'mll/turk/webservice/configs/{task_id}.yaml') as f:
config = yaml.safe_load(f)
# print('config', config)
config['num_holdout'] = config.get('num_holdout', 0)
config['holdout_idxes'] = config.get('holdout_idxes', [])
config['cents_per_ten'] = config.get('cents_per_ten', 0)
# print('config', config)
return argparse.Namespace(**config)
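# Illustrative layout of a task config (hypothetical values; the real files live in
# mll/turk/webservice/configs/<task_id>.yaml). Only keys read here and in the
# handlers below are shown:
#   task_type: ...
#   grammar: ...
#   autoinc_every: 5
#   num_holdout: 0
#   holdout_idxes: []
#   cents_per_ten: 0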
class AutoInc:
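    """Raise `game_instance.num_cards` by one every `autoinc_every` completed examples
    (no-op when `autoinc_every` is None), never exceeding max_cards - num_holdout."""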
def __init__(self, autoinc_every: Optional[int], game_instance):
self.autoinc_every = autoinc_every
self.game_instance = game_instance
def __call__(self, example_idx):
if self.autoinc_every is not None:
if (example_idx + 1) % self.autoinc_every == 0:
_min_num_cards = (example_idx + 1) // self.autoinc_every + 2
_min_num_cards = min(self.game_instance.max_cards - self.game_instance.num_holdout, _min_num_cards)
if _min_num_cards > self.game_instance.num_cards:
self.game_instance.num_cards += 1
def handle_completion(game_instance):
end_datetime = datetime.datetime.now()
game_instance.finish_datetime = datetime_utils.datetime_to_str(end_datetime)
game_instance.status = 'COMPLETE'
start_datetime = datetime_utils.str_to_datetime(game_instance.start_datetime)
duration_seconds = datetime_utils.datetime_diff_seconds(end_datetime, start_datetime)
game_instance.duration_seconds = duration_seconds
session.commit()
return web.json_response({
'messageType': 'gameCompleted',
'taskId': game_instance.task_id,
'requesterId': game_instance.requester_id,
'score': game_instance.score,
'completionCode': game_instance.completion_code
})
def create_new_step_result(config, game_instance):
task_creator = TaskCreator(
task_type=config.task_type, seed=game_instance.seed,
num_examples=num_examples_per_game, grammar=config.grammar)
is_holdout = False
# print('holdout_idxes', config.holdout_idxes)
if game_instance.example_idx in config.holdout_idxes:
# print('creating holdout example')
# holdout example
holdout_idx = config.holdout_idxes.index(game_instance.example_idx)
idx = game_instance.num_cards + holdout_idx
is_holdout = True
else:
idx = random.randint(0, game_instance.num_cards - 1)
# print('num_cards', game_instance.num_cards, 'idx', idx)
ex = task_creator.create_example(idx=idx)
# print('ex', ex)
start_datetime = datetime_utils.datetime_to_str(datetime.datetime.now())
step_result = tables.StepResult(
requester_id=game_instance.requester_id, task_id=game_instance.task_id,
example_idx=game_instance.example_idx, image_path=ex['filepath'],
expected_utt=ex['expected'], meaning=meaning_to_str(ex['meaning']), start_datetime=start_datetime,
status='TODO', score=0, num_cards=game_instance.num_cards, is_holdout=is_holdout)
return step_result
async def _fetch(request_object, increment_idx: bool = False):
"""
Expected request, example:
{
requesterId: "ABCEEFGCDF",
taskId: "COMP",
}
response example:
{
pictureUrl: "img/asdevsafdfd.png",
requesterId: "ABCEEFGCDF",
taskId: "COMP",
exampleIdx: 15,
numCards: 3
}
"""
request = argparse.Namespace(**await request_object.json())
# print('_fetch', request)
config = get_config(request.taskId)
game_instance = session.query(tables.GameInstance).filter_by(
requester_id=request.requesterId, task_id=request.taskId).first()
if game_instance is None:
# print('creating new game instance')
client_ip = get_client_ip(request_object=request_object)
game_instance = create_game_instance(
requester_id=request.requesterId, task_id=request.taskId, remote=client_ip,
grammar=config.grammar, task_type=config.task_type,
num_holdout=config.num_holdout, config=config)
session.add(game_instance)
session.commit()
game_instance.example_idx
if game_instance.num_cards is None:
game_instance.num_cards = 2
# print('game_instance', game_instance, 'example_idx', game_instance.example_idx)
auto_inc = AutoInc(autoinc_every=config.autoinc_every, game_instance=game_instance)
step_result = session.query(tables.StepResult).filter_by(
requester_id=request.requesterId, task_id=request.taskId, example_idx=game_instance.example_idx).first()
if increment_idx:
if step_result.status == 'DONE':
game_instance.example_idx += 1
if game_instance.example_idx >= game_instance.num_steps:
return handle_completion(game_instance=game_instance)
step_result = None
auto_inc(game_instance.example_idx)
else:
increment_idx = False
if step_result is None:
step_result = create_new_step_result(
config=config, game_instance=game_instance)
session.add(step_result)
session.commit()
response = {
'messageType': 'example',
'taskId': request.taskId,
'requesterId': request.requesterId,
'exampleIdx': game_instance.example_idx,
'pictureUrl': step_result.image_path,
'score': game_instance.score,
'totalSteps': game_instance.num_steps,
'maxCards': game_instance.max_cards,
'numCards': game_instance.num_cards,
'isHoldout': step_result.is_holdout,
'cents_per_ten': game_instance.cents_per_ten
}
return web.json_response(response)
async def fetch_task(request):
return await _fetch(request)
async def fetch_next(request):
return await _fetch(request, increment_idx=True)
async def fetch_training_example(request_object):
request = argparse.Namespace(**await request_object.json())
# print('fetch_training_example', request)
game_instance = session.query(tables.GameInstance).filter_by(
requester_id=request.requesterId, task_id=request.taskId).first()
if game_instance is None:
print('game instance None => exiting')
return web.json_response({'messageType': 'error', 'error': 'game instance not found'})
config = get_config(task_id=request.taskId)
task_creator = TaskCreator(
task_type=config.task_type, seed=game_instance.seed,
num_examples=num_examples_per_game, grammar=config.grammar)
meaning_idx = random.randint(0, game_instance.num_cards - 1)
# print('num_cards', game_instance.num_cards, 'meaning_idx', meaning_idx)
ex = task_creator.create_example(idx=meaning_idx)
# print('ex', ex)
print(request.taskId)
response = {
'messageType': 'example',
'taskId': request.taskId,
'requesterId': request.requesterId,
'pictureUrl': ex['filepath'],
'utt': ex['expected'],
}
return web.json_response(response)
async def send_feedback(request_object):
request = argparse.Namespace(**await request_object.json())
print('send_feedback', request.feedback)
game_instance = session.query(tables.GameInstance).filter_by(
requester_id=request.requesterId, task_id=request.taskId).first()
if game_instance is None:
print('game instance None => exiting')
return web.json_response({'messageType': 'error', 'error': 'game instance not found'})
if game_instance.feedback is None:
game_instance.feedback = ''
game_instance.feedback = game_instance.feedback + request.feedback
session.commit()
response = {
'messageType': 'receivedFeedback',
'taskId': request.taskId,
'requesterId': request.requesterId,
}
return web.json_response(response)
def handle_correct(request, step_result, game_instance):
return_score = 1
if step_result.status == 'TODO':
step_result.status = 'DONE'
step_result.score = step_result.num_cards - 1
step_result.player_utt = request.code
step_result.finish_datetime = datetime_utils.datetime_to_str(datetime.datetime.now())
game_instance.score += step_result.score
if step_result.is_holdout:
game_instance.num_holdout_correct += 1
result_text = f'Yes! You get {step_result.score} points!'
else:
result_text = 'Yes! But you already submitted for this example :)'
return result_text, return_score
def handle_wrong(step_result, request):
result_text = 'No. The correct code is: ' + step_result.expected_utt
return_score = 0
if step_result.status == 'TODO':
step_result.status = 'DONE'
step_result.score = 0
step_result.player_utt = request.code
step_result.finish_datetime = datetime_utils.datetime_to_str(datetime.datetime.now())
return result_text, return_score
async def evaluate(request_object):
"""
request has keys requesterId, taskId, exampleIdx, code
response has keys requesterId, taskId, exampleIdx, resultText, score
"""
request = argparse.Namespace(**await request_object.json())
# print('evaluate', request)
game_instance = session.query(tables.GameInstance).filter_by(
requester_id=request.requesterId, task_id=request.taskId).first()
# print('game_instance', game_instance)
step_result = session.query(tables.StepResult).filter_by(
requester_id=request.requesterId, task_id=request.taskId, example_idx=game_instance.example_idx).first()
if game_instance.score is None:
game_instance.score = 0
if step_result.expected_utt == request.code:
result_str = '=='
result_text, return_score = handle_correct(
game_instance=game_instance, request=request, step_result=step_result)
else:
result_str = '!='
result_text, return_score = handle_wrong(step_result=step_result, request=request)
session.commit()
print(
request.taskId, request.code, result_str, step_result.expected_utt,
'ex=' + str(game_instance.example_idx), 'score=' + str(game_instance.score),
'num_ho=' + str(game_instance.num_holdout_correct)
)
response = {
'requesterId': request.requesterId,
'taskId': request.taskId,
'exampleCorrect': return_score,
'score': game_instance.score,
'resultText': result_text
}
return web.json_response(response)
async def add_card(request_object):
request = argparse.Namespace(**await request_object.json())
print('add_card', request)
game_instance = session.query(tables.GameInstance).filter_by(
requester_id=request.requesterId, task_id=request.taskId).first()
print('game_instance', game_instance)
if game_instance.num_cards < game_instance.max_cards - game_instance.num_holdout:
game_instance.num_cards += 1
session.commit()
return web.json_response({
'numCards': game_instance.num_cards
})
async def remove_card(request_object):
request = argparse.Namespace(**await request_object.json())
print('remove_card', request)
game_instance = session.query(tables.GameInstance).filter_by(
requester_id=request.requesterId, task_id=request.taskId).first()
print('game_instance', game_instance)
if game_instance.num_cards > 2:
game_instance.num_cards -= 1
session.commit()
return web.json_response({
'numCards': game_instance.num_cards
})
def get_client_ip(request_object):
return request_object.headers.get('X-Real-IP', request_object.remote)
async def diag(request):
print('request.remote', request.remote, 'host', request.host)
print('peername', request.transport.get_extra_info('peername'))
print('headers', request.headers)
print('client ip', request.headers.get('X-Real-IP', 'not found'))
print('client ip', get_client_ip(request))
response = {}
return web.json_response(response)
if not os.path.isdir('data'):
os.makedirs('data')
engine = sqlalchemy.create_engine('sqlite:///data/turk.db', echo=False)
print('engine', engine)
Session = sessionmaker(bind=engine)
print('Session', Session)
session = Session()
tables.create_tables(engine)
r = np.random.RandomState()
app = web.Application()
# await remotes_setup(app)
cors = aiohttp_cors.setup(app, defaults={
'*': ResourceOptions(allow_credentials=False, allow_headers=['content-type'])
})
app.add_routes([
web.static('/html/img', 'html/img'),
web.static('/web', 'mll/turk/turk-web/build', show_index=False),
web.static('/favicon', 'mll/turk/turk-web/public/favicon/', show_index=False)
])
cors.add(app.router.add_route('POST', r'/api/v1/fetch_task', fetch_task))
cors.add(app.router.add_route('POST', r'/api/v1/fetch_next', fetch_next))
cors.add(app.router.add_route('POST', r'/api/v1/fetch_training_example', fetch_training_example))
cors.add(app.router.add_route('POST', r'/api/v1/evaluate', evaluate))
cors.add(app.router.add_route('POST', r'/api/v1/add_card', add_card))
cors.add(app.router.add_route('POST', r'/api/v1/remove_card', remove_card))
cors.add(app.router.add_route('POST', r'/api/v1/send_feedback', send_feedback))
cors.add(app.router.add_route('GET', r'/api/v1/diag', diag))
if __name__ == '__main__':
web.run_app(app)
|
[
"argparse.Namespace",
"mll.turk.webservice.datetime_utils.datetime_to_str",
"aiohttp_cors.resource_options.ResourceOptions",
"mll.turk.webservice.datetime_utils.str_to_datetime",
"random.randint",
"ruamel.yaml.safe_load",
"mll.turk.webservice.datetime_utils.datetime_diff_seconds",
"numpy.random.RandomState",
"aiohttp.web.json_response",
"aiohttp.web.static",
"aiohttp.web.run_app",
"datetime.datetime.now",
"aiohttp.web.Application",
"mll.turk.webservice.tables.create_tables",
"sqlalchemy.orm.sessionmaker",
"uuid.uuid4",
"mll.turk.webservice.task_creator.TaskCreator",
"os.makedirs",
"os.path.isdir",
"sqlalchemy.create_engine"
] |
[((14626, 14688), 'sqlalchemy.create_engine', 'sqlalchemy.create_engine', (['"""sqlite:///data/turk.db"""'], {'echo': '(False)'}), "('sqlite:///data/turk.db', echo=False)\n", (14650, 14688), False, 'import sqlalchemy\n'), ((14723, 14748), 'sqlalchemy.orm.sessionmaker', 'sessionmaker', ([], {'bind': 'engine'}), '(bind=engine)\n', (14735, 14748), False, 'from sqlalchemy.orm import sessionmaker\n'), ((14795, 14823), 'mll.turk.webservice.tables.create_tables', 'tables.create_tables', (['engine'], {}), '(engine)\n', (14815, 14823), False, 'from mll.turk.webservice import tables, datetime_utils\n'), ((14829, 14852), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (14850, 14852), True, 'import numpy as np\n'), ((14860, 14877), 'aiohttp.web.Application', 'web.Application', ([], {}), '()\n', (14875, 14877), False, 'from aiohttp import web\n'), ((1991, 2092), 'mll.turk.webservice.task_creator.TaskCreator', 'TaskCreator', ([], {'task_type': 'task_type', 'seed': 'seed', 'grammar': 'grammar', 'num_examples': 'num_examples_per_game'}), '(task_type=task_type, seed=seed, grammar=grammar, num_examples=\n num_examples_per_game)\n', (2002, 2092), False, 'from mll.turk.webservice.task_creator import TaskCreator\n'), ((2964, 2992), 'argparse.Namespace', 'argparse.Namespace', ([], {}), '(**config)\n', (2982, 2992), False, 'import argparse\n'), ((3678, 3701), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3699, 3701), False, 'import datetime\n'), ((3738, 3782), 'mll.turk.webservice.datetime_utils.datetime_to_str', 'datetime_utils.datetime_to_str', (['end_datetime'], {}), '(end_datetime)\n', (3768, 3782), False, 'from mll.turk.webservice import tables, datetime_utils\n'), ((3842, 3902), 'mll.turk.webservice.datetime_utils.str_to_datetime', 'datetime_utils.str_to_datetime', (['game_instance.start_datetime'], {}), '(game_instance.start_datetime)\n', (3872, 3902), False, 'from mll.turk.webservice import tables, datetime_utils\n'), ((3926, 3992), 'mll.turk.webservice.datetime_utils.datetime_diff_seconds', 'datetime_utils.datetime_diff_seconds', (['end_datetime', 'start_datetime'], {}), '(end_datetime, start_datetime)\n', (3962, 3992), False, 'from mll.turk.webservice import tables, datetime_utils\n'), ((4079, 4294), 'aiohttp.web.json_response', 'web.json_response', (["{'messageType': 'gameCompleted', 'taskId': game_instance.task_id,\n 'requesterId': game_instance.requester_id, 'score': game_instance.score,\n 'completionCode': game_instance.completion_code}"], {}), "({'messageType': 'gameCompleted', 'taskId': game_instance.\n task_id, 'requesterId': game_instance.requester_id, 'score':\n game_instance.score, 'completionCode': game_instance.completion_code})\n", (4096, 4294), False, 'from aiohttp import web\n'), ((4404, 4532), 'mll.turk.webservice.task_creator.TaskCreator', 'TaskCreator', ([], {'task_type': 'config.task_type', 'seed': 'game_instance.seed', 'num_examples': 'num_examples_per_game', 'grammar': 'config.grammar'}), '(task_type=config.task_type, seed=game_instance.seed,\n num_examples=num_examples_per_game, grammar=config.grammar)\n', (4415, 4532), False, 'from mll.turk.webservice.task_creator import TaskCreator\n'), ((8216, 8243), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (8233, 8243), False, 'from aiohttp import web\n'), ((8941, 9069), 'mll.turk.webservice.task_creator.TaskCreator', 'TaskCreator', ([], {'task_type': 'config.task_type', 'seed': 'game_instance.seed', 'num_examples': 'num_examples_per_game', 'grammar': 
'config.grammar'}), '(task_type=config.task_type, seed=game_instance.seed,\n num_examples=num_examples_per_game, grammar=config.grammar)\n', (8952, 9069), False, 'from mll.turk.webservice.task_creator import TaskCreator\n'), ((9102, 9148), 'random.randint', 'random.randint', (['(0)', '(game_instance.num_cards - 1)'], {}), '(0, game_instance.num_cards - 1)\n', (9116, 9148), False, 'import random\n'), ((9545, 9572), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (9562, 9572), False, 'from aiohttp import web\n'), ((10360, 10387), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (10377, 10387), False, 'from aiohttp import web\n'), ((13008, 13035), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (13025, 13035), False, 'from aiohttp import web\n'), ((13510, 13566), 'aiohttp.web.json_response', 'web.json_response', (["{'numCards': game_instance.num_cards}"], {}), "({'numCards': game_instance.num_cards})\n", (13527, 13566), False, 'from aiohttp import web\n'), ((14011, 14067), 'aiohttp.web.json_response', 'web.json_response', (["{'numCards': game_instance.num_cards}"], {}), "({'numCards': game_instance.num_cards})\n", (14028, 14067), False, 'from aiohttp import web\n'), ((14532, 14559), 'aiohttp.web.json_response', 'web.json_response', (['response'], {}), '(response)\n', (14549, 14559), False, 'from aiohttp import web\n'), ((14569, 14590), 'os.path.isdir', 'os.path.isdir', (['"""data"""'], {}), "('data')\n", (14582, 14590), False, 'import os\n'), ((14596, 14615), 'os.makedirs', 'os.makedirs', (['"""data"""'], {}), "('data')\n", (14607, 14615), False, 'import os\n'), ((15880, 15896), 'aiohttp.web.run_app', 'web.run_app', (['app'], {}), '(app)\n', (15891, 15896), False, 'from aiohttp import web\n'), ((1702, 1714), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1712, 1714), False, 'import uuid\n'), ((2695, 2712), 'ruamel.yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (2709, 2712), False, 'from ruamel import yaml\n'), ((4926, 4972), 'random.randint', 'random.randint', (['(0)', '(game_instance.num_cards - 1)'], {}), '(0, game_instance.num_cards - 1)\n', (4940, 4972), False, 'import random\n'), ((5156, 5179), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5177, 5179), False, 'import datetime\n'), ((8792, 8871), 'aiohttp.web.json_response', 'web.json_response', (["{'messageType': 'error', 'error': 'game instance not found'}"], {}), "({'messageType': 'error', 'error': 'game instance not found'})\n", (8809, 8871), False, 'from aiohttp import web\n'), ((9957, 10036), 'aiohttp.web.json_response', 'web.json_response', (["{'messageType': 'error', 'error': 'game instance not found'}"], {}), "({'messageType': 'error', 'error': 'game instance not found'})\n", (9974, 10036), False, 'from aiohttp import web\n'), ((15053, 15088), 'aiohttp.web.static', 'web.static', (['"""/html/img"""', '"""html/img"""'], {}), "('/html/img', 'html/img')\n", (15063, 15088), False, 'from aiohttp import web\n'), ((15094, 15157), 'aiohttp.web.static', 'web.static', (['"""/web"""', '"""mll/turk/turk-web/build"""'], {'show_index': '(False)'}), "('/web', 'mll/turk/turk-web/build', show_index=False)\n", (15104, 15157), False, 'from aiohttp import web\n'), ((15163, 15240), 'aiohttp.web.static', 'web.static', (['"""/favicon"""', '"""mll/turk/turk-web/public/favicon/"""'], {'show_index': '(False)'}), "('/favicon', 'mll/turk/turk-web/public/favicon/', show_index=False)\n", (15173, 15240), False, 'from 
aiohttp import web\n'), ((10710, 10733), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10731, 10733), False, 'import datetime\n'), ((11412, 11435), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11433, 11435), False, 'import datetime\n'), ((14956, 15028), 'aiohttp_cors.resource_options.ResourceOptions', 'ResourceOptions', ([], {'allow_credentials': '(False)', 'allow_headers': "['content-type']"}), "(allow_credentials=False, allow_headers=['content-type'])\n", (14971, 15028), False, 'from aiohttp_cors.resource_options import ResourceOptions\n'), ((2311, 2334), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2332, 2334), False, 'import datetime\n')]
|
#######################################################################
# Copyright (C) 2017 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import numpy as np
import pickle
import logging
from load_mnist import *
from Backprop import *
tag = 'online_MNIST'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('log/%s.txt' % tag)
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
train_x, train_y = load_mnist('training')
test_x, test_y = load_mnist('testing')
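# Flatten each 28x28 image into a 784-vector and append a constant 1 as a bias input.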
dim_in = 28 * 28
train_examples = 5000
test_examples = 100
train_x = train_x[: train_examples, :].reshape([-1, dim_in])
train_x = np.concatenate((train_x, np.ones((train_examples, 1))), axis=1)
train_y = train_y[: train_examples, :]
test_x = test_x[: test_examples, :].reshape([-1, dim_in])
test_x = np.concatenate((test_x, np.ones((test_examples, 1))), axis=1)
test_y = test_y[: test_examples, :]
train_x = np.asarray(train_x)
train_y = np.asarray(train_y)
test_x = np.asarray(test_x)
test_y = np.asarray(test_y)
dims = [dim_in, 1024, 10]
labels = ['SMD', 'BP']
window_size = 0
train_acc = np.zeros((len(labels), train_examples))
def train(learning_rate):
# init_fn = orthogonal_init
init_fn = normal_init
gate_type = Relu
# gate_type = Tanh
smd = Backprop(dims, learning_rate, gate_type, SMDLayer, init_fn)
bp = Backprop(dims, learning_rate, gate_type, BPLayer, init_fn)
methods = [smd, bp]
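    # Online training: present one example at a time to both methods and log the
    # running accuracy of each.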
for train_index in range(len(train_x)):
for method_ind in range(len(methods)):
method = methods[method_ind]
x = train_x[train_index, :].reshape(1, -1)
y = train_y[train_index, :].reshape(1, -1)
correct_labels = method.train(x, y)
train_acc[method_ind, train_index] = correct_labels
if train_index - window_size >= 0:
# acc = np.mean(train_acc[method_ind, train_index - window_size: train_index])
acc = np.mean(train_acc[method_ind, :train_index])
logger.info('%s, %dth example, average accuracy %f' %
(labels[method_ind], train_index, acc))
else:
logger.info('%s, %dth example %d' %
(labels[method_ind], train_index, correct_labels))
with open('tmp/%s_%s_%s.bin' % (tag, gate_type().name, str(learning_rate)), 'wb') as f:
pickle.dump({'acc': train_acc}, f)
train(0.0001)
|
[
"pickle.dump",
"logging.FileHandler",
"numpy.asarray",
"logging.StreamHandler",
"numpy.ones",
"logging.Formatter",
"numpy.mean",
"logging.getLogger"
] |
[((458, 485), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (475, 485), False, 'import logging\n'), ((522, 561), 'logging.FileHandler', 'logging.FileHandler', (["('log/%s.txt' % tag)"], {}), "('log/%s.txt' % tag)\n", (541, 561), False, 'import logging\n'), ((594, 617), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (615, 617), False, 'import logging\n'), ((656, 729), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'], {}), "('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (673, 729), False, 'import logging\n'), ((1321, 1340), 'numpy.asarray', 'np.asarray', (['train_x'], {}), '(train_x)\n', (1331, 1340), True, 'import numpy as np\n'), ((1351, 1370), 'numpy.asarray', 'np.asarray', (['train_y'], {}), '(train_y)\n', (1361, 1370), True, 'import numpy as np\n'), ((1380, 1398), 'numpy.asarray', 'np.asarray', (['test_x'], {}), '(test_x)\n', (1390, 1398), True, 'import numpy as np\n'), ((1408, 1426), 'numpy.asarray', 'np.asarray', (['test_y'], {}), '(test_y)\n', (1418, 1426), True, 'import numpy as np\n'), ((1067, 1095), 'numpy.ones', 'np.ones', (['(train_examples, 1)'], {}), '((train_examples, 1))\n', (1074, 1095), True, 'import numpy as np\n'), ((1236, 1263), 'numpy.ones', 'np.ones', (['(test_examples, 1)'], {}), '((test_examples, 1))\n', (1243, 1263), True, 'import numpy as np\n'), ((2789, 2823), 'pickle.dump', 'pickle.dump', (["{'acc': train_acc}", 'f'], {}), "({'acc': train_acc}, f)\n", (2800, 2823), False, 'import pickle\n'), ((2356, 2400), 'numpy.mean', 'np.mean', (['train_acc[method_ind, :train_index]'], {}), '(train_acc[method_ind, :train_index])\n', (2363, 2400), True, 'import numpy as np\n')]
|
import numpy as np
from scipy.signal import convolve
import cv2
import os
def generateDefocusKernel(diameter, kernelSize=33):
"""
Generate a defocus kernel.
:param diameter: Diameter of the actual generated kernel.
:param kernelSize: Overall size of the kernel image in px.
:return: Generated defocus blur kernel.
"""
    # Ensure odd kernel diameter
if diameter % 2 == 0:
diameter += 1
# Generate centered defocus blur kernel.
kern = np.zeros((kernelSize, kernelSize), np.uint8)
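    # Note: with shift=1, OpenCV treats the centre coordinates and the radius as fixed-point
    # values with one fractional bit, so the filled, anti-aliased circle below is effectively
    # centred at (kernelSize/2, kernelSize/2) with radius diameter/2.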
cv2.circle(kern, (kernelSize, kernelSize), diameter, 255, -1, cv2.LINE_AA, shift=1)
# Normalize kernel to range [0,1]
kern = np.float32(kern) / 255.0
kern /= np.sum(kern)
return kern
def addDefocusBlur(img, diameter, keep_image_dim=True):
"""
    Generate a defocus blur kernel and apply it to an image.
:param img: Input image to blur.
:param diameter: Diameter of the generated defocus kernel.
:param keep_image_dim: Keep the input image dimensions during the convolution of image and kernel.
"""
# Decide convolution mode
conv_mode = "valid"
if keep_image_dim:
conv_mode = "same"
# Generate defocus blur kernel.
kernel = generateDefocusKernel(int(diameter))
resultChannels = ()
numChannels = 3 if len(img.shape) == 3 else 1
if numChannels > 1:
for channelIdx in range(numChannels):
# Convolve each image channel individually with the defocus kernel.
resultChannel = convolve(img[:,:,channelIdx], kernel, mode=conv_mode).astype("uint8")
# Collect blurred channels.
resultChannels += resultChannel,
result = np.dstack(resultChannels)
else:
result = convolve(img, kernel, mode=conv_mode).astype("uint8")
return result
# Example
# if __name__ == '__main__':
# dirIn = r"../../data/udacity/img/GT"
# imgName = "1478019984182279255.jpg"
# defocusDiameter = 11
# dirOut = os.path.join(r"../../data/udacity/img/defocusBlur", str(defocusDiameter))
# if not os.path.exists(dirOut):
# os.makedirs(dirOut)
# img = cv2.imread(os.path.join(dirIn, imgName))
# defocusedImg = addDefocusBlur(img, 11)
# cv2.imwrite(os.path.join(dirOut, imgName), defocusedImg)
|
[
"numpy.dstack",
"cv2.circle",
"numpy.sum",
"numpy.float32",
"numpy.zeros",
"scipy.signal.convolve"
] |
[((493, 537), 'numpy.zeros', 'np.zeros', (['(kernelSize, kernelSize)', 'np.uint8'], {}), '((kernelSize, kernelSize), np.uint8)\n', (501, 537), True, 'import numpy as np\n'), ((542, 629), 'cv2.circle', 'cv2.circle', (['kern', '(kernelSize, kernelSize)', 'diameter', '(255)', '(-1)', 'cv2.LINE_AA'], {'shift': '(1)'}), '(kern, (kernelSize, kernelSize), diameter, 255, -1, cv2.LINE_AA,\n shift=1)\n', (552, 629), False, 'import cv2\n'), ((717, 729), 'numpy.sum', 'np.sum', (['kern'], {}), '(kern)\n', (723, 729), True, 'import numpy as np\n'), ((680, 696), 'numpy.float32', 'np.float32', (['kern'], {}), '(kern)\n', (690, 696), True, 'import numpy as np\n'), ((1711, 1736), 'numpy.dstack', 'np.dstack', (['resultChannels'], {}), '(resultChannels)\n', (1720, 1736), True, 'import numpy as np\n'), ((1764, 1801), 'scipy.signal.convolve', 'convolve', (['img', 'kernel'], {'mode': 'conv_mode'}), '(img, kernel, mode=conv_mode)\n', (1772, 1801), False, 'from scipy.signal import convolve\n'), ((1539, 1594), 'scipy.signal.convolve', 'convolve', (['img[:, :, channelIdx]', 'kernel'], {'mode': 'conv_mode'}), '(img[:, :, channelIdx], kernel, mode=conv_mode)\n', (1547, 1594), False, 'from scipy.signal import convolve\n')]
|
#
# Copyright (c) 2018 TECHNICAL UNIVERSITY OF MUNICH, DEPARTMENT OF MECHANICAL ENGINEERING, CHAIR OF APPLIED MECHANICS,
# BOLTZMANNSTRASSE 15, 85748 GARCHING/MUNICH, GERMANY, <EMAIL>.
#
# Distributed under 3-Clause BSD license. See LICENSE file for more information.
#
r"""
This module describes different nonholonomic and holonomic constraints and provides base classes for them.
This module uses the following conventions:
Holonomic Constraint:
.. math::
g(u, t) = 0
The differential of the constraint (Pfaffian Form) is:
.. math::
\mathrm{d}g = B(u, t) \mathrm{d}u + b(u, t) \mathrm{d}t = 0
The above is the starting point for nonholonomic constraints but can simply be derived for holonomic constraints
via the derivatives of the constraint function g(u, t)
The total time derivative of the constraint describes the constraints on velocity level
.. math::
\frac{\mathrm{d}g}{\mathrm{d}t} = B(u, t) \cdot \dot{u} + b(u, t) = 0
The second time derivative of the constraint function describes the constraints on acceleration level:
.. math::
\frac{\mathrm{d}^2 g}{\mathrm{d} t^2} &= B(u, t) \cdot \ddot{u} + \frac{\mathrm{d}B(u, t)}{\mathrm{d} t} \cdot \
\dot{u} + \frac{\mathrm{d}b(u, t)}{\mathrm{d} t} \\
&= B(u, t) \cdot \ddot{u} + \frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
&= B(u, t) \cdot \ddot{u} + a(u, du, t) \\
&= 0
"""
import numpy as np
from ..linalg.norms import vector_norm
__all__ = ['NonholonomicConstraintBase',
'HolonomicConstraintBase',
'DirichletConstraint',
'FixedDistanceConstraint',
'FixedDistanceToLineConstraint',
'NodesCollinear2DConstraint',
'EqualDisplacementConstraint',
'FixedDistanceToPlaneConstraint',
'NodesCoplanarConstraint'
]
class NonholonomicConstraintBase:
NO_OF_CONSTRAINTS = 0
def __init__(self):
return
def after_assignment(self, dofids):
"""
Method that is called after assignment in Constraint Manager
No changes are needed in this function for most cases.
But it provides the opportunity to change the state of the constraint after it has been assigned to the
constraint manager
Parameters
----------
dofids: list or numpy.array
list or numpy.array containing the dofids of the dofs which are passed to the constraint
Returns
-------
None
"""
return
def B(self, X, u, t):
"""
Linear map from velocities to constraint function (c.f. module description)
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
current time
Returns
-------
B: ndarray
Linear map from velocities to constraint function
"""
raise NotImplementedError('The B has not been implemented for this constraint')
def b(self, X, u, t):
"""
Part of the nonholonomic constraint equation that is independent of the velocities
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Velocity independent part of the constraint function on velocity level
"""
        raise NotImplementedError('The partial time derivative of the constraint function is not implemented for this'
                                  ' constraint')
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
        raise NotImplementedError('The total time derivative of the partial time derivative is not implemented for this'
                                  ' constraint')
class HolonomicConstraintBase(NonholonomicConstraintBase):
NO_OF_CONSTRAINTS = 0
def __init__(self):
super().__init__()
return
def B(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. displacements u
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
raise NotImplementedError('The B has not been implemented for this constraint')
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
        raise NotImplementedError('The partial time derivative of the constraint function is not implemented for this'
                                  ' constraint')
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
        raise NotImplementedError('The total time derivative of the partial time derivative is not implemented for this'
                                  ' constraint')
def g(self, X, u, t):
"""
Residual of holonomic constraint-function g
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
g: ndarray
residual of holonomic constraint function
"""
raise NotImplementedError('The constraint function has not been implemented for this constraint')
class DirichletConstraint(HolonomicConstraintBase):
"""
Class to define a Dirichlet constraints on several dofs.
Attributes
----------
_U: function
contains the function of enforced displacements
_dU: function
contains the function of enforced velocities (time derivative of _U)
_ddU: function
contains the function of enforced accelerations (time derivative of _dU)
"""
NO_OF_CONSTRAINTS = 1
def __init__(self, U=(lambda t: 0.), dU=(lambda t: 0.), ddU=(lambda t: 0.)):
"""
A Dirichlet Constraint can be initialized with predefined displacements
Parameters
----------
U: function
function with signature float U: f(float: t)
describing enforced displacements
dU: function
function with signature float dU: f(float: t)
describing enforced velocities
ddU: function
function with signature float ddU: f(float: t)
describing enforced accelerations
"""
super().__init__()
self._U = U
self._dU = dU
self._ddU = ddU
return
def after_assignment(self, dofids):
"""
In this case the number of constraints is set after assignment because this is unknown before
Parameters
----------
dofids: list or numpy.array
list or numpy.array containing the dof-IDs of the dofs that are constrained by this Dirichlet Constraint
Returns
-------
None
"""
return
def g(self, X_local, u_local, t):
"""
Constraint-function for a fixed dirichlet constraint.
Parameters
----------
X_local: numpy.array
Empty numpy array because Dirichlet Constraints to not need node coordinates
u_local: numpy.array
current displacements for the dofs that shall be constrained
t: float
time
Returns
-------
g: ndarray
Residual of the holonomic constraint function
"""
return np.array(u_local - self._U(t), dtype=float)
def B(self, X_local, u_local, t):
"""
Jacobian of constraint-function w.r.t. displacements u
Parameters
----------
X_local: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u_local: numpy.array
current displacements for the dofs that shall be constrained
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
return np.array([1], dtype=float)
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([-self._dU(t)], ndmin=1)
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
return np.array([-self._ddU(t)], ndmin=1)
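# Hedged usage sketch (editorial addition, not part of the library): for a DirichletConstraint
# with enforced displacement U(t), the velocity-level relation from the module docstring,
# B(u, t) * du + b(u, t) = 0, reduces to du - dU(t) = 0 and can be checked directly:
#
#   import numpy as np
#   c = DirichletConstraint(U=lambda t: 0.1 * t, dU=lambda t: 0.1, ddU=lambda t: 0.0)
#   X, t = np.array([]), 2.0
#   u = np.array([0.1 * t])       # displacement consistent with the constraint
#   du = np.array([0.1])          # velocity consistent with the constraint
#   assert abs(c.B(X, u, t).dot(du) + c.b(X, u, t)) < 1e-12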
class FixedDistanceConstraint(HolonomicConstraintBase):
"""
Class to define a fixed distance between two nodes.
"""
NO_OF_CONSTRAINTS = 1
def __init__(self):
super().__init__()
return
def g(self, X_local, u_local, t):
"""
Return residual of constraint function for a fixed distance constraint between two nodes.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for both points just concatenated
e.g. [x1 y1 z1 x2 y2 z2], [x1 y1 x2 y2] for 3D or 2D problems respectively
u_local: numpy.array
current displacements for both points just concatenated (c.f. X_local)
t: float
time, default: 0, in this case not necessary because fixed distance does not
change over time
Returns
-------
g : numpy.array
Residual of constraint function
"""
dofs_per_node = len(u_local) // 2
u1 = u_local[:dofs_per_node]
u2 = u_local[dofs_per_node:]
X1 = X_local[:dofs_per_node]
X2 = X_local[dofs_per_node:]
x1 = X1 + u1
x2 = X2 + u2
scaling = vector_norm(X2 - X1)
return np.array((vector_norm(x2 - x1) - vector_norm(X2 - X1)) * 10. / scaling, dtype=float, ndmin=1)
def B(self, X_local, u_local, t):
"""
Return derivative of c_equation with respect to u for a Fixed Distance constraint.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for degrees of freedom
e.g. [x1 x2 y3 y4 z5] if x-direction of node 1 and 2, y-direction node 3 and 4 and z-direction of node 5 is
constrained
u_local: numpy.array
current displacements for the dofs that shall be constrained
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
dofs_per_node = len(u_local) // 2
u1 = u_local[:dofs_per_node]
u2 = u_local[dofs_per_node:]
X1 = X_local[:dofs_per_node]
X2 = X_local[dofs_per_node:]
x1 = X1 + u1
x2 = X2 + u2
scaling = vector_norm(X2 - X1)
l_current = vector_norm(x2 - x1)
return 10.0 * np.concatenate((-(x2 - x1) / (l_current * scaling), (x2 - x1) / (l_current * scaling)))
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([0.0], dtype=float, ndmin=1)
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
# a consists of four terms:
# 1. partial derivative dB/du * du^2
no_of_dofs = len(u)
delta = 1e-8*vector_norm(u) + 1e-8
dBdu = np.zeros((1, no_of_dofs))
uplus = u.copy()
uminus = u.copy()
for i in range(no_of_dofs):
uplus[:] = u
uplus[i] = uplus[i] + delta
uminus[:] = u
uminus[i] = uminus[i] - delta
jplus = self.B(X, uplus, t)
jminus = self.B(X, uminus, t)
dBdu[:] += (jplus - jminus)/(2*delta)*du[i]
# 2. partial derivative dB/dt
delta = 1e-8
dBdt = (self.B(X, u, t + delta) - self.B(X, u, t - delta)) / (2 * delta)
# 3. partial derivative db/du is zero
# 4. partial derivative db/dt is zero
return dBdu.dot(du) + dBdt.dot(du)
class FixedDistanceToLineConstraint(HolonomicConstraintBase):
NO_OF_CONSTRAINTS = 1
def __init__(self):
"""
"""
super().__init__()
return
def g(self, X_local, u_local, t):
"""
Constraint-function for a fixed distance to line constraint.
        This function calculates the residual of the constraint for a Fixed
        Distance To Line Constraint. The idea is that a set of nodes forms a line
        (not necessarily a straight line), and a point x3 is constrained to keep
        a fixed distance from this line, based on linear approximations of the
        line by its nodes.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for 2 points forming a line
and a third point that shall keep the same distance to this line
e.g. [x1 y1 z1 x2 y2 z2 x3 y3 z3], [x1 y1 x2 y2 x3 y3] for 3D or 2D problems respectively
u_local: numpy.array
current displacements for both points just concatenated (c.f. X_local)
t: float
time
Returns
-------
g: ndarray
Residual of the holonomic constraint function
"""
dofs_per_node = len(u_local)//3
X1 = X_local[:dofs_per_node]
X2 = X_local[dofs_per_node:2*dofs_per_node]
X3 = X_local[-dofs_per_node:]
x = X_local + u_local
x1 = x[:dofs_per_node]
x2 = x[dofs_per_node:2*dofs_per_node]
x3 = x[-dofs_per_node:]
# The vector of direction of the line is
line_dir = x2 - x1
# x3_dir is the vector from x1 to x3, so that we can find the distance
x3_dir = x3 - x1
# The norm of the rejection of x3_dir relative to line_dir gives us the
# distance from x3 to the small line we have
# rejection current is the perpendicular line from line to x3
# therefore the norm of rejection_current is the distance of x3 to the line
rejection_current = x3_dir - ((x3_dir.dot(line_dir))
/(np.linalg.norm(line_dir))**2)*line_dir
# Calculate the initial rejection vector
initial_dir = X2 - X1
X3_dir_initial = X3 - X1
rejection_initial = X3_dir_initial - ((X3_dir_initial.dot(initial_dir)) /
(np.linalg.norm(initial_dir))**2)*initial_dir
# the squared current distance must be equal to the squared initial distance
return np.array([rejection_current.dot(rejection_current) - rejection_initial.dot(rejection_initial)],
ndmin=1)
def B(self, X_local, u_local, t):
"""
Jacobian of constraint-function w.r.t. displacements u
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for 2 points forming a line
and a third point that shall keep the same distance to this line
e.g. [x1 y1 z1 x2 y2 z2 x3 y3 z3], [x1 y1 x2 y2 x3 y3] for 3D or 2D problems respectively
u_local: numpy.array
current displacements for the dofs
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
dofs_per_node = len(u_local)//3
if dofs_per_node > 2:
x = X_local + u_local
x1 = x[:dofs_per_node]
x2 = x[dofs_per_node:2*dofs_per_node]
x3 = x[-dofs_per_node:]
r_12 = x2 - x1
r_13 = x3 - x1
a1 = x1[0]
b1 = x1[1]
c1 = x1[2]
a2 = x2[0]
b2 = x2[1]
c2 = x2[2]
# a3 = x3[0]
# b3 = x3[1]
# c3 = x3[2]
# r_13 = [a3-a1, b3-b1, c3-c1]
# r_12 = [a2-a1, b2-b1, c2-c1]
# coef is s in documentation
coef = ((r_13.dot(r_12))/(r_12.dot(r_12)))
v = r_13 - coef*r_12
# dcoefdP1 = ( r_12 @ (-I) ) / (r_12.dot(r_12))
# + ( r_13 @ ( (-I) / (r_12.dot(r_12))
# + (2*r_12.T @ r_13)/(r_12.dot(r_12)**2))
# drejdP1 = -I - (dcoefdP1.T @ r_12) + coef * I
# bP1 = 2 * v @ drejdP1.T
# -------------------------------------------------------------------
# Derivatives of coeff with respect to...
# ... x1
dcoefda1 = np.array([-1,0,0]).dot(r_12)/(r_12.dot(r_12)) \
+ r_13.dot(np.array([-1,0,0])/(r_12.dot(r_12)) \
+r_12*(2*(a2-a1)/(r_12.dot(r_12)**2)))
# ... y1
dcoefdb1 = np.array([0, -1, 0]).dot(r_12) / (r_12.dot(r_12)) \
+ r_13.dot(np.array([0, -1, 0]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (b2 - b1) / (r_12.dot(r_12) ** 2)))
# ... z1
dcoefdc1 = np.array([0, 0, -1]).dot(r_12) / (r_12.dot(r_12)) \
+ r_13.dot(np.array([0, 0, -1]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (c2 - c1) / (r_12.dot(r_12) ** 2)))
# ... x2
dcoefda2 = r_13.dot(np.array([1, 0, 0]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (a2 - a1) / (r_12.dot(r_12) ** 2)))
# ... y2
dcoefdb2 = r_13.dot(np.array([0, 1, 0]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (b2 - b1) / (r_12.dot(r_12) ** 2)))
# ... z2
dcoefdc2 = r_13.dot(np.array([0, 0, 1]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (c2 - c1) / (r_12.dot(r_12) ** 2)))
# ... x3
dcoefda3 = np.array([1, 0, 0]).dot(r_12) / (r_12.dot(r_12))
# ... y3
dcoefdb3 = np.array([0, 1, 0]).dot(r_12) / (r_12.dot(r_12))
# ... z3
dcoefdc3 = np.array([0, 0, 1]).dot(r_12) / (r_12.dot(r_12))
# END of derivatives of coeff
# All formulas checked by Meyer
# ----------------------------------------------------------------
# ----------------------------------------------------------------
# Comment by Meyer: THIS SECTION IS PROBABLY WRONG!
#
drejda1 = np.array([-1, 0, 0]) - dcoefda1*r_12 \
+ np.array([coef, 0, 0])
drejdb1 = np.array([0, -1, 0]) - dcoefdb1*r_12 \
+ np.array([0, coef, 0])
drejdc1 = np.array([0, 0, -1]) - dcoefdc1*r_12 \
+ np.array([0, 0, coef])
drejda2 = - dcoefda2*r_12 - np.array([coef, 0, 0])
drejdb2 = - dcoefdb2*r_12 - np.array([0, coef, 0])
drejdc2 = - dcoefdc2*r_12 - np.array([0, 0, coef])
drejda3 = np.array([1,0,0]) - dcoefda3*r_12
drejdb3 = np.array([0,1,0]) - dcoefdb3*r_12
drejdc3 = np.array([0,0,1]) - dcoefdc3*r_12
bx1 = np.array([
2*v.dot(drejda1),
2*v.dot(drejdb1),
2*v.dot(drejdc1)
])
bx2 = np.array([
2*v.dot(drejda2),
2*v.dot(drejdb2),
2*v.dot(drejdc2)
])
bx3 = np.array([
2*v.dot(drejda3),
2*v.dot(drejdb3),
2*v.dot(drejdc3)
])
b = np.concatenate((bx1,bx2,bx3))
else:
x = X_local + u_local
x1 = x[:dofs_per_node]
x2 = x[dofs_per_node:2*dofs_per_node]
x3 = x[-dofs_per_node:]
r_12 = x2 - x1
r_13 = x3 - x1
a1 = x1[0]
b1 = x1[1]
a2 = x2[0]
b2 = x2[1]
# coef is s in documentation
coef = ((r_13.dot(r_12))/(r_12.dot(r_12)))
v = r_13 - coef*r_12
# -------------------------------------------------------------------
# Derivatives of coeff with respect to...
# ... x1
dcoefda1 = np.array([-1,0]).dot(r_12)/(r_12.dot(r_12)) \
+ r_13.dot(np.array([-1,0])/(r_12.dot(r_12)) \
+r_12*(2*(a2-a1)/(r_12.dot(r_12)**2)))
# ... y1
dcoefdb1 = np.array([0, -1]).dot(r_12) / (r_12.dot(r_12)) \
+ r_13.dot(np.array([0, -1]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (b2 - b1) / (r_12.dot(r_12) ** 2)))
# ... x2
dcoefda2 = r_13.dot(np.array([1, 0]) / (r_12.dot(r_12)) \
+ r_12 * (2 * (a2 - a1) / (r_12.dot(r_12) ** 2)))
# ... y2
dcoefdb2 = np.array([0,0]).dot(r_12)/(r_12.dot(r_12)) \
+ r_13.dot(np.array([0,1])/(r_12.dot(r_12)) \
+r_12*(2*(b2-b1)/(r_12.dot(r_12)**2)))
# ... x3
dcoefda3 = np.array([1, 0]).dot(r_12) / (r_12.dot(r_12))
# ... y3
dcoefdb3 = np.array([0, 1]).dot(r_12) / (r_12.dot(r_12))
# END of derivatives of coeff
# All formulas checked by Meyer
# -----------------------------------------------------------------------
# Comment by Meyer: CAUTION: The following formulas seem to be wrong!
# The formulas in the thesis of Gruber seem to be correct but are not the same as here
drejda1 = np.array([-1, 0]) - dcoefda1*r_12 + np.array([coef, 0])
drejdb1 = np.array([0, -1]) - dcoefdb1*r_12 + np.array([0, coef])
drejda2 = - dcoefda2*r_12 - np.array([coef, 0])
drejdb2 = - dcoefdb2*r_12 - np.array([0, coef])
drejda3 = np.array([1,0]) - dcoefda3*r_12
drejdb3 = np.array([0,1]) - dcoefdb3*r_12
bx1 = np.array([
2*v.dot(drejda1),
2*v.dot(drejdb1)
])
bx2 = np.array([
2*v.dot(drejda2),
2*v.dot(drejdb2)
])
bx3 = np.array([
2*v.dot(drejda3),
2*v.dot(drejdb3)
])
b = np.concatenate((bx1,bx2,bx3))
return b
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([0.0], dtype=float, ndmin=1)
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
        raise NotImplementedError('The a entity has not been implemented for the fixed distance to line '
                                  'constraint yet')
class NodesCollinear2DConstraint(HolonomicConstraintBase):
"""
Class to define collinearity (three points on a line).
This function works with two given coordinates of the nodes and makes
them collinear. If you want a 3D effect, you have to make two constraints,
one for (X and Y) and the other for (X and Z or Y and Z).
This is made in this manner because each constraint can only remove one
degree of freedom of the system, not two, at a time.
    Caution: Only three points are allowed to be passed to the functions.
    Otherwise the results will be wrong."""
NO_OF_CONSTRAINTS = 1
def __init__(self):
"""
Constructor
"""
super().__init__()
return
def g(self, X_local, u_local, t):
"""
Constraint-function for a 2d collinear constraint.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for three points just concatenated but only for 2 dimensions
e.g. x and y [x1 y1 x2 y2 x3 y3] or y and z [y1 z1 y2 z2 y3 z3]
u_local: numpy.array
current displacements for three points just concatenated (c.f. X_local)
t: float
time
Returns
-------
g: ndarray
Residual of the holonomic constraint function
"""
x = X_local + u_local
x1 = x[:2]
x2 = x[2:4]
x3 = x[-2:]
# Three points are collinear if and only if the determinant of the matrix
# A here is zero.
A = np.hstack((np.vstack((x1, x2, x3)), np.ones((3, 1))))
return np.array([np.linalg.det(A)], ndmin=1) # **2
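    # Worked example (editorial addition): for the collinear points (0, 0), (1, 1) and (2, 2)
    # the matrix A is [[0, 0, 1], [1, 1, 1], [2, 2, 1]], whose determinant is 0, so g returns 0
    # and the constraint is satisfied; perturbing any one of the points makes g nonzero.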
def B(self, X_local, u_local, t):
"""
Jacobian of constraint-function w.r.t. displacements u
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for three points just concatenated but only for 2 dimensions
e.g. x and y [x1 y1 x2 y2 x3 y3] or y and z [y1 z1 y2 z2 y3 z3]
u_local: numpy.array
current displacements for three points just concatenated (c.f. X_local)
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
dofs_per_node = len(u_local) // 3
x = X_local + u_local
x1 = x[:dofs_per_node]
x2 = x[dofs_per_node:2 * dofs_per_node]
x3 = x[-dofs_per_node:]
# A = np.hstack((np.vstack((x1,x2,x3)), np.ones((3,1))))
# det_A = np.linalg.det(A)
a1 = x1[0]
b1 = x1[1]
a2 = x2[0]
b2 = x2[1]
a3 = x3[0]
b3 = x3[1]
b = np.array([
b2 - b3, # 2*(b2 - b3)*(det_A),
-a2 + a3, # -2*(a2 - a3)*(det_A), #
-b1 + b3, # -2*(b1 - b3)*(det_A), #
a1 - a3, # 2*(a1 - a3)*(det_A), #
b1 - b2, # 2*(b1 - b2)*(det_A), #
-a1 + a2, # -2*(a1 - a2)*(det_A), #
])
return b
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([0.0], dtype=float, ndmin=1)
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
        raise NotImplementedError('The a entity has not been implemented for the nodes collinear 2D '
                                  'constraint yet')
class EqualDisplacementConstraint(HolonomicConstraintBase):
"""
    Class to define an equal displacement constraint between two degrees of freedom.
"""
NO_OF_CONSTRAINTS = 1
def __init__(self):
super().__init__()
return
def g(self, X_local, u_local, t):
"""
Return residual of constraint function for an equal displacement constraint between two nodes.
Parameters
----------
X_local: numpy.array
not needed for this constraint
u_local: numpy.array
current displacements for both dofs
t: float
time
Returns
-------
g : numpy.array
Residual of constraint function
"""
return np.array([u_local[1] - u_local[0]], dtype=float, ndmin=1)
def B(self, X_local, u_local, t):
"""
Return derivative for an equal displacement constraint
Parameters
----------
X_local: numpy.array
not needed for this constraint
u_local: numpy.array
current displacements for the dofs that shall be constrained
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
return np.array([-1.0, 1.0], dtype=float)
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([0.0], dtype=float, ndmin=1)
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
return np.array([0.0], ndmin=1)
class FixedDistanceToPlaneConstraint(HolonomicConstraintBase):
"""
Class to define a fixed distance to plane constraint where three nodes define the plane
and one node has a fixed distance to it.
"""
NO_OF_CONSTRAINTS = 1
def __init__(self):
super().__init__()
        raise NotImplementedError('Theano is no longer compatible with AMfe; the constraint must be reimplemented')
def g(self, X_local, u_local, t):
"""
Return residual of constraint function for a fixed distance to plane constraint.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for four points just concatenated
The first three points define the plane, the fourth point shall have fixed distance
e.g. [x1 y1 z1 x2 y2 z2 x3 y3 z3 x4 y4 z4]
u_local: numpy.array
current displacements for all four points just concatenated (c.f. X_local)
t: float
time
Returns
-------
g : numpy.array
Residual of constraint function
"""
x = X_local + u_local
x1 = x[:3]
x2 = x[3:2 * 3]
x3 = x[2 * 3:3 * 3]
x4 = x[-3:]
plane_vector_1 = x2 - x1
plane_vector_2 = x3 - x1
plane_normal = np.cross(plane_vector_1, plane_vector_2)
plane_normal = plane_normal / (np.linalg.norm(plane_normal))
x4_vector = x4 - x1
X1 = X_local[:3]
X2 = X_local[3:2 * 3]
X3 = X_local[2 * 3:3 * 3]
X4 = X_local[-3:]
initial_vector_1 = X2 - X1
initial_vector_2 = X3 - X1
ini_plane_normal = np.cross(initial_vector_1, initial_vector_2)
ini_plane_normal = ini_plane_normal / (np.linalg.norm(ini_plane_normal))
X4_vector = X4 - X1
return np.array([np.dot(x4_vector, plane_normal) -
np.dot(X4_vector, ini_plane_normal) ], dtype=float, ndmin=1)
def B(self, X_local, u_local, t):
"""
Return derivative of c_equation with respect to u for a Fixed Distance constraint.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for degrees of freedom
e.g. [x1 x2 y3 y4 z5] if x-direction of node 1 and 2, y-direction node 3 and 4 and z-direction of node 5 is
constrained
u_local: numpy.array
current displacements for the dofs that shall be constrained
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
        raise NotImplementedError('Theano is no longer compatible with AMfe. This constraint is not implemented '
                                  'for now')
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([0.0], dtype=float, ndmin=1)
def a(self, X, u, du, t):
        raise NotImplementedError('The a entity has not been implemented for the fixed distance to '
                                  'plane constraint yet.')
class NodesCoplanarConstraint(HolonomicConstraintBase):
"""
Class to define a nodes coplanar constraint between four nodes.
"""
NO_OF_CONSTRAINTS = 1
def __init__(self):
super().__init__()
return
def g(self, X_local, u_local, t):
"""
Return residual of constraint function for a nodes coplanar constraint between four nodes.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for four points just concatenated
e.g. [x1 y1 z1 x2 y2 z2 x3 y3 z3 x4 y4 z4]
u_local: numpy.array
current displacements for all four nodes just concatenated (c.f. X_local)
t: float
time
Returns
-------
g : numpy.array
Residual of constraint function
"""
x = X_local + u_local
x1 = x[:3]
x2 = x[3:2 * 3]
x3 = x[2 * 3:3 * 3]
x4 = x[-3:]
# x1, x2, x3 and x4 are coplanar if the determinant of A is 0
A = np.hstack((x1.T - x4.T, x2.T - x4.T, x3.T - x4.T)).reshape((3, 3))
return np.array([np.linalg.det(A)], ndmin=1)
def B(self, X_local, u_local, t):
"""
Return derivative of c_equation with respect to u for a Fixed Distance constraint.
Parameters
----------
X_local: numpy.array
X-Coords in reference Domain for four points just concatenated
e.g. [x1 y1 z1 x2 y2 z2 x3 y3 z3 x4 y4 z4]
u_local: numpy.array
current displacements for the dofs that shall be constrained
t: float
time
Returns
-------
B: ndarray
Partial derivative of constraint function g w.r.t. displacements u
"""
x = X_local + u_local
x1 = x[:3]
x2 = x[3:2*3]
x3 = x[2*3:3*3]
x4 = x[-3:]
a1, b1, c1 = x1
a2, b2, c2 = x2
a3, b3, c3 = x3
a4, b4, c4 = x4
b = np.array([
b2*c3 - b2*c4 - b3*c2 + b3*c4 + b4*c2 - b4*c3, # a1
-a2*c3 + a2*c4 + a3*c2 - a3*c4 - a4*c2 + a4*c3, # b1
a2*b3 - a2*b4 - a3*b2 + a3*b4 + a4*b2 - a4*b3, # c1
-b1*c3 + b1*c4 + b3*c1 - b3*c4 - b4*c1 + b4*c3, # a2
a1*c3 - a1*c4 - a3*c1 + a3*c4 + a4*c1 - a4*c3, # b2
-a1*b3 + a1*b4 + a3*b1 - a3*b4 - a4*b1 + a4*b3, # c2
b1*c2 - b1*c4 - b2*c1 + b2*c4 + b4*c1 - b4*c2, # a3
-a1*c2 + a1*c4 + a2*c1 - a2*c4 - a4*c1 + a4*c2, # b3
a1*b2 - a1*b4 - a2*b1 + a2*b4 + a4*b1 - a4*b2, # c3
-b1*c2 + b1*c3 + b2*c1 - b2*c3 - b3*c1 + b3*c2, # a4
a1*c2 - a1*c3 - a2*c1 + a2*c3 + a3*c1 - a3*c2, # b4
-a1*b2 + a1*b3 + a2*b1 - a2*b3 - a3*b1 + a3*b2, # c4
])
# Checked that this is the same as in the thesis by Gruber. But only first two rows have been checked by Meyer
return b
def b(self, X, u, t):
"""
Partial Derivative of holonomic constraint function g w.r.t. time t
Parameters
----------
X: ndarray
local node coordinates of dofs in reference domain
u: ndarray
local displacements
t: float
time
Returns
-------
b: ndarray
Partial derivative of the constraint function g w.r.t. time t
"""
return np.array([0.0], dtype=float, ndmin=1)
def a(self, X, u, du, t):
r"""
It computes the inhomogeneous part on acceleration level
.. math::
\frac{\partial B(u, t)}{\partial u} \cdot \dot{u}^2 + \
\frac{\partial B(u, t)}{\partial t} \cdot \dot{u} + \frac{\partial b(u, t)}{\partial u} \dot{u} + \
\frac{\partial b(u, t)}{\partial t} \\
Parameters
----------
X: numpy.array
Empty numpy array because dirichlet constraints do not need information about node coordinates
u: numpy.array
current displacements for the dofs that shall be constrained
du: numpy.array
            current velocities for the dofs that shall be constrained
t: float
time
Returns
-------
a: numpy.array
The above described entity (inhomogeneous part of acceleration level constraint)
"""
# a consists of four terms:
# 1. partial derivative dB/du * du^2
raise NotImplementedError('The a entity has not been implemented for the coplanar constraint yet.')
|
[
"numpy.zeros",
"numpy.cross",
"numpy.ones",
"numpy.hstack",
"numpy.linalg.det",
"numpy.linalg.norm",
"numpy.array",
"numpy.dot",
"numpy.vstack",
"numpy.concatenate"
] |
[((10657, 10683), 'numpy.array', 'np.array', (['[1]'], {'dtype': 'float'}), '([1], dtype=float)\n', (10665, 10683), True, 'import numpy as np\n'), ((15100, 15137), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'float', 'ndmin': '(1)'}), '([0.0], dtype=float, ndmin=1)\n', (15108, 15137), True, 'import numpy as np\n'), ((16220, 16245), 'numpy.zeros', 'np.zeros', (['(1, no_of_dofs)'], {}), '((1, no_of_dofs))\n', (16228, 16245), True, 'import numpy as np\n'), ((27951, 27988), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'float', 'ndmin': '(1)'}), '([0.0], dtype=float, ndmin=1)\n', (27959, 27988), True, 'import numpy as np\n'), ((31819, 31886), 'numpy.array', 'np.array', (['[b2 - b3, -a2 + a3, -b1 + b3, a1 - a3, b1 - b2, -a1 + a2]'], {}), '([b2 - b3, -a2 + a3, -b1 + b3, a1 - a3, b1 - b2, -a1 + a2])\n', (31827, 31886), True, 'import numpy as np\n'), ((32618, 32655), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'float', 'ndmin': '(1)'}), '([0.0], dtype=float, ndmin=1)\n', (32626, 32655), True, 'import numpy as np\n'), ((34459, 34516), 'numpy.array', 'np.array', (['[u_local[1] - u_local[0]]'], {'dtype': 'float', 'ndmin': '(1)'}), '([u_local[1] - u_local[0]], dtype=float, ndmin=1)\n', (34467, 34516), True, 'import numpy as np\n'), ((35036, 35070), 'numpy.array', 'np.array', (['[-1.0, 1.0]'], {'dtype': 'float'}), '([-1.0, 1.0], dtype=float)\n', (35044, 35070), True, 'import numpy as np\n'), ((35545, 35582), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'float', 'ndmin': '(1)'}), '([0.0], dtype=float, ndmin=1)\n', (35553, 35582), True, 'import numpy as np\n'), ((36513, 36537), 'numpy.array', 'np.array', (['[0.0]'], {'ndmin': '(1)'}), '([0.0], ndmin=1)\n', (36521, 36537), True, 'import numpy as np\n'), ((37852, 37892), 'numpy.cross', 'np.cross', (['plane_vector_1', 'plane_vector_2'], {}), '(plane_vector_1, plane_vector_2)\n', (37860, 37892), True, 'import numpy as np\n'), ((38204, 38248), 'numpy.cross', 'np.cross', (['initial_vector_1', 'initial_vector_2'], {}), '(initial_vector_1, initial_vector_2)\n', (38212, 38248), True, 'import numpy as np\n'), ((39833, 39870), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'float', 'ndmin': '(1)'}), '([0.0], dtype=float, ndmin=1)\n', (39841, 39870), True, 'import numpy as np\n'), ((42075, 42836), 'numpy.array', 'np.array', (['[b2 * c3 - b2 * c4 - b3 * c2 + b3 * c4 + b4 * c2 - b4 * c3, -a2 * c3 + a2 *\n c4 + a3 * c2 - a3 * c4 - a4 * c2 + a4 * c3, a2 * b3 - a2 * b4 - a3 * b2 +\n a3 * b4 + a4 * b2 - a4 * b3, -b1 * c3 + b1 * c4 + b3 * c1 - b3 * c4 - \n b4 * c1 + b4 * c3, a1 * c3 - a1 * c4 - a3 * c1 + a3 * c4 + a4 * c1 - a4 *\n c3, -a1 * b3 + a1 * b4 + a3 * b1 - a3 * b4 - a4 * b1 + a4 * b3, b1 * c2 -\n b1 * c4 - b2 * c1 + b2 * c4 + b4 * c1 - b4 * c2, -a1 * c2 + a1 * c4 + \n a2 * c1 - a2 * c4 - a4 * c1 + a4 * c2, a1 * b2 - a1 * b4 - a2 * b1 + a2 *\n b4 + a4 * b1 - a4 * b2, -b1 * c2 + b1 * c3 + b2 * c1 - b2 * c3 - b3 *\n c1 + b3 * c2, a1 * c2 - a1 * c3 - a2 * c1 + a2 * c3 + a3 * c1 - a3 * c2,\n -a1 * b2 + a1 * b3 + a2 * b1 - a2 * b3 - a3 * b1 + a3 * b2]'], {}), '([b2 * c3 - b2 * c4 - b3 * c2 + b3 * c4 + b4 * c2 - b4 * c3, -a2 *\n c3 + a2 * c4 + a3 * c2 - a3 * c4 - a4 * c2 + a4 * c3, a2 * b3 - a2 * b4 -\n a3 * b2 + a3 * b4 + a4 * b2 - a4 * b3, -b1 * c3 + b1 * c4 + b3 * c1 - \n b3 * c4 - b4 * c1 + b4 * c3, a1 * c3 - a1 * c4 - a3 * c1 + a3 * c4 + a4 *\n c1 - a4 * c3, -a1 * b3 + a1 * b4 + a3 * b1 - a3 * b4 - a4 * b1 + a4 *\n b3, b1 * c2 - b1 * c4 - b2 * c1 + b2 * c4 + b4 * c1 - b4 * c2, -a1 * c2 +\n a1 * c4 + a2 * c1 - a2 * c4 - a4 * c1 + a4 * c2, a1 * b2 - 
a1 * b4 - a2 *\n b1 + a2 * b4 + a4 * b1 - a4 * b2, -b1 * c2 + b1 * c3 + b2 * c1 - b2 *\n c3 - b3 * c1 + b3 * c2, a1 * c2 - a1 * c3 - a2 * c1 + a2 * c3 + a3 * c1 -\n a3 * c2, -a1 * b2 + a1 * b3 + a2 * b1 - a2 * b3 - a3 * b1 + a3 * b2])\n', (42083, 42836), True, 'import numpy as np\n'), ((43645, 43682), 'numpy.array', 'np.array', (['[0.0]'], {'dtype': 'float', 'ndmin': '(1)'}), '([0.0], dtype=float, ndmin=1)\n', (43653, 43682), True, 'import numpy as np\n'), ((14538, 14629), 'numpy.concatenate', 'np.concatenate', (['(-(x2 - x1) / (l_current * scaling), (x2 - x1) / (l_current * scaling))'], {}), '((-(x2 - x1) / (l_current * scaling), (x2 - x1) / (l_current *\n scaling)))\n', (14552, 14629), True, 'import numpy as np\n'), ((24567, 24598), 'numpy.concatenate', 'np.concatenate', (['(bx1, bx2, bx3)'], {}), '((bx1, bx2, bx3))\n', (24581, 24598), True, 'import numpy as np\n'), ((27429, 27460), 'numpy.concatenate', 'np.concatenate', (['(bx1, bx2, bx3)'], {}), '((bx1, bx2, bx3))\n', (27443, 27460), True, 'import numpy as np\n'), ((37932, 37960), 'numpy.linalg.norm', 'np.linalg.norm', (['plane_normal'], {}), '(plane_normal)\n', (37946, 37960), True, 'import numpy as np\n'), ((38296, 38328), 'numpy.linalg.norm', 'np.linalg.norm', (['ini_plane_normal'], {}), '(ini_plane_normal)\n', (38310, 38328), True, 'import numpy as np\n'), ((23353, 23375), 'numpy.array', 'np.array', (['[coef, 0, 0]'], {}), '([coef, 0, 0])\n', (23361, 23375), True, 'import numpy as np\n'), ((23462, 23484), 'numpy.array', 'np.array', (['[0, coef, 0]'], {}), '([0, coef, 0])\n', (23470, 23484), True, 'import numpy as np\n'), ((23572, 23594), 'numpy.array', 'np.array', (['[0, 0, coef]'], {}), '([0, 0, coef])\n', (23580, 23594), True, 'import numpy as np\n'), ((23636, 23658), 'numpy.array', 'np.array', (['[coef, 0, 0]'], {}), '([coef, 0, 0])\n', (23644, 23658), True, 'import numpy as np\n'), ((23700, 23722), 'numpy.array', 'np.array', (['[0, coef, 0]'], {}), '([0, coef, 0])\n', (23708, 23722), True, 'import numpy as np\n'), ((23764, 23786), 'numpy.array', 'np.array', (['[0, 0, coef]'], {}), '([0, 0, coef])\n', (23772, 23786), True, 'import numpy as np\n'), ((23810, 23829), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (23818, 23829), True, 'import numpy as np\n'), ((23867, 23886), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (23875, 23886), True, 'import numpy as np\n'), ((23924, 23943), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (23932, 23943), True, 'import numpy as np\n'), ((26627, 26646), 'numpy.array', 'np.array', (['[coef, 0]'], {}), '([coef, 0])\n', (26635, 26646), True, 'import numpy as np\n'), ((26706, 26725), 'numpy.array', 'np.array', (['[0, coef]'], {}), '([0, coef])\n', (26714, 26725), True, 'import numpy as np\n'), ((26767, 26786), 'numpy.array', 'np.array', (['[coef, 0]'], {}), '([coef, 0])\n', (26775, 26786), True, 'import numpy as np\n'), ((26828, 26847), 'numpy.array', 'np.array', (['[0, coef]'], {}), '([0, coef])\n', (26836, 26847), True, 'import numpy as np\n'), ((26871, 26887), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (26879, 26887), True, 'import numpy as np\n'), ((26926, 26942), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (26934, 26942), True, 'import numpy as np\n'), ((30640, 30663), 'numpy.vstack', 'np.vstack', (['(x1, x2, x3)'], {}), '((x1, x2, x3))\n', (30649, 30663), True, 'import numpy as np\n'), ((30665, 30680), 'numpy.ones', 'np.ones', (['(3, 1)'], {}), '((3, 1))\n', (30672, 30680), True, 'import numpy as np\n'), 
((30709, 30725), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (30722, 30725), True, 'import numpy as np\n'), ((41111, 41161), 'numpy.hstack', 'np.hstack', (['(x1.T - x4.T, x2.T - x4.T, x3.T - x4.T)'], {}), '((x1.T - x4.T, x2.T - x4.T, x3.T - x4.T))\n', (41120, 41161), True, 'import numpy as np\n'), ((41203, 41219), 'numpy.linalg.det', 'np.linalg.det', (['A'], {}), '(A)\n', (41216, 41219), True, 'import numpy as np\n'), ((23290, 23310), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (23298, 23310), True, 'import numpy as np\n'), ((23399, 23419), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (23407, 23419), True, 'import numpy as np\n'), ((23509, 23529), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (23517, 23529), True, 'import numpy as np\n'), ((26591, 26608), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (26599, 26608), True, 'import numpy as np\n'), ((26670, 26687), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (26678, 26687), True, 'import numpy as np\n'), ((38384, 38415), 'numpy.dot', 'np.dot', (['x4_vector', 'plane_normal'], {}), '(x4_vector, plane_normal)\n', (38390, 38415), True, 'import numpy as np\n'), ((38443, 38478), 'numpy.dot', 'np.dot', (['X4_vector', 'ini_plane_normal'], {}), '(X4_vector, ini_plane_normal)\n', (38449, 38478), True, 'import numpy as np\n'), ((19026, 19050), 'numpy.linalg.norm', 'np.linalg.norm', (['line_dir'], {}), '(line_dir)\n', (19040, 19050), True, 'import numpy as np\n'), ((19308, 19335), 'numpy.linalg.norm', 'np.linalg.norm', (['initial_dir'], {}), '(initial_dir)\n', (19322, 19335), True, 'import numpy as np\n'), ((22179, 22198), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (22187, 22198), True, 'import numpy as np\n'), ((22358, 22377), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (22366, 22377), True, 'import numpy as np\n'), ((22537, 22556), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (22545, 22556), True, 'import numpy as np\n'), ((22707, 22726), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (22715, 22726), True, 'import numpy as np\n'), ((22801, 22820), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (22809, 22820), True, 'import numpy as np\n'), ((22895, 22914), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (22903, 22914), True, 'import numpy as np\n'), ((25700, 25716), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (25708, 25716), True, 'import numpy as np\n'), ((26077, 26093), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (26085, 26093), True, 'import numpy as np\n'), ((26168, 26184), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (26176, 26184), True, 'import numpy as np\n'), ((21429, 21449), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (21437, 21449), True, 'import numpy as np\n'), ((21511, 21531), 'numpy.array', 'np.array', (['[-1, 0, 0]'], {}), '([-1, 0, 0])\n', (21519, 21531), True, 'import numpy as np\n'), ((21656, 21676), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (21664, 21676), True, 'import numpy as np\n'), ((21742, 21762), 'numpy.array', 'np.array', (['[0, -1, 0]'], {}), '([0, -1, 0])\n', (21750, 21762), True, 'import numpy as np\n'), ((21913, 21933), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, -1])\n', (21921, 21933), True, 'import numpy as np\n'), ((21999, 22019), 'numpy.array', 'np.array', (['[0, 0, -1]'], {}), '([0, 0, 
-1])\n', (22007, 22019), True, 'import numpy as np\n'), ((25228, 25245), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (25236, 25245), True, 'import numpy as np\n'), ((25308, 25325), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (25316, 25325), True, 'import numpy as np\n'), ((25451, 25468), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (25459, 25468), True, 'import numpy as np\n'), ((25534, 25551), 'numpy.array', 'np.array', (['[0, -1]'], {}), '([0, -1])\n', (25542, 25551), True, 'import numpy as np\n'), ((25856, 25872), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (25864, 25872), True, 'import numpy as np\n'), ((25935, 25951), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (25943, 25951), True, 'import numpy as np\n')]
|
"""Provides a class to allow for lazy transposing and slicing operations on h5py datasets and zarr arrays
## Usage:
from lazy_ops import DatasetView
# h5py #
import h5py
dsetview = DatasetView(dataset) # dataset is an instance of h5py.Dataset
view1 = dsetview.lazy_slice[1:40:2,:,0:50:5].lazy_transpose([2,0,1]).lazy_slice[8,5:10]
# zarr #
import zarr
zarrview = DatasetView(zarray) # zarray is an instance of zarr.core.Array
view1 = zarrview.lazy_slice[1:10:2,:,5:10].lazy_transpose([0,2,1]).lazy_slice[0:3,1:4]
# reading from view on either h5py or zarr
A = view1[:] # Brackets on DataSetView call the h5py or zarr slicing method, returning the data
B = view1.dsetread() # same as view1[:]
# iterating on either h5py or zarr
for ib in view1.lazy_iter(axis=1):
print(ib[0])
"""
import numpy as np
from abc import ABCMeta, abstractmethod
from typing import Union
import h5py
installed_dataset_types = h5py.Dataset
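# Editorial note: HAVE_ZARR, zarr and the DatasetViewzarr subclass are referenced below but
# belong to the optional zarr part of the module, which is not shown here. A guarded import
# along the following lines (an assumption, not necessarily the library's exact code) keeps
# the zarr-related names resolvable when zarr is not installed:
try:
    import zarr
    installed_dataset_types = Union[installed_dataset_types, zarr.core.Array]
    HAVE_ZARR = True
except ImportError:
    HAVE_ZARR = False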
class DatasetView(metaclass=ABCMeta):
def __new__(cls, dataset: installed_dataset_types = None, slice_index=(np.index_exp[:],()), axis_order=None):
"""
Args:
dataset: the underlying dataset
slice_index: the aggregate slice and int indices after multiple lazy calls
axis_order: the aggregate axis_order after multiple transpositions
Returns:
lazy object
"""
if cls == DatasetView:
if isinstance(dataset,h5py.Dataset):
return DatasetViewh5py(dataset=dataset)
elif HAVE_ZARR:
if isinstance(dataset,zarr.core.Array):
return DatasetViewzarr(dataset=dataset)
            elif str(type(dataset)).find("zarr") != -1:
raise TypeError("To use DatasetView with a zarr array install zarr: \n pip install zarr\n")
raise TypeError("DatasetView requires either an h5py dataset or a zarr array as first argument")
else:
return super().__new__(cls)
def __init__(self, dataset: installed_dataset_types = None, slice_index=(np.index_exp[:],()), axis_order=None):
"""
Args:
dataset: the underlying dataset
slice_index: the aggregate slice and int indices after multiple lazy calls
axis_order: the aggregate axis_order after multiple transpositions
"""
if axis_order is None:
self._axis_order = tuple(range(len(dataset.shape)))
else:
self._axis_order = axis_order
self._lazy_slice_call = False
self._dataset = dataset
self._shape, self._key, self._int_index, self._axis_order = self._slice_shape(slice_index)
@property
def lazy_slice(self):
""" Indicator for lazy_slice calls """
self._lazy_slice_call = True
return self
@property
def dataset(self):
return self._dataset
@property
def shape(self):
return self._shape
def __len__(self):
return self.len()
def len(self):
return self.shape[0]
@property
def key(self):
""" The self.key slice is passed to the lazy instance """
return self._key
@property
def axis_order(self):
return self._axis_order
def _slice_tuple(self, key):
""" Allows single slice and int function calls
Args:
key: A slice object, or int
Returns:
The slice object tuple
"""
if isinstance(key, (slice,int,np.ndarray)):
key = key,
else:
key = *key,
return key
def _slice_shape(self, slice_):
""" For an slice returned by _slice_composition function, finds the shape
Args:
slice_: The slice and int_index object
Returns:
slice_shape: Shape of the slice object
slice_key: An equivalent slice tuple with positive starts and stops
int_index: a nested tuple, int_index records the information needed by dsetread to access data
Each element of int_index, denoted ind is given by:
ind[2] is the dataset axis at which the integer index operates
ind[1] is the value of the integer index entered by the user
ind[0] is the lazy_axis at which the integer index operates
                       , the lazy_axis being the axis number had the operations
                       been carried out by h5py instead of lazy_ops
axis_order: removes the elements of current axis_order where integer indexing has been applied
"""
int_ind = slice_[1]
slice_ = self._slice_tuple(slice_[0])
# converting the slice to regular slices that only contain integers
slice_regindices = [slice(*slice_[i].indices(self.dataset.shape[self.axis_order[i]])) if isinstance(slice_[i],slice)
else slice_[i]
for i in range(len(slice_))]
slice_shape = ()
int_index = ()
axis_order = ()
for i in range(len(slice_)):
if isinstance(slice_[i],slice):
slice_start, slice_stop, slice_step = slice_regindices[i].start, slice_regindices[i].stop, slice_regindices[i].step
if slice_step < 1:
raise ValueError("Slice step parameter must be positive")
if slice_stop < slice_start:
slice_start = slice_stop
slice_regindices[i] = slice(slice_start, slice_stop, slice_step)
slice_shape += (1 + (slice_stop - slice_start -1 )//slice_step if slice_stop != slice_start else 0,)
axis_order += (self.axis_order[i],)
elif isinstance(slice_[i],int):
int_index += ((i,slice_[i],self.axis_order[i]),)
else:
# slice_[i] is an iterator of integers
slice_shape += (len(slice_[i]),)
axis_order += (self.axis_order[i],)
slice_regindices = tuple(el for el in slice_regindices if not isinstance(el,int))
axis_order += tuple(self.axis_order[len(axis_order)+len(int_index)::])
int_index += int_ind
slice_shape += self.dataset.shape[len(slice_shape)+len(int_index)::]
return slice_shape, slice_regindices, int_index, axis_order
def __getitem__(self, new_slice):
""" supports python's colon slicing syntax
Args:
new_slice: the new slice to compose with the lazy instance's self.key slice
Returns:
lazy object
"""
key_reinit = self._slice_composition(new_slice)
if self._lazy_slice_call:
self._lazy_slice_call = False
return DatasetView(self.dataset, (key_reinit, self._int_index), self.axis_order)
return DatasetView(self.dataset, (key_reinit, self._int_index), self.axis_order).dsetread()
def lazy_iter(self, axis=0):
""" lazy iterator over the first axis
Modifications to the items are not stored
"""
for i in range(self._shape[axis]):
yield self.lazy_slice[(*np.index_exp[:]*axis,i)]
def __call__(self, new_slice):
""" allows lazy_slice function calls with slice objects as input"""
return self.__getitem__(new_slice)
def dsetread(self):
""" Returns the data
Returns:
numpy array
"""
# Note: Directly calling regionref with slices with a zero dimension does not
# retain shape information of the other dimensions
lazy_axis_order = self.axis_order
lazy_key = self.key
for ind in self._int_index:
lazy_axis_order = lazy_axis_order[:ind[0]] + (ind[2],) + lazy_axis_order[ind[0]:]
lazy_key = lazy_key[:ind[0]] + (ind[1],) + lazy_key[ind[0]:]
reversed_axis_order = sorted(range(len(lazy_axis_order)), key=lambda i: lazy_axis_order[i])
reversed_slice_key = tuple(lazy_key[i] for i in reversed_axis_order if i < len(lazy_key))
# this is equivalent to reducing the values in the self.axis_order to account for
# dimensions dropped by int indexing
reversed_axis_order_read = sorted(range(len(self.axis_order)), key=lambda i: self.axis_order[i])
axis_order_read = sorted(range(len(self.axis_order)), key=lambda i: reversed_axis_order_read[i])
return self.dataset[reversed_slice_key].transpose(axis_order_read)
def _slice_composition(self, new_slice):
""" composes a new_slice with the self.key slice
Args:
new_slice: The new slice
Returns:
merged slice object
"""
new_slice = self._slice_tuple(new_slice)
new_slice = self._ellipsis_slices(new_slice)
slice_result = ()
# Iterating over the new slicing tuple to change the merged dataset slice.
for i in range(len(new_slice)):
if isinstance(new_slice[i],slice):
if i < len(self.key):
# converting new_slice slice to regular slices,
                    # newkey_start, newkey_stop, newkey_step only contain positive or zero integers
newkey_start, newkey_stop, newkey_step = new_slice[i].indices(self._shape[i])
if newkey_step < 1:
# regionref requires step>=1 for dataset data calls
raise ValueError("Slice step parameter must be positive")
if newkey_stop < newkey_start:
newkey_start = newkey_stop
if isinstance(self.key[i],slice):
slice_result += (slice(min(self.key[i].start + self.key[i].step * newkey_start, self.key[i].stop),
min(self.key[i].start + self.key[i].step * newkey_stop, self.key[i].stop),
newkey_step * self.key[i].step),)
else:
# self.key[i] is an iterator of integers
slice_result += (self.key[i][new_slice[i]],)
else:
slice_result += (slice(*new_slice[i].indices(self.dataset.shape[self.axis_order[i]])),)
elif isinstance(new_slice[i],int):
if i < len(self.key):
if new_slice[i] >= self._shape[i] or new_slice[i] <= ~self._shape[i]:
raise IndexError("Index %d out of range, dim %d of size %d" % (new_slice[i],i,self._shape[i]))
if isinstance(self.key[i],slice):
int_index = self.key[i].start + self.key[i].step*(new_slice[i]%self._shape[i])
slice_result += (int_index,)
else:
# self.key[i] is an iterator of integers
slice_result += (self.key[i][new_slice[i]],)
else:
slice_result += (new_slice[i],)
else:
try:
if not all(isinstance(el,int) for el in new_slice[i]):
if new_slice[i].dtype.kind != 'b':
raise ValueError("Indices must be either integers or booleans")
else:
# boolean indexing
if len(new_slice[i]) != self.shape[i]:
raise IndexError("Length of boolean index $d must be equal to size %d in dim %d" % (len(new_slice[i]),self.shape[i],i))
new_slice_i = new_slice[i].nonzero()[0]
else:
new_slice_i = new_slice[i]
if i < len(self.key):
if any(el >= self._shape[i] or el <= ~self._shape[i] for el in new_slice_i):
raise IndexError("Index %s out of range, dim %d of size %d" % (str(new_slice_i),i,self._shape[i]))
if isinstance(self.key[i],slice):
slice_result += (tuple(self.key[i].start + self.key[i].step*(ind%self._shape[i]) for ind in new_slice_i),)
else:
# self.key[i] is an iterator of integers
slice_result += (tuple(self.key[i][ind] for ind in new_slice_i),)
else:
slice_result += (new_slice_i,)
except:
raise IndexError("Indices must be either integers, iterators of integers, slice objects, or numpy boolean arrays")
slice_result += self.key[len(new_slice):]
return slice_result
@property
def T(self):
""" Same as lazy_transpose() """
return self.lazy_transpose()
def lazy_transpose(self, axis_order=None):
""" Array lazy transposition, no axis_order reverses the order of dimensions
Args:
axis_order: permutation order for transpose
Returns:
lazy object
"""
if axis_order is None:
axis_order = tuple(reversed(range(len(self.axis_order))))
axis_order_reinit = tuple(self.axis_order[i] if i < len(self.axis_order) else i for i in axis_order)
key_reinit = tuple(self.key[i] if i < len(self.key) else np.s_[:] for i in axis_order)
key_reinit += tuple(self.key[i] for i in self.axis_order if i not in axis_order_reinit)
axis_order_reinit += tuple(i for i in self.axis_order if i not in axis_order_reinit)
return DatasetView(self.dataset, (key_reinit, self._int_index), axis_order_reinit)
def __array__(self):
""" Convert to numpy array
"""
return np.atleast_1d(self.dsetread())
def _ellipsis_slices(self, new_slice):
""" Change Ellipsis dimensions to slices
Args:
new_slice: The new slice
Returns:
equivalent slices with Ellipsis expanded
"""
ellipsis_count = sum(s==Ellipsis for s in new_slice if not isinstance(s,np.ndarray))
if ellipsis_count == 1:
ellipsis_index = new_slice.index(Ellipsis)
if ellipsis_index == len(new_slice)-1:
new_slice = new_slice[:-1]
else:
num_ellipsis_dims = len(self.shape) - (len(new_slice) - 1)
new_slice = new_slice[:ellipsis_index] + np.index_exp[:]*num_ellipsis_dims + new_slice[ellipsis_index+1:]
elif ellipsis_count > 0:
raise IndexError("Only a single Ellipsis is allowed")
return new_slice
def read_direct(self, dest, source_sel=None, dest_sel=None):
""" Using dataset.read_direct, reads data into an existing array
Args:
dest: C-contiguous as required by Dataset.read_direct
source_sel: new selection slice
dest_sel: output selection slice
Returns:
numpy array
"""
if source_sel is None:
new_key, new_int_index, new_axis_order = self.key, self._int_index, self.axis_order
else:
key_reinit = self._slice_composition(source_sel)
_, new_key, new_int_index, new_axis_order = self._slice_shape(key_reinit)
axis_order_slices = new_axis_order
for ind in new_int_index:
new_axis_order = new_axis_order[:ind[0]] + (ind[2],) + new_axis_order[ind[0]:]
new_key = new_key[:ind[0]] + (ind[1],) + new_key[ind[0]:]
reversed_axis_order = sorted(range(len(new_axis_order)), key=lambda i: new_axis_order[i])
reversed_slice_key = tuple(new_key[i] for i in reversed_axis_order if i < len(new_key))
# this is equivalent to reducing the values in the self.axis_order to account for
# dimensions dropped by int indexing
reversed_axis_order_read = sorted(range(len(axis_order_slices)), key=lambda i: axis_order_slices[i])
axis_order_read = sorted(range(len(axis_order_slices)), key=lambda i: reversed_axis_order_read[i])
reversed_dest_shape = tuple(dest.shape[i] for i in reversed_axis_order_read if i < len(dest.shape))
reversed_dest = np.empty(shape=reversed_dest_shape, dtype=dest.dtype)
if dest_sel is None:
reversed_dest_sel = dest_sel
else:
reversed_dest_sel = tuple(dest_sel[i] for i in reversed_axis_order if i < len(dest_sel))
self.dataset.read_direct(reversed_dest, source_sel=reversed_slice_key, dest_sel=reversed_dest_sel)
np.copyto(dest, reversed_dest.transpose(axis_order_read))
def lazy_transpose(dset: installed_dataset_types, axes=None):
""" Array lazy transposition, not passing axis argument reverses the order of dimensions
Args:
dset: h5py dataset
axes: permutation order for transpose
Returns:
lazy transposed DatasetView object
"""
if axes is None:
axes = tuple(reversed(range(len(dset.shape))))
return DatasetView(dset).lazy_transpose(axis_order=axes)
class DatasetViewh5py(DatasetView, h5py.Dataset):
def __new__(cls,dataset):
_self = super().__new__(cls)
h5py.Dataset.__init__(_self, dataset.id)
return _self
try:
import zarr
from .lazy_loading_zarr import DatasetViewzarr
installed_dataset_types = Union[installed_dataset_types,zarr.core.Array]
HAVE_ZARR = True
except ImportError:
HAVE_ZARR = False
DatasetViewzarr = None
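# Added usage sketch (illustration only; not part of the original module).
# A minimal example of how the DatasetView wrapper above might be used for lazy
# slicing and transposition, assuming it accepts a bare h5py dataset the same
# way the lazy_transpose helper above does. The dataset name 'data' and the
# temporary file are hypothetical.
if __name__ == '__main__':
    import os as _os
    import tempfile as _tempfile
    _fd, _fname = _tempfile.mkstemp(suffix='.h5')
    _os.close(_fd)
    with h5py.File(_fname, 'w') as _f:
        _f.create_dataset('data', data=np.arange(24).reshape(2, 3, 4))
    with h5py.File(_fname, 'r') as _f:
        view = DatasetView(_f['data'])            # nothing is read yet
        sub = view.lazy_slice[1, :, ::2]           # slices are composed lazily
        print(sub.dsetread())                      # the actual read happens here
        print(lazy_transpose(_f['data']).shape)    # axes lazily reversed
    _os.remove(_fname)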
|
[
"numpy.empty",
"h5py.Dataset.__init__"
] |
[((16211, 16264), 'numpy.empty', 'np.empty', ([], {'shape': 'reversed_dest_shape', 'dtype': 'dest.dtype'}), '(shape=reversed_dest_shape, dtype=dest.dtype)\n', (16219, 16264), True, 'import numpy as np\n'), ((17188, 17228), 'h5py.Dataset.__init__', 'h5py.Dataset.__init__', (['_self', 'dataset.id'], {}), '(_self, dataset.id)\n', (17209, 17228), False, 'import h5py\n')]
|
import io
import pytest
import os
import h5py
import tempfile
import warnings
from contextlib import contextmanager
import numpy as np
from numpy.testing import assert_allclose
from numpy.testing import assert_raises
from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed
from keras.layers import Bidirectional, GRU, LSTM
from keras.layers import Conv2D, Flatten, Activation
from keras.layers import Input, InputLayer
from keras.initializers import Constant
from keras import optimizers
from keras import losses
from keras import metrics
from keras.models import save_model, load_model
try:
from unittest.mock import patch
except:
from mock import patch
skipif_no_tf_gpu = True
def test_sequential_model_saving():
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(RepeatVector(3))
model.add(TimeDistributed(Dense(3)))
model.compile(loss=losses.MeanSquaredError(),
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=[metrics.categorical_accuracy],
sample_weight_mode='temporal')
x = np.random.random((1, 3))
y = np.random.random((1, 3, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
new_model = load_model(fname)
os.remove(fname)
x2 = np.random.random((1, 3))
y2 = np.random.random((1, 3, 3))
model.train_on_batch(x2, y2)
out_2 = model.predict(x2)
new_out = new_model.predict(x)
assert_allclose(out, new_out, atol=1e-05)
# test that new updates are the same with both models
new_model.train_on_batch(x2, y2)
new_out_2 = new_model.predict(x2)
assert_allclose(out_2, new_out_2, atol=1e-05)
def test_sequential_model_saving_2():
# test with custom optimizer, loss
custom_opt = optimizers.RMSprop
custom_loss = losses.mse
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(Dense(3))
model.compile(loss=custom_loss, optimizer=custom_opt(), metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
load_kwargs = {'custom_objects': {'custom_opt': custom_opt,
'custom_loss': custom_loss}}
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
new_model = load_model(fname, **load_kwargs)
os.remove(fname)
new_out = new_model.predict(x)
assert_allclose(out, new_out, atol=1e-05)
def _get_sample_model_and_input():
inputs = Input(shape=(3,))
x = Dense(2)(inputs)
outputs = Dense(3)(x)
model = Model(inputs, outputs)
model.compile(loss=losses.MSE,
optimizer=optimizers.Adam(),
metrics=[metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
return model, x
def test_functional_model_saving():
model, x = _get_sample_model_and_input()
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
new_model = load_model(fname)
os.remove(fname)
new_out = new_model.predict(x)
assert_allclose(out, new_out, atol=1e-05)
def DISABLED_test_model_saving_to_pre_created_h5py_file():
model, x = _get_sample_model_and_input()
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
with h5py.File(fname, mode='r+') as h5file:
save_model(model, h5file)
loaded_model = load_model(h5file)
out2 = loaded_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
# test non-default options in h5
with h5py.File('does not matter', driver='core',
backing_store=False) as h5file:
save_model(model, h5file)
loaded_model = load_model(h5file)
out2 = loaded_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
with h5py.File(fname, mode='r+') as h5file:
g = h5file.create_group('model')
save_model(model, g)
loaded_model = load_model(g)
out2 = loaded_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
@contextmanager
def temp_filename(filename):
"""Context that returns a temporary filename and deletes the file on exit if
it still exists (so that this is not forgotten).
"""
_, temp_fname = tempfile.mkstemp(filename)
yield temp_fname
if os.path.exists(temp_fname):
os.remove(temp_fname)
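# Added illustration (not part of the original test suite).
# A minimal sketch showing how the temp_filename context manager above can be
# combined with save_model/load_model; the helper name is hypothetical and the
# missing 'test_' prefix keeps it out of pytest collection.
def _example_temp_filename_roundtrip():
    model, x = _get_sample_model_and_input()
    with temp_filename('.h5') as fname:
        save_model(model, fname)
        reloaded = load_model(fname)
        assert_allclose(model.predict(x), reloaded.predict(x), atol=1e-05)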
def DISABLED_test_model_saving_to_binary_stream():
model, x = _get_sample_model_and_input()
out = model.predict(x)
with temp_filename('h5') as fname:
# save directly to binary file
with open(fname, 'wb') as raw_file:
save_model(model, raw_file)
# Load the data the usual way, and make sure the model is intact.
with h5py.File(fname, mode='r') as h5file:
loaded_model = load_model(h5file)
out2 = loaded_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def DISABLED_test_model_loading_from_binary_stream():
model, x = _get_sample_model_and_input()
out = model.predict(x)
with temp_filename('h5') as fname:
# save the model the usual way
with h5py.File(fname, mode='w') as h5file:
save_model(model, h5file)
# Load the data binary, and make sure the model is intact.
with open(fname, 'rb') as raw_file:
loaded_model = load_model(raw_file)
out2 = loaded_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def DISABLED_test_model_save_load_binary_in_memory():
model, x = _get_sample_model_and_input()
out = model.predict(x)
stream = io.BytesIO()
save_model(model, stream)
stream.seek(0)
loaded_model = load_model(stream)
out2 = loaded_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_multiple_metrics_outputs():
inputs = Input(shape=(5,))
x = Dense(5)(inputs)
output1 = Dense(1, name='output1')(x)
output2 = Dense(1, name='output2')(x)
model = Model(inputs=inputs, outputs=[output1, output2])
metrics = {'output1': ['mse', 'binary_accuracy'],
'output2': ['mse', 'binary_accuracy']
}
loss = {'output1': 'mse', 'output2': 'mse'}
model.compile(loss=loss, optimizer='sgd', metrics=metrics)
# assure that model is working
x = np.array([[1, 1, 1, 1, 1]])
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
os.remove(fname)
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_without_compilation():
"""Test saving model without compiling.
"""
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(Dense(3))
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
os.remove(fname)
def test_saving_right_after_compilation():
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.add(Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
os.remove(fname)
def test_saving_unused_layers_is_ok():
a = Input(shape=(256, 512, 6))
b = Input(shape=(256, 512, 1))
c = Lambda(lambda x: x[:, :, :, :1])(a)
model = Model(inputs=[a, b], outputs=c)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
load_model(fname)
os.remove(fname)
def test_loading_weights_by_name_2():
"""
    Test loading model weights by name on:
        - both sequential and functional API models
        - different architectures with shared layer names
"""
custom_loss = losses.mse
# sequential model
model = Sequential()
model.add(Dense(2, input_shape=(3,), name='rick'))
model.add(Dense(3, name='morty'))
model.compile(loss=custom_loss, optimizer='rmsprop', metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
old_weights = [layer.get_weights() for layer in model.layers]
_, fname = tempfile.mkstemp('.h5')
model.save_weights(fname)
# delete and recreate model using Functional API
    del model
data = Input(shape=(3,))
rick = Dense(2, name='rick')(data)
jerry = Dense(3, name='jerry')(rick) # add 2 layers (but maintain shapes)
jessica = Dense(2, name='jessica')(jerry)
morty = Dense(3, name='morty')(jessica)
model = Model(inputs=[data], outputs=[morty])
model.compile(loss=custom_loss, optimizer='rmsprop', metrics=['acc'])
# load weights from first model
model.load_weights(fname, by_name=True)
os.remove(fname)
out2 = model.predict(x)
assert np.max(np.abs(out - out2)) > 1e-05
rick = model.layers[1].get_weights()
jerry = model.layers[2].get_weights()
jessica = model.layers[3].get_weights()
morty = model.layers[4].get_weights()
assert_allclose(old_weights[0][0], rick[0], atol=1e-05)
assert_allclose(old_weights[0][1], rick[1], atol=1e-05)
assert_allclose(old_weights[1][0], morty[0], atol=1e-05)
assert_allclose(old_weights[1][1], morty[1], atol=1e-05)
assert_allclose(np.zeros_like(jerry[1]), jerry[1]) # biases init to 0
assert_allclose(np.zeros_like(jessica[1]), jessica[1]) # biases init to 0
def test_loading_weights_by_name_skip_mismatch():
"""
    Test skipping mismatched layers while loading model weights by name on:
- sequential model
"""
    # test with a custom loss (the optimizer is the built-in 'rmsprop')
custom_loss = losses.mse
# sequential model
model = Sequential()
model.add(Dense(2, input_shape=(3,), name='rick'))
model.add(Dense(3, name='morty'))
model.compile(loss=custom_loss, optimizer='rmsprop', metrics=['acc'])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
old_weights = [layer.get_weights() for layer in model.layers]
_, fname = tempfile.mkstemp('.h5')
model.save_weights(fname)
# delete and recreate model
    del model
model = Sequential()
model.add(Dense(2, input_shape=(3,), name='rick'))
model.add(Dense(4, name='morty')) # different shape w.r.t. previous model
model.compile(loss=custom_loss, optimizer='rmsprop', metrics=['acc'])
# load weights from first model
model.load_weights(fname, by_name=True, skip_mismatch=True)
os.remove(fname)
# assert layers 'rick' are equal
for old, new in zip(old_weights[0], model.layers[0].get_weights()):
assert_allclose(old, new, atol=1e-05)
# assert layers 'morty' are not equal, since we skipped loading this layer
for old, new in zip(old_weights[1], model.layers[1].get_weights()):
assert_raises(AssertionError, assert_allclose, old, new, atol=1e-05)
# a function to be called from the Lambda layer
def square_fn(x):
return x * x
def test_saving_lambda_custom_objects():
inputs = Input(shape=(3,))
x = Lambda(lambda x: square_fn(x), output_shape=(3,))(inputs)
outputs = Dense(3)(x)
model = Model(inputs, outputs)
model.compile(loss=losses.MSE,
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=[metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname, custom_objects={'square_fn': square_fn})
os.remove(fname)
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_lambda_numpy_array_arguments():
mean = np.random.random((4, 2, 3))
std = np.abs(np.random.random((4, 2, 3))) + 1e-5
inputs = Input(shape=(4, 2, 3))
outputs = Lambda(lambda image, mu, std: (image - mu) / std,
arguments={'mu': mean, 'std': std})(inputs)
model = Model(inputs, outputs)
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
os.remove(fname)
assert_allclose(mean, model.layers[1].arguments['mu'])
assert_allclose(std, model.layers[1].arguments['std'])
def test_saving_custom_activation_function():
x = Input(shape=(3,))
output = Dense(3, activation=K.cos)(x)
model = Model(x, output)
model.compile(loss=losses.MSE,
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=[metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 3))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname, custom_objects={'cos': K.cos})
os.remove(fname)
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_model_with_long_layer_names():
    # This layer name will make the `layer_names` HDF5 attribute blow
    # out of proportion. Note that it fits into the internal HDF5
    # attribute memory limit on its own, but because h5py converts
    # the list of layer names into a numpy array, which uses the same
    # amount of memory for every item, it increases the memory
    # requirements substantially.
x = Input(shape=(2,), name='input_' + ('x' * (2**15)))
f = x
for i in range(4):
f = Dense(2, name='dense_%d' % (i,))(f)
model = Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
# Check that the HDF5 files contains chunked array
# of layer names.
with h5py.File(fname, 'r') as h5file:
n_layer_names_arrays = len([attr for attr in h5file['model_weights'].attrs
if attr.startswith('layer_names')])
os.remove(fname)
# The chunking of layer names array should have happened.
assert n_layer_names_arrays > 0
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_model_with_long_weights_names():
x = Input(shape=(2,), name='nested_model_input')
f = x
for i in range(4):
f = Dense(2, name='nested_model_dense_%d' % (i,))(f)
f = Dense(2, name='nested_model_dense_4', trainable=False)(f)
    # This layer name will make the `weight_names`
# HDF5 attribute blow out of proportion.
f = Dense(2, name='nested_model_output' + ('x' * (2**15)))(f)
nested_model = Model(inputs=[x], outputs=[f], name='nested_model')
x = Input(shape=(2,), name='outer_model_input')
f = nested_model(x)
f = Dense(2, name='outer_model_output')(f)
model = Model(inputs=[x], outputs=[f])
model.compile(loss='mse', optimizer='adam', metrics=['acc'])
x = np.random.random((1, 2))
y = np.random.random((1, 2))
model.train_on_batch(x, y)
out = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
# Check that the HDF5 files contains chunked array
# of weight names.
with h5py.File(fname, 'r') as h5file:
attrs = [attr for attr in h5file['model_weights']['nested_model'].attrs
if attr.startswith('weight_names')]
n_weight_names_arrays = len(attrs)
os.remove(fname)
    # The chunking of the weight names array should have happened.
assert n_weight_names_arrays > 0
out2 = model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_recurrent_layer_with_init_state():
vector_size = 8
input_length = 20
input_initial_state = Input(shape=(vector_size,))
input_x = Input(shape=(input_length, vector_size))
lstm = LSTM(vector_size, return_sequences=True)(
input_x, initial_state=[input_initial_state, input_initial_state])
model = Model(inputs=[input_x, input_initial_state], outputs=[lstm])
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
loaded_model = load_model(fname)
os.remove(fname)
def test_saving_recurrent_layer_without_bias():
vector_size = 8
input_length = 20
input_x = Input(shape=(input_length, vector_size))
lstm = LSTM(vector_size, use_bias=False)(input_x)
model = Model(inputs=[input_x], outputs=[lstm])
_, fname = tempfile.mkstemp('.h5')
model.save(fname)
loaded_model = load_model(fname)
os.remove(fname)
def test_loop_model_saving():
model = Sequential()
model.add(Dense(2, input_shape=(3,)))
model.compile(loss=losses.MSE,
optimizer=optimizers.RMSprop(lr=0.0001),
metrics=[metrics.categorical_accuracy])
x = np.random.random((1, 3))
y = np.random.random((1, 2))
_, fname = tempfile.mkstemp('.h5')
for _ in range(3):
model.train_on_batch(x, y)
save_model(model, fname, overwrite=True)
out = model.predict(x)
new_model = load_model(fname)
os.remove(fname)
out2 = new_model.predict(x)
assert_allclose(out, out2, atol=1e-05)
def test_saving_constant_initializer_with_numpy():
"""Test saving and loading model of constant initializer with numpy inputs.
"""
model = Sequential()
model.add(Dense(2, input_shape=(3,),
kernel_initializer=Constant(np.ones((3, 2)))))
model.add(Dense(3))
model.compile(loss='mse', optimizer='sgd', metrics=['acc'])
_, fname = tempfile.mkstemp('.h5')
save_model(model, fname)
model = load_model(fname)
os.remove(fname)
def test_saving_group_naming_h5py(tmpdir):
"""Test saving model with layer which name is prefix to a previous layer
name
"""
input_layer = Input((None, None, 3), name='test_input')
x = Conv2D(1, 1, name='conv1/conv')(input_layer)
x = Activation('relu', name='conv1')(x)
model = Model(inputs=input_layer, outputs=x)
p = tmpdir.mkdir("test").join("test.h5")
model.save_weights(p)
model.load_weights(p)
def _make_nested_model(input_shape, layer, level=1, model_type='func'):
# example: make_nested_seq_model((1,), Dense(10), level=2).summary()
def make_nested_seq_model(input_shape, layer, level=1):
model = layer
for i in range(1, level + 1):
layers = [InputLayer(input_shape), model] if (i == 1) else [model]
model = Sequential(layers)
return model
# example: make_nested_func_model((1,), Dense(10), level=2).summary()
def make_nested_func_model(input_shape, layer, level=1):
input = Input(input_shape)
model = layer
for i in range(level):
model = Model(input, model(input))
return model
if model_type == 'func':
return make_nested_func_model(input_shape, layer, level)
elif model_type == 'seq':
return make_nested_seq_model(input_shape, layer, level)
def _convert_model_weights(source_model, target_model):
_, fname = tempfile.mkstemp('.h5')
source_model.save_weights(fname)
target_model.load_weights(fname)
os.remove(fname)
def test_model_saving_with_rnn_initial_state_and_args():
class CustomRNN(LSTM):
def call(self, inputs, arg=1, mask=None, training=None, initial_state=None):
if isinstance(inputs, list):
inputs = inputs[:]
shape = K.int_shape(inputs[0])
inputs[0] *= arg
inputs[0]._keras_shape = shape # for theano backend
else:
shape = K.int_shape(inputs)
inputs *= arg
inputs._keras_shape = shape # for theano backend
return super(CustomRNN, self).call(inputs, mask, training, initial_state)
inp = Input((3, 2))
rnn_out, h, c = CustomRNN(2, return_state=True, return_sequences=True)(inp)
assert hasattr(rnn_out, '_keras_history')
assert hasattr(h, '_keras_history')
assert hasattr(c, '_keras_history')
rnn2_out = CustomRNN(2)(rnn_out, arg=2, initial_state=[h, c])
assert hasattr(rnn2_out, '_keras_history')
model = Model(inputs=inp, outputs=rnn2_out)
x = np.random.random((2, 3, 2))
y1 = model.predict(x)
_, fname = tempfile.mkstemp('.h5')
with warnings.catch_warnings():
warnings.filterwarnings('error')
model.save(fname)
model2 = load_model(fname, custom_objects={'CustomRNN': CustomRNN})
y2 = model2.predict(x)
assert_allclose(y1, y2, atol=1e-5)
os.remove(fname)
if __name__ == '__main__':
pytest.main([__file__])
|
[
"keras.models.load_model",
"os.remove",
"numpy.abs",
"numpy.ones",
"keras.models.Model",
"pytest.main",
"keras.layers.Input",
"numpy.zeros_like",
"os.path.exists",
"warnings.catch_warnings",
"numpy.testing.assert_allclose",
"keras.losses.MeanSquaredError",
"io.BytesIO",
"h5py.File",
"numpy.testing.assert_raises",
"keras.optimizers.Adam",
"keras.layers.Conv2D",
"keras.models.save_model",
"keras.backend.int_shape",
"keras.layers.RepeatVector",
"keras.optimizers.RMSprop",
"tempfile.mkstemp",
"keras.layers.Activation",
"warnings.filterwarnings",
"keras.layers.LSTM",
"numpy.random.random",
"numpy.array",
"keras.layers.Dense",
"keras.layers.Lambda",
"keras.layers.InputLayer",
"keras.models.Sequential"
] |
[((828, 840), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (838, 840), False, 'from keras.models import Model, Sequential\n'), ((1179, 1203), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (1195, 1203), True, 'import numpy as np\n'), ((1212, 1239), 'numpy.random.random', 'np.random.random', (['(1, 3, 3)'], {}), '((1, 3, 3))\n', (1228, 1239), True, 'import numpy as np\n'), ((1315, 1338), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (1331, 1338), False, 'import tempfile\n'), ((1343, 1367), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (1353, 1367), False, 'from keras.models import save_model, load_model\n'), ((1384, 1401), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (1394, 1401), False, 'from keras.models import save_model, load_model\n'), ((1406, 1422), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (1415, 1422), False, 'import os\n'), ((1433, 1457), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (1449, 1457), True, 'import numpy as np\n'), ((1467, 1494), 'numpy.random.random', 'np.random.random', (['(1, 3, 3)'], {}), '((1, 3, 3))\n', (1483, 1494), True, 'import numpy as np\n'), ((1598, 1639), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'new_out'], {'atol': '(1e-05)'}), '(out, new_out, atol=1e-05)\n', (1613, 1639), False, 'from numpy.testing import assert_allclose\n'), ((1777, 1822), 'numpy.testing.assert_allclose', 'assert_allclose', (['out_2', 'new_out_2'], {'atol': '(1e-05)'}), '(out_2, new_out_2, atol=1e-05)\n', (1792, 1822), False, 'from numpy.testing import assert_allclose\n'), ((1979, 1991), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1989, 1991), False, 'from keras.models import Model, Sequential\n'), ((2144, 2168), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (2160, 2168), True, 'import numpy as np\n'), ((2177, 2201), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (2193, 2201), True, 'import numpy as np\n'), ((2407, 2430), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (2423, 2430), False, 'import tempfile\n'), ((2435, 2459), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (2445, 2459), False, 'from keras.models import save_model, load_model\n'), ((2476, 2508), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname, **load_kwargs)\n', (2486, 2508), False, 'from keras.models import save_model, load_model\n'), ((2513, 2529), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (2522, 2529), False, 'import os\n'), ((2570, 2611), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'new_out'], {'atol': '(1e-05)'}), '(out, new_out, atol=1e-05)\n', (2585, 2611), False, 'from numpy.testing import assert_allclose\n'), ((2662, 2679), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (2667, 2679), False, 'from keras.layers import Input, InputLayer\n'), ((2744, 2766), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (2749, 2766), False, 'from keras.models import Model, Sequential\n'), ((2915, 2939), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (2931, 2939), True, 'import numpy as np\n'), ((2948, 2972), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (2964, 2972), True, 'import numpy as np\n'), ((3150, 3173), 'tempfile.mkstemp', 
'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (3166, 3173), False, 'import tempfile\n'), ((3178, 3202), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (3188, 3202), False, 'from keras.models import save_model, load_model\n'), ((3219, 3236), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (3229, 3236), False, 'from keras.models import save_model, load_model\n'), ((3241, 3257), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (3250, 3257), False, 'import os\n'), ((3298, 3339), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'new_out'], {'atol': '(1e-05)'}), '(out, new_out, atol=1e-05)\n', (3313, 3339), False, 'from numpy.testing import assert_allclose\n'), ((3489, 3512), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (3505, 3512), False, 'import tempfile\n'), ((3680, 3718), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (3695, 3718), False, 'from numpy.testing import assert_allclose\n'), ((3980, 4018), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (3995, 4018), False, 'from numpy.testing import assert_allclose\n'), ((4218, 4256), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (4233, 4256), False, 'from numpy.testing import assert_allclose\n'), ((4466, 4492), 'tempfile.mkstemp', 'tempfile.mkstemp', (['filename'], {}), '(filename)\n', (4482, 4492), False, 'import tempfile\n'), ((4521, 4547), 'os.path.exists', 'os.path.exists', (['temp_fname'], {}), '(temp_fname)\n', (4535, 4547), False, 'import os\n'), ((5077, 5115), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (5092, 5115), False, 'from numpy.testing import assert_allclose\n'), ((5610, 5648), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (5625, 5648), False, 'from numpy.testing import assert_allclose\n'), ((5791, 5803), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (5801, 5803), False, 'import io\n'), ((5808, 5833), 'keras.models.save_model', 'save_model', (['model', 'stream'], {}), '(model, stream)\n', (5818, 5833), False, 'from keras.models import save_model, load_model\n'), ((5872, 5890), 'keras.models.load_model', 'load_model', (['stream'], {}), '(stream)\n', (5882, 5890), False, 'from keras.models import save_model, load_model\n'), ((5930, 5968), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (5945, 5968), False, 'from numpy.testing import assert_allclose\n'), ((6028, 6045), 'keras.layers.Input', 'Input', ([], {'shape': '(5,)'}), '(shape=(5,))\n', (6033, 6045), False, 'from keras.layers import Input, InputLayer\n'), ((6168, 6216), 'keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[output1, output2]'}), '(inputs=inputs, outputs=[output1, output2])\n', (6173, 6216), False, 'from keras.models import Model, Sequential\n'), ((6498, 6525), 'numpy.array', 'np.array', (['[[1, 1, 1, 1, 1]]'], {}), '([[1, 1, 1, 1, 1]])\n', (6506, 6525), True, 'import numpy as np\n'), ((6568, 6591), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (6584, 6591), False, 'import tempfile\n'), ((6596, 6620), 'keras.models.save_model', 'save_model', (['model', 'fname'], 
{}), '(model, fname)\n', (6606, 6620), False, 'from keras.models import save_model, load_model\n'), ((6634, 6651), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (6644, 6651), False, 'from keras.models import save_model, load_model\n'), ((6656, 6672), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (6665, 6672), False, 'import os\n'), ((6706, 6744), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (6721, 6744), False, 'from numpy.testing import assert_allclose\n'), ((6850, 6862), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (6860, 6862), False, 'from keras.models import Model, Sequential\n'), ((6945, 6968), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (6961, 6968), False, 'import tempfile\n'), ((6973, 6997), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (6983, 6997), False, 'from keras.models import save_model, load_model\n'), ((7010, 7027), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (7020, 7027), False, 'from keras.models import save_model, load_model\n'), ((7032, 7048), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (7041, 7048), False, 'import os\n'), ((7106, 7118), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7116, 7118), False, 'from keras.models import Model, Sequential\n'), ((7265, 7288), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (7281, 7288), False, 'import tempfile\n'), ((7293, 7317), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (7303, 7317), False, 'from keras.models import save_model, load_model\n'), ((7330, 7347), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (7340, 7347), False, 'from keras.models import save_model, load_model\n'), ((7352, 7368), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (7361, 7368), False, 'import os\n'), ((7418, 7444), 'keras.layers.Input', 'Input', ([], {'shape': '(256, 512, 6)'}), '(shape=(256, 512, 6))\n', (7423, 7444), False, 'from keras.layers import Input, InputLayer\n'), ((7453, 7479), 'keras.layers.Input', 'Input', ([], {'shape': '(256, 512, 1)'}), '(shape=(256, 512, 1))\n', (7458, 7479), False, 'from keras.layers import Input, InputLayer\n'), ((7537, 7568), 'keras.models.Model', 'Model', ([], {'inputs': '[a, b]', 'outputs': 'c'}), '(inputs=[a, b], outputs=c)\n', (7542, 7568), False, 'from keras.models import Model, Sequential\n'), ((7585, 7608), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (7601, 7608), False, 'import tempfile\n'), ((7613, 7637), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (7623, 7637), False, 'from keras.models import save_model, load_model\n'), ((7642, 7659), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (7652, 7659), False, 'from keras.models import save_model, load_model\n'), ((7664, 7680), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (7673, 7680), False, 'import os\n'), ((7948, 7960), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (7958, 7960), False, 'from keras.models import Model, Sequential\n'), ((8137, 8161), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (8153, 8161), True, 'import numpy as np\n'), ((8170, 8194), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (8186, 8194), True, 'import numpy as 
np\n'), ((8335, 8358), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (8351, 8358), False, 'import tempfile\n'), ((8470, 8487), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (8475, 8487), False, 'from keras.layers import Input, InputLayer\n'), ((8709, 8746), 'keras.models.Model', 'Model', ([], {'inputs': '[data]', 'outputs': '[morty]'}), '(inputs=[data], outputs=[morty])\n', (8714, 8746), False, 'from keras.models import Model, Sequential\n'), ((8906, 8922), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (8915, 8922), False, 'import os\n'), ((9173, 9228), 'numpy.testing.assert_allclose', 'assert_allclose', (['old_weights[0][0]', 'rick[0]'], {'atol': '(1e-05)'}), '(old_weights[0][0], rick[0], atol=1e-05)\n', (9188, 9228), False, 'from numpy.testing import assert_allclose\n'), ((9233, 9288), 'numpy.testing.assert_allclose', 'assert_allclose', (['old_weights[0][1]', 'rick[1]'], {'atol': '(1e-05)'}), '(old_weights[0][1], rick[1], atol=1e-05)\n', (9248, 9288), False, 'from numpy.testing import assert_allclose\n'), ((9293, 9349), 'numpy.testing.assert_allclose', 'assert_allclose', (['old_weights[1][0]', 'morty[0]'], {'atol': '(1e-05)'}), '(old_weights[1][0], morty[0], atol=1e-05)\n', (9308, 9349), False, 'from numpy.testing import assert_allclose\n'), ((9354, 9410), 'numpy.testing.assert_allclose', 'assert_allclose', (['old_weights[1][1]', 'morty[1]'], {'atol': '(1e-05)'}), '(old_weights[1][1], morty[1], atol=1e-05)\n', (9369, 9410), False, 'from numpy.testing import assert_allclose\n'), ((9830, 9842), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9840, 9842), False, 'from keras.models import Model, Sequential\n'), ((10019, 10043), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (10035, 10043), True, 'import numpy as np\n'), ((10052, 10076), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (10068, 10076), True, 'import numpy as np\n'), ((10217, 10240), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (10233, 10240), False, 'import tempfile\n'), ((10332, 10344), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (10342, 10344), False, 'from keras.models import Model, Sequential\n'), ((10658, 10674), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (10667, 10674), False, 'import os\n'), ((11201, 11218), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (11206, 11218), False, 'from keras.layers import Input, InputLayer\n'), ((11324, 11346), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (11329, 11346), False, 'from keras.models import Model, Sequential\n'), ((11507, 11531), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (11523, 11531), True, 'import numpy as np\n'), ((11540, 11564), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (11556, 11564), True, 'import numpy as np\n'), ((11639, 11662), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (11655, 11662), False, 'import tempfile\n'), ((11667, 11691), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (11677, 11691), False, 'from keras.models import save_model, load_model\n'), ((11705, 11763), 'keras.models.load_model', 'load_model', (['fname'], {'custom_objects': "{'square_fn': square_fn}"}), "(fname, custom_objects={'square_fn': square_fn})\n", (11715, 11763), False, 'from keras.models import save_model, 
load_model\n'), ((11768, 11784), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (11777, 11784), False, 'import os\n'), ((11818, 11856), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (11833, 11856), False, 'from numpy.testing import assert_allclose\n'), ((11918, 11945), 'numpy.random.random', 'np.random.random', (['(4, 2, 3)'], {}), '((4, 2, 3))\n', (11934, 11945), True, 'import numpy as np\n'), ((12012, 12034), 'keras.layers.Input', 'Input', ([], {'shape': '(4, 2, 3)'}), '(shape=(4, 2, 3))\n', (12017, 12034), False, 'from keras.layers import Input, InputLayer\n'), ((12176, 12198), 'keras.models.Model', 'Model', (['inputs', 'outputs'], {}), '(inputs, outputs)\n', (12181, 12198), False, 'from keras.models import Model, Sequential\n'), ((12279, 12302), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (12295, 12302), False, 'import tempfile\n'), ((12307, 12331), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (12317, 12331), False, 'from keras.models import save_model, load_model\n'), ((12345, 12362), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (12355, 12362), False, 'from keras.models import save_model, load_model\n'), ((12367, 12383), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (12376, 12383), False, 'import os\n'), ((12389, 12443), 'numpy.testing.assert_allclose', 'assert_allclose', (['mean', "model.layers[1].arguments['mu']"], {}), "(mean, model.layers[1].arguments['mu'])\n", (12404, 12443), False, 'from numpy.testing import assert_allclose\n'), ((12448, 12502), 'numpy.testing.assert_allclose', 'assert_allclose', (['std', "model.layers[1].arguments['std']"], {}), "(std, model.layers[1].arguments['std'])\n", (12463, 12502), False, 'from numpy.testing import assert_allclose\n'), ((12559, 12576), 'keras.layers.Input', 'Input', ([], {'shape': '(3,)'}), '(shape=(3,))\n', (12564, 12576), False, 'from keras.layers import Input, InputLayer\n'), ((12633, 12649), 'keras.models.Model', 'Model', (['x', 'output'], {}), '(x, output)\n', (12638, 12649), False, 'from keras.models import Model, Sequential\n'), ((12810, 12834), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (12826, 12834), True, 'import numpy as np\n'), ((12843, 12867), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (12859, 12867), True, 'import numpy as np\n'), ((12942, 12965), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (12958, 12965), False, 'import tempfile\n'), ((12970, 12994), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (12980, 12994), False, 'from keras.models import save_model, load_model\n'), ((13008, 13056), 'keras.models.load_model', 'load_model', (['fname'], {'custom_objects': "{'cos': K.cos}"}), "(fname, custom_objects={'cos': K.cos})\n", (13018, 13056), False, 'from keras.models import save_model, load_model\n'), ((13061, 13077), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (13070, 13077), False, 'import os\n'), ((13111, 13149), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (13126, 13149), False, 'from numpy.testing import assert_allclose\n'), ((13573, 13621), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)', 'name': "('input_' + 'x' * 2 ** 15)"}), "(shape=(2,), name='input_' + 'x' * 2 ** 15)\n", (13578, 13621), False, 'from 
keras.layers import Input, InputLayer\n'), ((13718, 13748), 'keras.models.Model', 'Model', ([], {'inputs': '[x]', 'outputs': '[f]'}), '(inputs=[x], outputs=[f])\n', (13723, 13748), False, 'from keras.models import Model, Sequential\n'), ((13824, 13848), 'numpy.random.random', 'np.random.random', (['(1, 2)'], {}), '((1, 2))\n', (13840, 13848), True, 'import numpy as np\n'), ((13857, 13881), 'numpy.random.random', 'np.random.random', (['(1, 2)'], {}), '((1, 2))\n', (13873, 13881), True, 'import numpy as np\n'), ((13957, 13980), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (13973, 13980), False, 'import tempfile\n'), ((13985, 14009), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (13995, 14009), False, 'from keras.models import save_model, load_model\n'), ((14023, 14040), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (14033, 14040), False, 'from keras.models import save_model, load_model\n'), ((14321, 14337), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (14330, 14337), False, 'import os\n'), ((14470, 14508), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (14485, 14508), False, 'from numpy.testing import assert_allclose\n'), ((14568, 14612), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)', 'name': '"""nested_model_input"""'}), "(shape=(2,), name='nested_model_input')\n", (14573, 14612), False, 'from keras.layers import Input, InputLayer\n'), ((14954, 15005), 'keras.models.Model', 'Model', ([], {'inputs': '[x]', 'outputs': '[f]', 'name': '"""nested_model"""'}), "(inputs=[x], outputs=[f], name='nested_model')\n", (14959, 15005), False, 'from keras.models import Model, Sequential\n'), ((15015, 15058), 'keras.layers.Input', 'Input', ([], {'shape': '(2,)', 'name': '"""outer_model_input"""'}), "(shape=(2,), name='outer_model_input')\n", (15020, 15058), False, 'from keras.layers import Input, InputLayer\n'), ((15143, 15173), 'keras.models.Model', 'Model', ([], {'inputs': '[x]', 'outputs': '[f]'}), '(inputs=[x], outputs=[f])\n', (15148, 15173), False, 'from keras.models import Model, Sequential\n'), ((15249, 15273), 'numpy.random.random', 'np.random.random', (['(1, 2)'], {}), '((1, 2))\n', (15265, 15273), True, 'import numpy as np\n'), ((15282, 15306), 'numpy.random.random', 'np.random.random', (['(1, 2)'], {}), '((1, 2))\n', (15298, 15306), True, 'import numpy as np\n'), ((15382, 15405), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (15398, 15405), False, 'import tempfile\n'), ((15410, 15434), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (15420, 15434), False, 'from keras.models import save_model, load_model\n'), ((15448, 15465), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (15458, 15465), False, 'from keras.models import save_model, load_model\n'), ((15768, 15784), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (15777, 15784), False, 'import os\n'), ((15918, 15956), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (15933, 15956), False, 'from numpy.testing import assert_allclose\n'), ((16079, 16106), 'keras.layers.Input', 'Input', ([], {'shape': '(vector_size,)'}), '(shape=(vector_size,))\n', (16084, 16106), False, 'from keras.layers import Input, InputLayer\n'), ((16121, 16161), 'keras.layers.Input', 'Input', ([], {'shape': '(input_length, 
vector_size)'}), '(shape=(input_length, vector_size))\n', (16126, 16161), False, 'from keras.layers import Input, InputLayer\n'), ((16304, 16364), 'keras.models.Model', 'Model', ([], {'inputs': '[input_x, input_initial_state]', 'outputs': '[lstm]'}), '(inputs=[input_x, input_initial_state], outputs=[lstm])\n', (16309, 16364), False, 'from keras.models import Model, Sequential\n'), ((16381, 16404), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (16397, 16404), False, 'import tempfile\n'), ((16447, 16464), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (16457, 16464), False, 'from keras.models import save_model, load_model\n'), ((16469, 16485), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (16478, 16485), False, 'import os\n'), ((16593, 16633), 'keras.layers.Input', 'Input', ([], {'shape': '(input_length, vector_size)'}), '(shape=(input_length, vector_size))\n', (16598, 16633), False, 'from keras.layers import Input, InputLayer\n'), ((16700, 16739), 'keras.models.Model', 'Model', ([], {'inputs': '[input_x]', 'outputs': '[lstm]'}), '(inputs=[input_x], outputs=[lstm])\n', (16705, 16739), False, 'from keras.models import Model, Sequential\n'), ((16756, 16779), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (16772, 16779), False, 'import tempfile\n'), ((16822, 16839), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (16832, 16839), False, 'from keras.models import save_model, load_model\n'), ((16844, 16860), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (16853, 16860), False, 'import os\n'), ((16905, 16917), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (16915, 16917), False, 'from keras.models import Model, Sequential\n'), ((17121, 17145), 'numpy.random.random', 'np.random.random', (['(1, 3)'], {}), '((1, 3))\n', (17137, 17145), True, 'import numpy as np\n'), ((17154, 17178), 'numpy.random.random', 'np.random.random', (['(1, 2)'], {}), '((1, 2))\n', (17170, 17178), True, 'import numpy as np\n'), ((17194, 17217), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (17210, 17217), False, 'import tempfile\n'), ((17374, 17391), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (17384, 17391), False, 'from keras.models import save_model, load_model\n'), ((17396, 17412), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (17405, 17412), False, 'import os\n'), ((17450, 17488), 'numpy.testing.assert_allclose', 'assert_allclose', (['out', 'out2'], {'atol': '(1e-05)'}), '(out, out2, atol=1e-05)\n', (17465, 17488), False, 'from numpy.testing import assert_allclose\n'), ((17642, 17654), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (17652, 17654), False, 'from keras.models import Model, Sequential\n'), ((17867, 17890), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (17883, 17890), False, 'import tempfile\n'), ((17895, 17919), 'keras.models.save_model', 'save_model', (['model', 'fname'], {}), '(model, fname)\n', (17905, 17919), False, 'from keras.models import save_model, load_model\n'), ((17932, 17949), 'keras.models.load_model', 'load_model', (['fname'], {}), '(fname)\n', (17942, 17949), False, 'from keras.models import save_model, load_model\n'), ((17954, 17970), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (17963, 17970), False, 'import os\n'), ((18129, 18170), 'keras.layers.Input', 'Input', (['(None, None, 3)'], {'name': '"""test_input"""'}), "((None, None, 3), 
name='test_input')\n", (18134, 18170), False, 'from keras.layers import Input, InputLayer\n'), ((18281, 18317), 'keras.models.Model', 'Model', ([], {'inputs': 'input_layer', 'outputs': 'x'}), '(inputs=input_layer, outputs=x)\n', (18286, 18317), False, 'from keras.models import Model, Sequential\n'), ((19375, 19398), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (19391, 19398), False, 'import tempfile\n'), ((19477, 19493), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (19486, 19493), False, 'import os\n'), ((20145, 20158), 'keras.layers.Input', 'Input', (['(3, 2)'], {}), '((3, 2))\n', (20150, 20158), False, 'from keras.layers import Input, InputLayer\n'), ((20490, 20525), 'keras.models.Model', 'Model', ([], {'inputs': 'inp', 'outputs': 'rnn2_out'}), '(inputs=inp, outputs=rnn2_out)\n', (20495, 20525), False, 'from keras.models import Model, Sequential\n'), ((20534, 20561), 'numpy.random.random', 'np.random.random', (['(2, 3, 2)'], {}), '((2, 3, 2))\n', (20550, 20561), True, 'import numpy as np\n'), ((20603, 20626), 'tempfile.mkstemp', 'tempfile.mkstemp', (['""".h5"""'], {}), "('.h5')\n", (20619, 20626), False, 'import tempfile\n'), ((20743, 20801), 'keras.models.load_model', 'load_model', (['fname'], {'custom_objects': "{'CustomRNN': CustomRNN}"}), "(fname, custom_objects={'CustomRNN': CustomRNN})\n", (20753, 20801), False, 'from keras.models import save_model, load_model\n'), ((20833, 20868), 'numpy.testing.assert_allclose', 'assert_allclose', (['y1', 'y2'], {'atol': '(1e-05)'}), '(y1, y2, atol=1e-05)\n', (20848, 20868), False, 'from numpy.testing import assert_allclose\n'), ((20872, 20888), 'os.remove', 'os.remove', (['fname'], {}), '(fname)\n', (20881, 20888), False, 'import os\n'), ((20922, 20945), 'pytest.main', 'pytest.main', (['[__file__]'], {}), '([__file__])\n', (20933, 20945), False, 'import pytest\n'), ((855, 881), 'keras.layers.Dense', 'Dense', (['(2)'], {'input_shape': '(3,)'}), '(2, input_shape=(3,))\n', (860, 881), False, 'from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed\n'), ((897, 912), 'keras.layers.RepeatVector', 'RepeatVector', (['(3)'], {}), '(3)\n', (909, 912), False, 'from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed\n'), ((2006, 2032), 'keras.layers.Dense', 'Dense', (['(2)'], {'input_shape': '(3,)'}), '(2, input_shape=(3,))\n', (2011, 2032), False, 'from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed\n'), ((2048, 2056), 'keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (2053, 2056), False, 'from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed\n'), ((2688, 2696), 'keras.layers.Dense', 'Dense', (['(2)'], {}), '(2)\n', (2693, 2696), False, 'from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed\n'), ((2719, 2727), 'keras.layers.Dense', 'Dense', (['(3)'], {}), '(3)\n', (2724, 2727), False, 'from keras.layers import Dense, Lambda, RepeatVector, TimeDistributed\n'), ((3522, 3549), 'h5py.File', 'h5py.File', (['fname'], {'mode': '"""r+"""'}), "(fname, mode='r+')\n", (3531, 3549), False, 'import h5py\n'), ((3569, 3594), 'keras.models.save_model', 'save_model', (['model', 'h5file'], {}), '(model, h5file)\n', (3579, 3594), False, 'from keras.models import save_model, load_model\n'), ((3618, 3636), 'keras.models.load_model', 'load_model', (['h5file'], {}), '(h5file)\n', (3628, 3636), False, 'from keras.models import save_model, load_model\n'), ((3766, 3830), 'h5py.File', 'h5py.File', (['"""does not matter"""'], {'driver': '"""core"""', 
[Tail of a truncated extract_api record: a machine-generated list of API call sites, with character offsets, extracted from Keras model saving/loading test code. Each entry appears to take the form

((call_start, call_end), 'dotted.api.path', 'name as written in the source', (positional_args, {keyword_args}), 'argument text', (args_start, args_end), accessed_via_module_alias, 'originating import statement')

The first, partially cut-off entry records an in-memory h5py.File opened with driver='core', backing_store=False. The remaining entries reference h5py.File, os.remove, warnings.catch_warnings and warnings.filterwarnings, keras.models (save_model, load_model, Sequential), keras.layers (Dense, Lambda, Conv2D, Activation, Input, InputLayer, LSTM), keras.optimizers (RMSprop, Adam), keras.losses.MeanSquaredError, keras.backend.int_shape, numpy (zeros_like, abs, ones, random.random), and numpy.testing (assert_allclose, assert_raises). A representative entry, kept verbatim:

((3869, 3894), 'keras.models.save_model', 'save_model', (['model', 'h5file'], {}), '(model, h5file)\n', (3879, 3894), False, 'from keras.models import save_model, load_model\n')]
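A minimal sketch of how such a record could be decoded, assuming each entry is a literal Python tuple of the shape described above; the field names are illustrative guesses, and the sample record is copied verbatim from the list.

# Hedged illustration: unpack one extract_api-style record with ast.literal_eval.
import ast

sample = (
    "((3869, 3894), 'keras.models.save_model', 'save_model', "
    "(['model', 'h5file'], {}), '(model, h5file)\\n', (3879, 3894), False, "
    "'from keras.models import save_model, load_model\\n')"
)

record = ast.literal_eval(sample)  # safely parses the tuple literal
(call_span, api_path, name_in_source, args,
 arg_text, arg_span, uses_alias, import_stmt) = record

print(api_path)             # keras.models.save_model
print(args[0])              # ['model', 'h5file']
print(import_stmt.strip())  # from keras.models import save_model, load_model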