# Get spectral coefficients for omega
# Script for plotting directly from the hard disk; not to be used with the bash script
import os
import sys
import glob
import time
import pathlib
import logging
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD
from scipy.sparse import linalg as spla
from dedalus.tools.config import config
from simple_sphere import SimpleSphere, TensorField, TensorSystem
import equations
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
from dedalus.extras import plot_tools
from matplotlib.animation import FFMpegWriter
logger = logging.getLogger(__name__)
from matplotlib.patches import Rectangle
from mpl_toolkits.axes_grid1.axes_divider import make_axes_locatable
#add path to data folder
sim_number = 110
input_folder = "/Volumes/ExtDrive/data/sphere%i" %(sim_number)
output_folder = "videos"
first_frame = 1
last_frame = len(glob.glob1("".join([input_folder, '/']), "*.npz"))
last_frame = 3000  # manual override of the auto-detected frame count
dpi = 300
FPS = 20
fields = ['om']
ell_max = 20 #for plotting
marker_size = 0.5
step = 5 #number of frames to skip
vphlim = 10
axs = [None for i in range(3)]
w, h = 0.4, 0.6
#plotting
#plt.rc('font', size=15)
plt.rc('text', usetex=True)
plt.rc('font', **{'family': 'serif', 'serif': ['Computer Modern Roman'], 'size': 13})
fig = plt.figure(figsize=(8,4.5))
axs[0] = plt.axes((0.1, 0.2, 0.45, h))
axs[1] = plt.axes((0.63, 0.2, 0.33, h))
# Setup output folder
if comm.rank == 0:
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)
comm.barrier()
max_vals = {key: 0 for key in fields}
clims = {key: 0 for key in fields}
#for field in fields:
#    for i in range(first_frame + comm.rank+1, last_frame + 1, comm.size):
#        with np.load("".join([input_folder, '/output_%i.npz' %i])) as file:
#            fieldval = file[field]
#            max_vals[field] = max(max_vals[field], np.max(fieldval))
#for field in fields:
#    clims[field] = 0.75*max_vals[field]
clims['om'] = 150.0
metadata = dict(title='Movie', artist='Matplotlib', comment='Movie support!')
writer = FFMpegWriter(fps=FPS, metadata=metadata)
with writer.saving(fig, "%s/sphere%i_om_coeffs.mp4" % (output_folder, sim_number), dpi):
    for ind in range(first_frame + comm.rank + 1, last_frame + 1, step):
        logger.info("Frame: %i" % ind)
        with np.load(os.path.join(input_folder, 'output_%i.npz' % ind)) as file:
            if ind == first_frame + comm.rank + 1:
                phi = file['phi']
                theta = file['theta']
                L_max = len(theta) - 1
                S_max = 4
                simplesphere = SimpleSphere(L_max, S_max)
                omega = TensorField(simplesphere, rank=0)
            om = file['om']
            vph = np.mean(file['v_ph'], axis=0)
            print(np.max(vph))
            time = file['t'][0]
        # assign loaded data
        omega.component_fields[0]['g'] = om
        # spectral transform
        omega.forward_phi()
        omega.forward_theta()
        coeffs = omega.coeffs
        # assign coeffs to a numpy array
        coeffs_arr = np.zeros([L_max + 1, L_max + 1], dtype=complex)
        for m in range(len(coeffs)):
            coeffs_arr[m, m:] = coeffs[m]
        mag = np.abs(coeffs_arr)
        phase = np.angle(coeffs_arr)
        if ind == first_frame + comm.rank + 1:
            mag_fac = marker_size/np.max(mag)
        m = np.arange(0, L_max + 1)
        ell = np.arange(0, L_max + 1)
        ellell, mm = np.meshgrid(ell, m)
        if ind == first_frame + comm.rank + 1:
            title = fig.suptitle(r'$t/\tau = %.4f$' % time, usetex=True)
            ax = axs[0]
            img0 = ax.pcolormesh(phi, np.pi/2 - theta, om.T, cmap='RdBu_r', shading='gouraud', rasterized=True)
            ax.set_ylabel(r'Latitude $(\pi/2-\theta)$', usetex=True)
            ax.set_yticks([-np.pi/2, 0, np.pi/2])
            ax.set_yticklabels([r'$-\frac{\pi}{2}$', r'$0$', r'$\frac{\pi}{2}$'])
            ax.set_xlabel(r'Longitude $\phi$', usetex=True)
            ax.set_xticks([0, np.pi, 2*np.pi])
            ax.set_xticklabels([r'$0$', r'$\pi$', r'$2 \pi$'])
            img0.set_clim([-clims['om'], clims['om']])
            # add colorbar
            ax_divider = make_axes_locatable(ax)
            cax = ax_divider.append_axes("top", size="7%", pad="4%")
            cb = plt.colorbar(img0, cax=cax, orientation="horizontal")
            cax.xaxis.set_ticks_position("top")
            cb.set_ticks([-clims['om'], 0, clims['om']])
            # add axis for v_ph
            vph_ax = ax_divider.append_axes("right", size="30%", pad="17%")
            line, = vph_ax.plot(vph, np.pi/2 - theta, 'k', linewidth=1)
            vph_ax.set_yticks([])
            vph_ax.set_ylim([-np.pi/2, np.pi/2])
            vph_ax.set_xlim([-vphlim, vphlim])
            vph_ax.set_xticks([-vphlim, 0, vphlim])
            vph_ax.axvline(linestyle='--', color='k', linewidth=0.5)
            vph_ax.set_xlabel(r'$\langle v_\phi\rangle_\phi/(R/\tau)$')
            ax = axs[1]
            img1 = ax.scatter(mm.flatten(), ellell.flatten(), mag_fac*mag.flatten(),
                              c=phase.flatten(), cmap='hsv', edgecolor='none')
            rect = Rectangle((-1, 10.4), ell_max + 1, 4, facecolor='k', alpha=0.2)
            ax.add_patch(rect)
            ax.set_xlim(-1, ell_max)
            ax.set_ylim(-1, ell_max)
            ax.set_xlabel('$m$', usetex=True)
            ax.set_ylabel(r'$\ell$', usetex=True, rotation=0)
            img1.set_clim(0, 2*np.pi)
            ax_divider = make_axes_locatable(ax)
            # add an axes above the main axes
            cax = ax_divider.append_axes("top", size="7%", pad="4%")
            cb = plt.colorbar(img1, cax=cax, orientation="horizontal")
            cb.set_ticks([0, np.pi, 2*np.pi])
            cb.set_ticklabels(['$0$', r'$\pi$', r'$2 \pi$'])
            cax.xaxis.set_ticks_position("top")
            # fig.tight_layout()
        else:
            title.set_text(r'$t/\tau = %.4f$' % time)
            img0.set_array(om.T.ravel())
            img1.set_sizes(mag_fac*mag.flatten())
            img1.set_array(phase.flatten())
            line.set_xdata(vph)
        writer.grab_frame()
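# Illustration of the triangular coefficient layout used above (a sketch with
# dummy data, independent of the Dedalus output): coeffs[m] holds the degrees
# ell = m..L_max for azimuthal order m, so row m of coeffs_arr is filled from
# column m onward and the lower triangle stays zero.
#
#     demo_L = 3
#     demo_coeffs = [np.ones(demo_L + 1 - m) for m in range(demo_L + 1)]
#     demo_arr = np.zeros([demo_L + 1, demo_L + 1], dtype=complex)
#     for m in range(len(demo_coeffs)):
#         demo_arr[m, m:] = demo_coeffs[m]
#     # np.abs(demo_arr) is upper triangular: only ell >= m is populated.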
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 25 13:53:31 2018
@author: alechat
"""
import os, sys
import warnings
if os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) not in sys.path:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
import numpy as np
import datetime as dt
from keras.callbacks import ModelCheckpoint, Callback
from keras import backend as K
from keras.models import load_model
from keras.utils.generic_utils import get_custom_objects
from keras import metrics
from DeepDeconv.utils.batch_utils import get_batch_from_fits, dynamic_batches, npy_batches
import tensorflow as tf
#Used for loading the model
import json
import h5py
import keras.optimizers as optimizers
from keras.layers import Input
from keras.utils.io_utils import H5Dict
# Write to a file
def write_log(s, filetxt):
    with open(filetxt, 'a') as f:
        f.write(s)
        f.write("\n")
class LoggingCallback(Callback):
    """Callback that logs a message at the end of each epoch."""
    def __init__(self, filetxt='log.txt', log=write_log):
        Callback.__init__(self)
        self.log = log
        self.filetxt = filetxt

    def on_epoch_end(self, epoch, logs={}):
        msg = dt.datetime.now().strftime('%Y-%m-%d_%H:%M:%S - ') + str("Epoch: %i, " % (epoch + 1)) + str(", ".join("%s: %f" % (k, v) for k, v in logs.items()))
        self.log(msg, self.filetxt)
        print(msg)
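# Usage sketch (assumes a compiled Keras `model` and numpy arrays `x_train`,
# `y_train`; the log file name is illustrative):
#
#     model.fit(x_train, y_train, epochs=5,
#               callbacks=[LoggingCallback(filetxt='training_log.txt')])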
class ModelCheckpointExtraSave(ModelCheckpoint):
    """ModelCheckpoint with extra information."""
    def __init__(self, filepath, monitor='val_loss', verbose=0,
                 save_best_only=False, save_weights_only=False,
                 mode='auto', nepochs=1, period=1, best_epoch=None, best_val=None):
        super(ModelCheckpointExtraSave, self).__init__(filepath, monitor=monitor, verbose=verbose,
                                                       save_best_only=save_best_only, save_weights_only=save_weights_only,
                                                       mode=mode, period=period)
        self.nepochs = nepochs
        if (best_epoch is not None) and (best_val is not None):
            self.best = best_val
            self.best_epoch = best_epoch

    def on_epoch_end(self, epoch, logs=None):
        """This is essentially the same as ModelCheckpoint, except for the two np.savetxt calls"""
        self.epochs_since_last_save += 1
        if self.epochs_since_last_save >= self.period:
            self.epochs_since_last_save = 0
            filepath = self.filepath.format(epoch=epoch + 1, **logs)
            if self.save_best_only:
                current = logs.get(self.monitor)
                if current is None:
                    warnings.warn('Can save best model only with %s available, '
                                  'skipping.' % (self.monitor), RuntimeWarning)
                else:
                    if self.monitor_op(current, self.best):
                        if self.verbose > 0:
                            print('\nEpoch %05d: %s improved from %0.5f to %0.5f,'
                                  ' saving model to %s'
                                  % (epoch + 1, self.monitor, self.best,
                                     current, filepath))
                        self.best = current
                        self.best_epoch = epoch + 1
                        if self.save_weights_only:
                            self.model.save_weights(filepath, overwrite=True)
                        else:
                            self.model.save(filepath, overwrite=True)
                        np.savetxt(filepath + ".best_params", np.asarray([self.best_epoch, self.best, self.nepochs]))
                    else:
                        if self.verbose > 0:
                            print('\nEpoch %05d: %s did not improve from %0.5f' %
                                  (epoch + 1, self.monitor, self.best))
            else:
                if self.verbose > 0:
                    print('\nEpoch %05d: saving model to %s' % (epoch + 1, filepath))
                if self.save_weights_only:
                    self.model.save_weights(filepath, overwrite=True)
                else:
                    self.model.save(filepath, overwrite=True)
                self.best_epoch = epoch + 1
                np.savetxt(filepath + ".best_params", np.asarray([self.best_epoch, self.best, self.nepochs]))
#%%DATA INITIALIZATION
import numpy as np
from AlphaTransform import AlphaShearletTransform as AST
import shape_constraint.cadmos_lib as cl
import os
row,column = np.array([96,96])
U = cl.makeUi(row,column)
# Get shearlet elements
#Step 1 : create a shearlet transform instance
trafo = AST(column, row, [0.5]*3,real=True,parseval=True,verbose=False)
#Step 2 : get shearlets filters
shearlets = trafo.shearlets
#Step 3 : get the adjoints
adjoints = cl.get_adjoint_coeff(trafo)
#Normalize the shearlet filter banks
#NB: the order of the two calls below matters -- the adjoints must be
#normalized against the un-normalized shearlets first
adjoints = cl.shear_norm(adjoints, shearlets)
shearlets = cl.shear_norm(shearlets,shearlets)
#Compute moments constraint normalization coefficients
#the $\Psi^*_j$ are noted adj_U
adj_U = cl.comp_adj(U,adjoints).reshape(6,27,1,96,96,1)
mu = cl.comp_mu(adj_U)
def custom_loss(y_true, y_pred):
    weights = y_true[:, :, :, 1]
    y_true = y_true[:, :, :, 0]
    return K.mean(K.tf.multiply(weights, K.square(y_pred - y_true)), axis=-1)

def swish(x):
    return (K.sigmoid(x) * x)

get_custom_objects().update({'swish': swish})
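# Once registered above, the activation can be referenced by name in Keras
# layer configs, e.g. (a sketch) Dense(64, activation='swish').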
def get_model_memory_usage(batch_size, model):
    '''Compute memory usage for the model and one batch of data'''
    shapes_mem_count = 0
    for l in model.layers:
        single_layer_mem = 1
        for s in l.output_shape:
            if s is None:
                continue
            single_layer_mem *= s
        shapes_mem_count += single_layer_mem
    trainable_count = np.sum([K.count_params(p) for p in set(model.trainable_weights)])
    non_trainable_count = np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])
    total_memory = 4.0*batch_size*(shapes_mem_count + trainable_count + non_trainable_count)
    gbytes = np.round(total_memory / (1024.0 ** 3), 3)
    return gbytes
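# Example (a sketch; `model` is any built Keras model):
#
#     print('approx. GB for a batch of 32:', get_model_memory_usage(32, model))
#
# The 4.0 factor assumes 4-byte (float32) activations and weights.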
def makeU1(n, m):
    """Create a n x m numpy array with (i)_{i,j} entries where i is the ith
    line and j is the jth column
    INPUT: n positive integer (number of lines),
           m positive integer (number of columns)
    OUTPUT: n x m numpy array"""
    U1 = np.tile(np.arange(n), (m, 1)).T
    return U1

def makeU3(n, m):
    """Create a n x m numpy array with (1)_{i,j} entries where i is the ith
    line and j is the jth column
    INPUT: n positive integer (number of lines),
           m positive integer (number of columns)
    OUTPUT: n x m numpy array"""
    U3 = np.ones((n, m))
    U3 = add_extra_dimension(U3)
    return U3

def makeU6(n, m):
    """Create a n x m numpy array with (i*j)_{i,j} entries where i is the ith
    line and j is the jth column
    INPUT: n positive integer (number of lines),
           m positive integer (number of columns)
    OUTPUT: n x m numpy array"""
    U6 = np.outer(np.arange(n), np.arange(m))
    U6 = add_extra_dimension(U6)
    return U6

def add_extra_dimension(U1):
    lns = tuple(list(np.shape(U1)) + [1])
    return np.reshape(U1, lns)

def makeUi(n, m):
    """Create a 6 x n x m numpy array containing U1, U2, U3, U4, U5 and U6
    INPUT: n positive integer (number of lines),
           m positive integer (number of columns)
    OUTPUT: 6 x n x m numpy array"""
    U1 = makeU1(n, m)
    Ul = U1**2
    Uc = Ul.T
    U1T = U1.T
    U1 = add_extra_dimension(U1)
    U1T = add_extra_dimension(U1T)
    Uc = add_extra_dimension(Uc)
    Ul = add_extra_dimension(Ul)
    return np.array([U1, U1T, makeU3(n, m), Ul + Uc, Ul - Uc, makeU6(n, m)])
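# Quick shape check (square case, matching the 96 x 96 images used here):
#
#     >>> makeUi(4, 4).shape
#     (6, 4, 4, 1)
#
# The six slices are the U_i moment templates, with a trailing channel axis
# added by add_extra_dimension.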
class DeepNet(object):
    def __init__(self, network_name='DNN', img_rows=96, img_cols=96, model_file='', verbose=False, shape_constraint=False, gamma=0, shearlet=False):
        self.network_name = network_name
        self.img_rows = img_rows
        self.img_cols = img_cols
        self.U = makeUi(img_rows, img_cols)
        self.shape_constraint = shape_constraint
        self.gamma = gamma
        self.model_file = model_file
        self.shearlet = shearlet
        self.build_model(model_file, verbose)

    def build_model(self, model_file='', verbose=False):
        if model_file == '':
            raise ValueError('No model provided')
        else:
            print('Loading model...')
            print(model_file)
            print('Renaming as...')
            if self.network_name == "DNN":
                new_name = model_file.rsplit(".hdf5")[0]
                self.network_name = new_name
            print(self.network_name)
        if self.shearlet:
            # Load the structure of the model
            custom_objects = {'shearlet_loss': self.shearlet_loss, 'shearlet_metric': self.shearlet_metric}
            self.model = load_model(model_file, custom_objects=custom_objects, compile=True)
        if not self.shearlet and not self.shape_constraint:
            self.model = load_model(model_file, compile=True)
        if self.shape_constraint:
            # Load the structure of the model
            custom_objects = {'shape_loss': self.shape_loss, 'shape_metric': self.shape_metric}
            self.model = load_model(model_file, custom_objects=custom_objects, compile=False)
            # The input placeholder layers that are not connected to the output
            # are not present. Need to add them and register them.
            # START with window
            window_layer = Input(shape=(self.img_rows, self.img_cols, 1), name='window')
            self.model.inputs.append(window_layer)
            self.model.input_names.append("window")
            self.model._feed_inputs.append(window_layer)
            self.model._feed_input_names.append("window")
            self.model._feed_input_shapes.append(K.int_shape(window_layer))
            # Then with norm
            norm_layer = Input(shape=(6, 1, 1), name='norm')
            self.model.inputs.append(norm_layer)
            self.model.input_names.append("norm")
            self.model._feed_inputs.append(norm_layer)
            self.model._feed_input_names.append("norm")
            self.model._feed_input_shapes.append(K.int_shape(norm_layer))

            # Now we need to compile the model
            def convert_custom_objects(obj):
                """Handles custom object lookup.
                # Arguments
                    obj: object, dict, or list.
                # Returns
                    The same structure, where occurrences
                    of a custom object name have been replaced
                    with the custom object.
                """
                if isinstance(obj, list):
                    deserialized = []
                    for value in obj:
                        deserialized.append(convert_custom_objects(value))
                    return deserialized
                if isinstance(obj, dict):
                    deserialized = {}
                    for key, value in obj.items():
                        deserialized[key] = convert_custom_objects(value)
                    return deserialized
                if obj in custom_objects:
                    return custom_objects[obj]
                return obj

            # Now we update all optimization parameters (compile=True)
            h5dict = H5Dict(model_file)
            training_config = h5dict.get('training_config')
            if training_config is None:
                warnings.warn('No training configuration found in save file: '
                              'the model was *not* compiled. '
                              'Compile it manually.')
            else:
                training_config = json.loads(training_config.decode('utf-8'))
                optimizer_config = training_config['optimizer_config']
                optimizer = optimizers.deserialize(optimizer_config,
                                                   custom_objects=custom_objects)
                # Recover loss functions and metrics.
                loss = convert_custom_objects(training_config['loss'])
                net_metrics = convert_custom_objects(training_config['metrics'])
                if len(net_metrics) == 0:
                    net_metrics = [metrics.mse, self.shape_metric]
                sample_weight_mode = training_config['sample_weight_mode']
                loss_weights = training_config['loss_weights']
                # Compile model.
                self.model.compile(optimizer=optimizer,
                                   loss=loss,
                                   weighted_metrics=net_metrics,
                                   loss_weights=loss_weights,
                                   sample_weight_mode=sample_weight_mode)
                # Set optimizer weights.
                if 'optimizer_weights' in h5dict:
                    # Build train function (to get weight updates).
                    self.model._make_train_function()
                    optimizer_weights_group = h5dict['optimizer_weights']
                    optimizer_weight_names = [
                        n.decode('utf8') for n in
                        optimizer_weights_group['weight_names']]
                    optimizer_weight_values = [optimizer_weights_group[n] for n in
                                               optimizer_weight_names]
                    try:
                        self.model.optimizer.set_weights(optimizer_weight_values)
                    except ValueError:
                        warnings.warn('Error in loading the saved optimizer '
                                      'state. As a result, your model is '
                                      'starting with a freshly initialized '
                                      'optimizer.')
        if verbose:
            print(self.model.summary())
    def train(self, train_data, model_file='', epochs=20, batch_size=32, validation_split=0.1, logfile='log.txt'):
        # TODO: update for shape constraint support
        if self.model is None:
            raise Exception("No model found, please use build_model()")
        if model_file == '':
            model_file = self.network_name + '.hdf5'
        print('Model will be saved at %s/%s' % (os.getcwd(), model_file))
        model_checkpoint = ModelCheckpoint(model_file, monitor='val_loss', verbose=1, save_best_only=True)
        print('Fitting model...')
        self.model.fit(train_data[0], train_data[1], batch_size=batch_size, epochs=epochs, verbose=1,
                       validation_split=validation_split, shuffle=True,
                       callbacks=[model_checkpoint, LoggingCallback(filetxt=logfile, log=write_log)])
    def train_generator(self, train_files, validation_file, epochs=20, batch_size=32, model_file='',
                        nb_img_per_file=10000, validation_set_size=10000,
                        noise_std=None, SNR=None,
                        noiseless_img_hdu=0, targets_hdu=2, psf_hdu=1,
                        image_dim=96, image_per_row=100,
                        deconv_mode=None, rho_fista=1e-3,
                        risktype="GCV", reg="Dirac", reg_frac=1.0, tol=1e-12,
                        win_filename=None, win_hdu=0, mom_hdu=1,
                        logfile='log.txt', win_validation_filename=None, initial_epoch=0, keep_best_loss=False):
        if self.model is None:
            raise Exception("No model found, please use build_model()")
        if model_file == '':
            model_file = self.network_name + '.hdf5'
        print('Model will be saved at %s/%s' % (os.getcwd(), model_file))
        print('Memory usage for the model + one batch (GB): %f' % (get_model_memory_usage(batch_size, self.model)))
        with open(logfile, 'a') as f:
            f.write(self.network_name)
            f.write("\n")
        validation_data = get_batch_from_fits(validation_file,
                                               idx_list=np.arange(validation_set_size),
                                               noise_std=noise_std, SNR=SNR,
                                               noiseless_img_hdu=noiseless_img_hdu,
                                               targets_hdu=targets_hdu, psf_hdu=psf_hdu,
                                               image_dim=image_dim, image_per_row=image_per_row,
                                               deconv_mode=deconv_mode, rho_fista=rho_fista,
                                               risktype=risktype, reg=reg, tol=tol, shape_constraint=self.shape_constraint,
                                               win_filename=win_validation_filename, win_hdu=0, mom_hdu=1)
        samples_per_epoch = int(len(train_files)*np.ceil(nb_img_per_file/batch_size))
        if keep_best_loss:
            best_params_file = self.model_file.replace(".hdf5", ".hdf5.best_params")
            if os.path.isfile(best_params_file):
                best_epoch, best_val, nepoch = np.loadtxt(best_params_file)
                print("Current best parameters:", int(best_epoch), best_val)
                model_checkpoint = ModelCheckpointExtraSave(model_file, monitor='val_loss', verbose=1, save_best_only=True,
                                                            nepochs=epochs, best_epoch=int(best_epoch), best_val=best_val)
            else:
                print("Cannot access best parameters to monitor for checkpoint")
                model_checkpoint = ModelCheckpointExtraSave(model_file, monitor='val_loss', verbose=1, save_best_only=True, nepochs=epochs)
        else:
            print("Not using any previous monitored value for checkpoint")
            model_checkpoint = ModelCheckpointExtraSave(model_file, monitor='val_loss', verbose=1, save_best_only=True, nepochs=epochs)
        gen = dynamic_batches(train_files, batch_size=batch_size, nb_img_per_file=nb_img_per_file,
                              noise_std=noise_std, SNR=SNR, noiseless_img_hdu=noiseless_img_hdu,
                              targets_hdu=targets_hdu, psf_hdu=psf_hdu,
                              image_dim=image_dim, image_per_row=image_per_row,
                              deconv_mode=deconv_mode, rho_fista=rho_fista,
                              risktype=risktype, reg=reg, tol=tol, reg_frac=reg_frac,
                              shape_constraint=self.shape_constraint,
                              win_filename=win_filename, win_hdu=0, mom_hdu=1)
        history = self.model.fit_generator(gen, samples_per_epoch=samples_per_epoch, epochs=epochs,
                                           validation_data=validation_data, verbose=1,
                                           callbacks=[model_checkpoint, LoggingCallback(filetxt=logfile, log=write_log)],
                                           initial_epoch=initial_epoch)
        return history
    def train_generator_npy(self, train_files, validation_file, epochs=20, batch_size=32,
                            nb_img_per_file=10000, model_file='', logfile='log.txt'):
        # TODO: update for shape constraint support
        if self.model is None:
            raise Exception("No model found, please use build_model()")
        if model_file == '':
            model_file = self.network_name + '.hdf5'
        print('Model will be saved at %s/%s' % (os.getcwd(), model_file))
        print('Memory usage for the model + one batch (GB): %f' % (get_model_memory_usage(batch_size, self.model)))
        with open(logfile, 'a') as f:
            f.write(self.network_name)
            f.write("\n")
        validation_data = np.load(validation_file)
        samples_per_epoch = int(len(train_files)*np.ceil(nb_img_per_file/batch_size))
        model_checkpoint = ModelCheckpoint(model_file, monitor='val_loss', verbose=1, save_best_only=True)
        gen = npy_batches(train_files, batch_size=batch_size, nb_img_per_file=nb_img_per_file)
        history = self.model.fit_generator(gen, samples_per_epoch=samples_per_epoch, epochs=epochs,
                                           validation_data=validation_data, verbose=1,
                                           callbacks=[model_checkpoint, LoggingCallback(filetxt=logfile, log=write_log)])
        return history
    def predict(self, test_data, verbose=1):
        if self.model is None:
            raise Exception("No model found, please use build_model()")
        output_test = self.model.predict(test_data, batch_size=1, verbose=verbose)
        return output_test

    def get_layer_output(self, test_data, layer_idx):
        if self.model is None:
            raise Exception("No model found, please use build_model()")
        get_output = K.function([self.model.layers[0].input], [self.model.layers[layer_idx].output])
        return get_output([test_data])[0]
    def shape_loss(self, y_true, y_pred):
        residual = y_true - y_pred
        M = K.mean(K.square(y_true - y_pred), axis=-1)
        window = self.model.input[1]
        mu = self.model.input[2]
        temp = 0
        for i in range(6):
            temp += self.gamma*mu[:, i, 0, 0]*(K.square(K.sum(residual*window*self.U[i], axis=(1, 2, 3))))/2
        temp = temp/(self.img_rows*self.img_cols)
        temp = K.expand_dims(K.expand_dims(temp, axis=-1), axis=-1)
        return M + temp

    def shape_metric(self, y_true, y_pred):
        residual = y_true - y_pred
        window = self.model.input[1]
        mu = self.model.input[2]
        temp = 0
        for i in range(6):
            temp = temp + self.gamma*mu[:, i, 0, 0]*(K.square(K.sum(residual*window*self.U[i], axis=(1, 2, 3))))/2
        temp = temp/(self.img_rows*self.img_cols)
        temp = K.expand_dims(K.expand_dims(temp, axis=-1), axis=-1)
        return temp
    def shearlet_loss(self, ytrue, ypred):
        @tf.custom_gradient
        def closs(ypred):
            residual = ypred - ytrue
            temp = 0
            temp_grad = 0
            loss = K.mean(K.square(ytrue - ypred), axis=-1)
            for i in range(6):
                for j in range(27):
                    temp += mu[i, j]*K.square(K.sum(residual*adj_U[i, j], axis=(1, 2, 3)))
                    temp_grad += mu[i, j]*K.sum(residual*adj_U[i, j], axis=(1, 2, 3))*adj_U[i, j]
            temp = temp*self.gamma/(self.img_rows*self.img_cols)
            temp_grad = temp_grad*self.gamma/(self.img_rows*self.img_cols)
            temp = K.expand_dims(K.expand_dims(temp, axis=-1), axis=-1)
            temp_grad = K.permute_dimensions(temp_grad, (3, 1, 2, 0))
            loss += temp
            def grad(dy):
                return (2*(ypred - ytrue) + temp_grad)*K.expand_dims(dy, axis=-1)
            return loss, grad
        return closs(ypred)
    def shearlet_metric(self, ytrue, ypred):
        residual = ypred - ytrue
        temp = 0
        for i in range(6):
            for j in range(27):
                temp += mu[i, j]*K.square(K.sum(residual*adj_U[i, j], axis=(1, 2, 3)))
        temp = temp*self.gamma/(self.img_rows*self.img_cols)
        temp = K.expand_dims(K.expand_dims(temp, axis=-1), axis=-1)
        return temp

    def custom_mse_3(self, y_true, y_pred):
        @tf.custom_gradient
        def closs(y_pred):
            loss = K.square(y_true - y_pred)
            def grad(dy):
                return 2*dy*(y_pred - y_true)
            return loss, grad
        return closs(y_pred)
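# Typical usage sketch (illustrative file names, not part of this module):
#
#     net = DeepNet(model_file='unet_shape.hdf5', shape_constraint=True, gamma=1.0)
#     history = net.train_generator(train_files, 'validation.fits',
#                                   epochs=20, batch_size=32,
#                                   win_filename='window.fits',
#                                   win_validation_filename='window_valid.fits')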
import torch
import numpy as np
from torch.utils.data import Dataset
class DomainAdaptationMoonDataset(Dataset):
    r"""Domain adaptation version of the moon dataset object to iterate and collect samples.
    """
    def __init__(self, data):
        self.xs, self.ys, self.xt, self.yt = data

    def __len__(self):
        return self.xs.shape[0]

    def __getitem__(self, idx):
        xs = self.xs[idx]
        ys = self.ys[idx]
        xt = self.xt[idx]
        yt = self.yt[idx]
        # convert to tensors
        xs = torch.from_numpy(xs.astype(np.float32))
        ys = torch.from_numpy(np.array(ys).astype(np.int64))
        xt = torch.from_numpy(xt.astype(np.float32))
        yt = torch.from_numpy(np.array(yt).astype(np.int64))
        return xs, ys, xt, yt
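# Minimal construction sketch with synthetic source/target arrays (shapes are
# illustrative; the real arrays come from the .npy files loaded below):
#
#     xs = np.random.randn(100, 2); ys = np.random.randint(0, 2, 100)
#     xt = np.random.randn(100, 2); yt = np.random.randint(0, 2, 100)
#     ds = DomainAdaptationMoonDataset((xs, ys, xt, yt))
#     xs0, ys0, xt0, yt0 = ds[0]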
def create_domain_adaptation_data(config):
    """Creates a domain adaptation version of the moon datasets and dataloader"""
    # load data from file
    Xs_train = np.load(config.dataloader.MoonsNS.source_train_x)
    Ys_train = np.argmax(np.load(config.dataloader.MoonsNS.source_train_y), axis=1)
    Xt_train = np.load(config.dataloader.MoonsNS.target_train_x)
    Yt_train = np.argmax(np.load(config.dataloader.MoonsNS.target_train_y), axis=1)
    Xs_eval = np.load(config.dataloader.MoonsNS.source_valid_x)
    Ys_eval = np.argmax(np.load(config.dataloader.MoonsNS.source_valid_y), axis=1)
    Xt_eval = np.load(config.dataloader.MoonsNS.target_valid_x)
    Yt_eval = np.argmax(np.load(config.dataloader.MoonsNS.target_valid_y), axis=1)
    Xs_test = np.load(config.dataloader.MoonsNS.source_test_x)
    Ys_test = np.argmax(np.load(config.dataloader.MoonsNS.source_test_y), axis=1)
    Xt_test = np.load(config.dataloader.MoonsNS.target_test_x)
    Yt_test = np.argmax(np.load(config.dataloader.MoonsNS.target_test_y), axis=1)

    if config.dataloader.MoonsNS.loading_schema == 'train-eval':
        train_loader = torch.utils.data.DataLoader(
            DomainAdaptationMoonDataset((Xs_train, Ys_train, Xt_train, Yt_train)),
            batch_size=config.trainer.batchsize,
            shuffle=True
        )
        eval_loader = torch.utils.data.DataLoader(
            DomainAdaptationMoonDataset((Xs_eval, Ys_eval, Xt_eval, Yt_eval)),
            batch_size=config.trainer.batchsize,
            shuffle=False
        )
        return (train_loader, eval_loader), (Xs_train, Xs_eval, Ys_train, Ys_eval, Xt_train, Xt_eval, Yt_train, Yt_eval)
    elif config.dataloader.MoonsNS.loading_schema == 'train-test':
        train_loader = torch.utils.data.DataLoader(
            DomainAdaptationMoonDataset((Xs_train, Ys_train, Xt_train, Yt_train)),
            batch_size=config.trainer.batchsize,
            shuffle=True
        )
        eval_loader = torch.utils.data.DataLoader(
            DomainAdaptationMoonDataset((Xs_eval, Ys_eval, Xt_eval, Yt_eval)),
            batch_size=config.trainer.batchsize,
            shuffle=False
        )
        test_loader = torch.utils.data.DataLoader(
            DomainAdaptationMoonDataset((Xs_test, Ys_test, Xt_test, Yt_test)),
            batch_size=config.trainer.batchsize,
            shuffle=False
        )
        return (train_loader, eval_loader, test_loader), (Xs_train, Xs_eval, Xs_test, Ys_train, Ys_eval, Ys_test,
                                                          Xt_train, Xt_eval, Xt_test, Yt_train, Yt_eval, Yt_test)
    elif config.dataloader.MoonsNS.loading_schema == 'test':
        test_loader = torch.utils.data.DataLoader(
            DomainAdaptationMoonDataset((Xs_test, Ys_test, Xt_test, Yt_test)),
            batch_size=config.trainer.batchsize,
            shuffle=False
        )
        return test_loader, (Xs_test, Ys_test, Xt_test, Yt_test)
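# Usage sketch (assumes an attribute-style config object, e.g. OmegaConf,
# carrying the dataloader.MoonsNS.* paths and trainer.batchsize used above):
#
#     loaders, arrays = create_domain_adaptation_data(config)
#     train_loader, eval_loader = loaders  # with the 'train-eval' schema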
"""
@created by: heyao
@created at: 2021-12-09 13:30:09
"""
import random
import os
import numpy as np
import torch
def seed_everything(seed=42):
    random.seed(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
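# Reproducibility check (a sketch): two draws after reseeding should match.
#
#     seed_everything(42); a = torch.rand(3)
#     seed_everything(42); b = torch.rand(3)
#     assert torch.equal(a, b)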
from taurex.log import Logger
import numpy as np
class Output(Logger):
    def __init__(self, name):
        super().__init__(name)

    def open(self):
        raise NotImplementedError

    def create_group(self, group_name):
        raise NotImplementedError

    def close(self):
        raise NotImplementedError

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, type, value, tb):
        self.close()

    def store_dictionary(self, dictionary, group_name=None):
        from taurex.util.util import recursively_save_dict_contents_to_output
        out = self
        if group_name is not None:
            out = self.create_group(group_name)
        recursively_save_dict_contents_to_output(out, dictionary)


class OutputGroup(Output):
    def __init__(self, name):
        super().__init__(name)
        self._name = name

    def write_array(self, array_name, array, metadata=None):
        raise NotImplementedError

    def write_list(self, list_name, list_array, metadata=None):
        arr = np.array(list_array)
        self.write_array(list_name, arr)

    def write_scalar(self, scalar_name, scalar, metadata=None):
        raise NotImplementedError

    def write_string(self, string_name, string, metadata=None):
        raise NotImplementedError

    def write_string_array(self, string_name, string_array, metadata=None):
        raise NotImplementedError
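# A minimal in-memory subclass sketch (hypothetical, for illustration only;
# real backends such as an HDF5 writer implement this same interface):
#
#     class DictOutput(OutputGroup):
#         def __init__(self, name):
#             super().__init__(name)
#             self._store = {}
#         def open(self):
#             pass
#         def close(self):
#             pass
#         def create_group(self, group_name):
#             group = DictOutput(group_name)
#             self._store[group_name] = group
#             return group
#         def write_array(self, array_name, array, metadata=None):
#             self._store[array_name] = np.asarray(array)
#
#     with DictOutput('out') as out:
#         out.write_list('spectrum', [1.0, 2.0])  # stored via write_array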
"""
This library contains metrics to quantify the shape of a waveform
1. threshold_amplitude - only look at a metric while oscillatory amplitude is above a set percentile threshold
2. rdratio - Ratio of rise time and decay time
3. pt_duration - Peak and trough durations and their ratio
3. symPT - symmetry between peak and trough
4. symRD - symmetry between rise and decay
5. pt_sharp - calculate sharpness of oscillatory extrema
6. rd_steep - calculate rise and decay steepness
7. ptsr - calculate extrema sharpness ratio
8. rdsr - calculate rise-decay steepness ratio
9. average_waveform_trigger - calculate the average waveform of an oscillation by triggering on peak or trough
10. gips_swm - identify a repeated waveform in the signal
11. rd_diff - normalized difference between rise and decay time
"""
from __future__ import division
import numpy as np
from analysis_helpers.misshapen.nonshape import ampT, bandpass_default, findpt
def threshold_amplitude(x, metric, samples, percentile, frange, Fs, filter_fn=None, filter_kwargs=None):
    """
    Exclude from analysis the samples in which the amplitude falls below a defined percentile

    Parameters
    ----------
    x : numpy array
        raw time series
    metric : numpy array
        series of measures corresponding to time samples in 'samples' (e.g. peak sharpness)
    samples : numpy array
        time samples at which metric was computed (e.g. peaks)
    percentile : float
        percentile cutoff for exclusion (e.g. 10 = bottom 10% excluded)
    frange : [lo, hi]
        frequency range of interest for calculating amplitude
    Fs : float
        Sampling rate (Hz)

    Returns
    -------
    metric_new : numpy array
        same as input 'metric' but only for samples above the amplitude threshold
    samples_new : numpy array
        samples above the amplitude threshold
    """
    # Do nothing if threshold is 0
    if percentile == 0:
        return metric, samples

    # Default filter function
    if filter_fn is None:
        filter_fn = bandpass_default
    if filter_kwargs is None:
        filter_kwargs = {}

    # Calculate amplitude time series and threshold
    amp = ampT(x, frange, Fs, rmv_edge=False, filter_fn=filter_fn, filter_kwargs=filter_kwargs)
    amp = amp[samples]
    amp_threshold = np.percentile(amp, percentile)

    # Update samples used
    samples_new = samples[amp >= amp_threshold]
    metric_new = metric[amp >= amp_threshold]
    return metric_new, samples_new
def rdratio(Ps, Ts):
    """
    Calculate the ratio between rise time and decay time for oscillations

    Note: must have the same number of peaks and troughs
    Note: the final rise or decay is unused

    Parameters
    ----------
    Ps : numpy array 1d
        time points of oscillatory peaks
    Ts : numpy array 1d
        time points of oscillatory troughs

    Returns
    -------
    riset : array-like 1d
        rise times for each oscillation
    decayt : array-like 1d
        decay times for each oscillation
    rdr : array-like 1d
        rise-decay ratios for each oscillation
    """
    # Assure input has the same number of peaks and troughs
    if len(Ts) != len(Ps):
        raise ValueError('Length of peaks and troughs arrays must be equal')

    # Assure Ps and Ts are numpy arrays
    if type(Ps) == list or type(Ts) == list:
        print('Converted Ps and Ts to numpy arrays')
        Ps = np.array(Ps)
        Ts = np.array(Ts)

    # Calculate rise and decay times
    if Ts[0] < Ps[0]:
        riset = Ps[:-1] - Ts[:-1]
        decayt = Ts[1:] - Ps[:-1]
    else:
        riset = Ps[1:] - Ts[:-1]
        decayt = Ts[:-1] - Ps[:-1]

    # Calculate ratio between each rise and decay time
    rdr = riset / decayt.astype(float)
    return riset, decayt, rdr
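# Worked example: with troughs Ts = np.array([0, 10]) and peaks
# Ps = np.array([6, 16]), the oscillation rises for 6 samples and decays for
# 4, so riset = [6], decayt = [4], rdr = [1.5] (the final rise is unused).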
def pt_duration(Ps, Ts, zeroxR, zeroxD):
    """
    Calculate the ratio between peak and trough durations

    NOTE: must have the same number of peaks and troughs
    NOTE: the durations of the first and last extrema will be estimated by using the only zerox they have

    Parameters
    ----------
    Ps : numpy array 1d
        time points of oscillatory peaks
    Ts : numpy array 1d
        time points of oscillatory troughs
    zeroxR : array-like 1d
        indices at which oscillatory rising zerocrossings occur
    zeroxD : array-like 1d
        indices at which oscillatory decaying zerocrossings occur

    Returns
    -------
    Ps_dur : array-like 1d
        durations of each peak
    Ts_dur : array-like 1d
        durations of each trough
    ptr : array-like 1d
        peak-trough duration ratios for each oscillation
    """
    # Assure input has the same number of peaks and troughs
    if len(Ts) != len(Ps):
        raise ValueError('Length of peaks and troughs arrays must be equal')

    # Assure Ps and Ts are numpy arrays
    if type(Ps) == list or type(Ts) == list:
        print('Converted Ps and Ts to numpy arrays')
        Ps = np.array(Ps)
        Ts = np.array(Ts)

    # Calculate the duration of each peak and trough until last
    Ps_dur = np.zeros(len(Ps))
    Ts_dur = np.zeros(len(Ts))
    if Ps[0] < Ts[0]:
        # treat first extrema differently
        Ps_dur[0] = 2*(zeroxD[0] - Ps[0])
        # duration of each peak
        for i in range(1, len(Ps) - 1):
            Ps_dur[i] = (zeroxD[i] - zeroxR[i - 1])
        # duration of each trough
        for i in range(len(Ts) - 1):
            Ts_dur[i] = (zeroxR[i] - zeroxD[i])
    else:
        Ts_dur[0] = 2*(zeroxR[0] - Ts[0])
        # duration of each peak
        for i in range(len(Ps) - 1):
            Ps_dur[i] = (zeroxD[i] - zeroxR[i])
        # duration of each trough
        for i in range(1, len(Ts) - 1):
            Ts_dur[i] = (zeroxR[i] - zeroxD[i - 1])

    # Treat last extrema differently
    if Ps[-1] < Ts[-1]:
        Ps_dur[-1] = (zeroxD[-1] - zeroxR[-1])
        Ts_dur[-1] = 2*(Ts[-1] - zeroxD[-1])
    else:
        Ps_dur[-1] = 2*(Ps[-1] - zeroxR[-1])
        Ts_dur[-1] = (zeroxR[-1] - zeroxD[-1])

    ptr = Ps_dur / Ts_dur
    return Ps_dur, Ts_dur, ptr
def symPT(x, Ps, Ts, window_half):
"""
Measure of asymmetry between oscillatory peaks and troughs
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
window_half : int
Number of samples around extrema to analyze, in EACH DIRECTION
Returns
-------
sym : array-like 1d
measure of symmetry between each trough-peak pair
Result of 0 means the peak and trough are perfectly symmetric
    Notes
    -----
    Computed as the energy of the difference between the sign-flipped peak
    and the trough, normalized by the larger of the two extrema energies;
    values near 0 indicate highly symmetric peak-trough pairs.
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
E = len(Ps)
sym = np.zeros(E)
for e in range(E):
# Find region around each peak and trough. Make extrema be 0
peak = x[Ps[e]-window_half:Ps[e]+window_half+1] - x[Ps[e]]
peak = -peak
trough = x[Ts[e]-window_half:Ts[e]+window_half+1] - x[Ts[e]]
# Compare the two measures
peakenergy = np.sum(peak**2)
troughenergy = np.sum(trough**2)
energy = np.max((peakenergy,troughenergy))
diffenergy = np.sum((peak-trough)**2)
sym[e] = diffenergy / energy
return sym
def symRD(x, Ts, window_full):
"""
Measure of asymmetry between oscillatory peaks and troughs
Parameters
----------
x : array-like 1d
voltage time series
Ts : array-like 1d
time points of oscillatory troughs
window_full : int
Number of samples after peak to analyze for decay and before peak to analyze for rise
Returns
-------
sym : array-like 1d
measure of symmetry between each rise and decay
"""
T = len(Ts)
sym = np.zeros(T)
for t in range(T):
# Find regions for the rise and the decay
rise = x[Ts[t]:Ts[t]+window_full+1] - x[Ts[t]]
decay = x[Ts[t]-window_full:Ts[t]+1] - x[Ts[t]]
# Ensure the minimum value is 0
rise[rise<0] = 0
decay[decay<0] = 0
# Make rises and decays go the same direction
rise = np.flipud(rise)
# Calculate absolute difference between each point in the rise and decay
diffenergy = np.sum(np.abs(rise-decay))
# Normalize this difference by the max voltage value at each point
rise_decay_maxes = np.max(np.vstack((rise,decay)),axis=0)
energy = np.sum(rise_decay_maxes)
# Compare the two measures
sym[t] = diffenergy / energy
return sym
def pt_sharp(x, Ps, Ts, window_half, method='diff'):
"""
Calculate the sharpness of extrema
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
    window_half : int
        Number of samples in each direction around extrema to use for sharpness estimation
    method : str ('diff' or 'deriv')
        'diff': mean voltage drop from the extremum to the samples window_half away
        'deriv': mean absolute derivative within window_half samples of the extremum
Returns
-------
Psharps : array-like 1d
sharpness of peaks
Tsharps : array-like 1d
sharpness of troughs
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Calculate the sharpness of each peak
P = len(Ps)
Psharps = np.zeros(P)
for e in range(P):
if method == 'deriv':
Edata = x[Ps[e]-window_half: Ps[e]+window_half+1]
Psharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Psharps[e] = np.mean((x[Ps[e]]-x[Ps[e]-window_half],x[Ps[e]]-x[Ps[e]+window_half]))
T = len(Ts)
Tsharps = np.zeros(T)
for e in range(T):
if method == 'deriv':
Edata = x[Ts[e]-window_half: Ts[e]+window_half+1]
Tsharps[e] = np.mean(np.abs(np.diff(Edata)))
elif method == 'diff':
Tsharps[e] = np.mean((x[Ts[e]-window_half]-x[Ts[e]],x[Ts[e]+window_half]-x[Ts[e]]))
return Psharps, Tsharps
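# Worked example (illustrative): with method='diff' and window_half=3, the
# sharpness of the peak at Ps[e] is the mean of the two voltage drops
# x[Ps[e]] - x[Ps[e]-3] and x[Ps[e]] - x[Ps[e]+3], so spike-like extrema
# score higher than sinusoidal ones of the same amplitude.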
def rd_steep(x, Ps, Ts):
"""
Calculate the max steepness of rises and decays
Parameters
----------
x : array-like 1d
voltage time series
Ps : array-like 1d
time points of oscillatory peaks
Ts : array-like 1d
time points of oscillatory troughs
Returns
-------
risesteep : array-like 1d
max steepness in each period for rise
decaysteep : array-like 1d
max steepness in each period for decay
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Calculate rise and decay steepness
E = len(Ps) - 1
risesteep = np.zeros(E)
for t in range(E):
if Ts[0] < Ps[0]:
rise = x[Ts[t]:Ps[t]+1]
else:
rise = x[Ts[t]:Ps[t+1]+1]
risesteep[t] = np.max(np.diff(rise))
decaysteep = np.zeros(E)
for p in range(E):
if Ts[0] < Ps[0]:
decay = x[Ps[p]:Ts[p+1]+1]
else:
decay = x[Ps[p]:Ts[p]+1]
decaysteep[p] = -np.min(np.diff(decay))
return risesteep, decaysteep
def ptsr(Psharp,Tsharp, log = True, polarity = True):
if polarity:
sharpnessratio = Psharp/Tsharp
else:
sharpnessratio = np.max((Psharp/Tsharp,Tsharp/Psharp))
if log:
sharpnessratio = np.log10(sharpnessratio)
return sharpnessratio
def rdsr(Rsteep,Dsteep, log = True, polarity = True):
if polarity:
steepnessratio = Rsteep/Dsteep
else:
steepnessratio = np.max((Rsteep/Dsteep,Dsteep/Rsteep))
if log:
steepnessratio = np.log10(steepnessratio)
return steepnessratio
def average_waveform_trigger(x, f_range, Fs, avgwave_halflen, trigger = 'trough'):
"""
Calculate the average waveform of a signal by triggering on the peaks or troughs
Parameters
----------
x : array-like 1d
voltage time series
f_range : (low, high), Hz
frequency range for narrowband signal of interest
Fs : float
The sampling rate
    avgwave_halflen : float
        length of time (seconds) to include in the average on each side of the trigger
trigger : str
'trough' to trigger the averaging on each trough
'peak' to trigger the averaging on each peak
Returns
-------
avg_wave : array-like 1d
the average waveform in 'x' in the frequency 'f_range' triggered on 'trigger'
"""
# Set up the parameters for averaging
dt = 1/float(Fs)
t_avg_wave = np.arange(-avgwave_halflen,avgwave_halflen+dt, dt)
N_samples_halflen = int(avgwave_halflen*Fs)
# Find the trigger points for averaging
Ps, Ts = findpt(x, f_range, Fs, boundary = N_samples_halflen+1)
if trigger == 'trough':
trig_samps = Ts
elif trigger == 'peak':
trig_samps = Ps
else:
raise ValueError('Trigger not implemented')
# Do the averaging at each trigger
avg_wave = np.zeros(int(N_samples_halflen*2+1))
N_triggers = len(trig_samps)
for i in range(N_triggers):
avg_wave += x[trig_samps[i]-N_samples_halflen:trig_samps[i]+N_samples_halflen+1]
avg_wave = avg_wave/N_triggers
return t_avg_wave, avg_wave
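# Usage sketch (illustrative; the signal and parameter values are hypothetical):
# t, wave = average_waveform_trigger(x, f_range=(13, 30), Fs=1000.,
#                                    avgwave_halflen=0.1, trigger='trough')
# yields a 0.2-s average waveform (201 samples at 1 kHz) centered on troughs.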
def gips_swm(x, Fs, L, G,
max_iterations = 100, T = 1, window_starts_custom = None):
"""
Sliding window matching methods to find recurring patterns in a time series
using the method by <NAME> in J Neuro Methods 2017.
See matlab code at: https://github.com/bartgips/SWM
Calculate the average waveform of a signal by triggering on the peaks or troughs
Note should high-pass if looking at high frequency activity so that it does not converge on a low frequency motif
L and G should be chosen to be about the size of the motif of interest, and the N derived should be about the number of occurrences
Parameters
----------
x : array-like 1d
voltage time series
Fs : float
The sampling rate (samples per second)
L : float
Window length (seconds)
G : float
Minimum window spacing (seconds)
T : float
temperature parameter. Controls acceptance probability
max_iterations : int
Maximum number of iterations for the pattern finder
window_starts_custom : np.ndarray (1d)
Pre-set locations of initial windows (instead of evenly spaced by 2G)
Returns
-------
avg_wave : np.ndarray (1d)
the average waveform in 'x' in the frequency 'f_range' triggered on 'trigger'
window_starts : np.ndarray (1d)
indices at which each window begins for the final set of windows
J : np.ndarray (1d)
History of costs
"""
# Initialize window positions, separated by 2*G
L_samp = int(L*Fs)
G_samp = int(G*Fs)
if window_starts_custom is None:
window_starts = np.arange(0,len(x)-L_samp,2*G_samp)
else:
window_starts = window_starts_custom
# Calculate the total number of windows
N_windows = len(window_starts)
# Calculate initial cost
J = np.zeros(max_iterations)
J[0] = _gips_compute_J(x, window_starts, L_samp)
# Randomly sample windows with replacement
random_window_idx = np.random.choice(range(N_windows),size=max_iterations)
# Optimize X
iter_num = 1
while iter_num < max_iterations:
# Pick a random window position
window_idx_replace = random_window_idx[iter_num]
        # Find a new allowed position for the chosen window and update it
        # in a copy of the window-start array
window_starts_temp = np.copy(window_starts)
window_starts_temp[window_idx_replace] = _gips_find_new_windowidx(window_starts, G_samp, L_samp, len(x)-L_samp)
# Calculate the cost
J_temp = _gips_compute_J(x, window_starts_temp, L_samp)
# Calculate the change in cost function
deltaJ = J_temp - J[iter_num-1]
# Calculate the acceptance probability
p_accept = np.exp(-deltaJ/float(T))
# Accept update to J with a certain probability
if np.random.rand() < p_accept:
# Update J
J[iter_num] = J_temp
# Update X
window_starts = window_starts_temp
else:
# Update J
J[iter_num] = J[iter_num-1]
# Update iteration number
iter_num += 1
# Calculate average wave
avg_wave = np.zeros(L_samp)
for w in range(N_windows):
avg_wave = avg_wave + x[window_starts[w]:window_starts[w]+L_samp]
avg_wave = avg_wave/float(N_windows)
return avg_wave, window_starts, J
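# Usage sketch (illustrative; parameter values are hypothetical): to look for
# a ~100-ms motif in a 1-kHz recording with windows at least 100 ms apart:
# avg_wave, window_starts, J = gips_swm(x, Fs=1000., L=0.1, G=0.1,
#                                       max_iterations=500, T=1)
# J should (noisily) decrease as the windows converge on a recurring pattern.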
def _gips_compute_J(x, window_starts, L_samp):
"""Compute the cost, which is the average distance between all windows"""
# Get all windows and zscore them
N_windows = len(window_starts)
windows = np.zeros((N_windows,L_samp))
for w in range(N_windows):
temp = x[window_starts[w]:window_starts[w]+L_samp]
windows[w] = (temp - np.mean(temp))/np.std(temp)
# Calculate distances for all pairs of windows
d = []
for i in range(N_windows):
for j in range(i+1,N_windows):
window_diff = windows[i]-windows[j]
d_temp = 1/float(L_samp) * np.sum(window_diff**2)
d.append(d_temp)
# Calculate cost
J = 1/float(2*(N_windows-1))*np.sum(d)
return J
def _gips_find_new_windowidx(window_starts, G_samp, L_samp, N_samp):
"""Find a new sample for the starting window"""
    while True:
        # Generate a random candidate start sample
        new_samp = np.random.randint(N_samp)
        # Check how close the candidate is to the other window starts
        dists = np.abs(window_starts - new_samp)
        if np.min(dists) > G_samp:
            return new_samp
def rd_diff(Ps, Ts):
"""
Calculate the normalized difference between rise and decay times,
as Gips, 2017 refers to as the "skewnwss index"
SI = (T_up-T_down)/(T_up+T_down)
Parameters
----------
Ps : numpy arrays 1d
time points of oscillatory peaks
Ts : numpy arrays 1d
time points of osillatory troughs
Returns
-------
rdr : array-like 1d
rise-decay ratios for each oscillation
"""
# Assure input has the same number of peaks and troughs
if len(Ts) != len(Ps):
raise ValueError('Length of peaks and troughs arrays must be equal')
# Assure Ps and Ts are numpy arrays
    if isinstance(Ps, list) or isinstance(Ts, list):
print('Converted Ps and Ts to numpy arrays')
Ps = np.array(Ps)
Ts = np.array(Ts)
# Calculate rise and decay times
if Ts[0] < Ps[0]:
riset = Ps[:-1] - Ts[:-1]
decayt = Ts[1:] - Ps[:-1]
else:
riset = Ps[1:] - Ts[:-1]
decayt = Ts[:-1] - Ps[:-1]
    # Calculate the normalized difference between each rise and decay time
    rdr = (riset - decayt) / (riset + decayt).astype(float)
return riset, decayt, rdr
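# Worked example (illustrative): for troughs at [0, 10, 20] and peaks at
# [3, 13, 23], riset = [3, 3] and decayt = [7, 7], so the skewness index is
# (3 - 7) / (3 + 7) = -0.4 for each cycle (rises faster than decays).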
|
[
"numpy.sum",
"numpy.abs",
"numpy.random.randint",
"numpy.arange",
"numpy.mean",
"numpy.copy",
"numpy.std",
"numpy.max",
"numpy.log10",
"analysis_helpers.misshapen.nonshape.ampT",
"numpy.flipud",
"numpy.percentile",
"analysis_helpers.misshapen.nonshape.findpt",
"numpy.min",
"numpy.vstack",
"numpy.zeros",
"numpy.diff",
"numpy.array",
"numpy.random.rand"
] |
[((2175, 2265), 'analysis_helpers.misshapen.nonshape.ampT', 'ampT', (['x', 'frange', 'Fs'], {'rmv_edge': '(False)', 'filter_fn': 'filter_fn', 'filter_kwargs': 'filter_kwargs'}), '(x, frange, Fs, rmv_edge=False, filter_fn=filter_fn, filter_kwargs=\n filter_kwargs)\n', (2179, 2265), False, 'from analysis_helpers.misshapen.nonshape import ampT, bandpass_default, findpt\n'), ((2306, 2336), 'numpy.percentile', 'np.percentile', (['amp', 'percentile'], {}), '(amp, percentile)\n', (2319, 2336), True, 'import numpy as np\n'), ((7005, 7016), 'numpy.zeros', 'np.zeros', (['E'], {}), '(E)\n', (7013, 7016), True, 'import numpy as np\n'), ((8037, 8048), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (8045, 8048), True, 'import numpy as np\n'), ((9626, 9637), 'numpy.zeros', 'np.zeros', (['P'], {}), '(P)\n', (9634, 9637), True, 'import numpy as np\n'), ((9968, 9979), 'numpy.zeros', 'np.zeros', (['T'], {}), '(T)\n', (9976, 9979), True, 'import numpy as np\n'), ((11036, 11047), 'numpy.zeros', 'np.zeros', (['E'], {}), '(E)\n', (11044, 11047), True, 'import numpy as np\n'), ((11248, 11259), 'numpy.zeros', 'np.zeros', (['E'], {}), '(E)\n', (11256, 11259), True, 'import numpy as np\n'), ((12901, 12954), 'numpy.arange', 'np.arange', (['(-avgwave_halflen)', '(avgwave_halflen + dt)', 'dt'], {}), '(-avgwave_halflen, avgwave_halflen + dt, dt)\n', (12910, 12954), True, 'import numpy as np\n'), ((13058, 13112), 'analysis_helpers.misshapen.nonshape.findpt', 'findpt', (['x', 'f_range', 'Fs'], {'boundary': '(N_samples_halflen + 1)'}), '(x, f_range, Fs, boundary=N_samples_halflen + 1)\n', (13064, 13112), False, 'from analysis_helpers.misshapen.nonshape import ampT, bandpass_default, findpt\n'), ((15443, 15467), 'numpy.zeros', 'np.zeros', (['max_iterations'], {}), '(max_iterations)\n', (15451, 15467), True, 'import numpy as np\n'), ((16818, 16834), 'numpy.zeros', 'np.zeros', (['L_samp'], {}), '(L_samp)\n', (16826, 16834), True, 'import numpy as np\n'), ((17236, 17265), 'numpy.zeros', 'np.zeros', (['(N_windows, L_samp)'], {}), '((N_windows, L_samp))\n', (17244, 17265), True, 'import numpy as np\n'), ((3278, 3290), 'numpy.array', 'np.array', (['Ps'], {}), '(Ps)\n', (3286, 3290), True, 'import numpy as np\n'), ((3304, 3316), 'numpy.array', 'np.array', (['Ts'], {}), '(Ts)\n', (3312, 3316), True, 'import numpy as np\n'), ((4865, 4877), 'numpy.array', 'np.array', (['Ps'], {}), '(Ps)\n', (4873, 4877), True, 'import numpy as np\n'), ((4891, 4903), 'numpy.array', 'np.array', (['Ts'], {}), '(Ts)\n', (4899, 4903), True, 'import numpy as np\n'), ((7323, 7340), 'numpy.sum', 'np.sum', (['(peak ** 2)'], {}), '(peak ** 2)\n', (7329, 7340), True, 'import numpy as np\n'), ((7362, 7381), 'numpy.sum', 'np.sum', (['(trough ** 2)'], {}), '(trough ** 2)\n', (7368, 7381), True, 'import numpy as np\n'), ((7397, 7431), 'numpy.max', 'np.max', (['(peakenergy, troughenergy)'], {}), '((peakenergy, troughenergy))\n', (7403, 7431), True, 'import numpy as np\n'), ((7452, 7480), 'numpy.sum', 'np.sum', (['((peak - trough) ** 2)'], {}), '((peak - trough) ** 2)\n', (7458, 7480), True, 'import numpy as np\n'), ((8396, 8411), 'numpy.flipud', 'np.flipud', (['rise'], {}), '(rise)\n', (8405, 8411), True, 'import numpy as np\n'), ((8701, 8725), 'numpy.sum', 'np.sum', (['rise_decay_maxes'], {}), '(rise_decay_maxes)\n', (8707, 8725), True, 'import numpy as np\n'), ((11628, 11670), 'numpy.max', 'np.max', (['(Psharp / Tsharp, Tsharp / Psharp)'], {}), '((Psharp / Tsharp, Tsharp / Psharp))\n', (11634, 11670), True, 'import numpy as np\n'), ((11703, 11727), 
'numpy.log10', 'np.log10', (['sharpnessratio'], {}), '(sharpnessratio)\n', (11711, 11727), True, 'import numpy as np\n'), ((11901, 11943), 'numpy.max', 'np.max', (['(Rsteep / Dsteep, Dsteep / Rsteep)'], {}), '((Rsteep / Dsteep, Dsteep / Rsteep))\n', (11907, 11943), True, 'import numpy as np\n'), ((11976, 12000), 'numpy.log10', 'np.log10', (['steepnessratio'], {}), '(steepnessratio)\n', (11984, 12000), True, 'import numpy as np\n'), ((15998, 16020), 'numpy.copy', 'np.copy', (['window_starts'], {}), '(window_starts)\n', (16005, 16020), True, 'import numpy as np\n'), ((17738, 17747), 'numpy.sum', 'np.sum', (['d'], {}), '(d)\n', (17744, 17747), True, 'import numpy as np\n'), ((17983, 18008), 'numpy.random.randint', 'np.random.randint', (['N_samp'], {}), '(N_samp)\n', (18000, 18008), True, 'import numpy as np\n'), ((18088, 18120), 'numpy.abs', 'np.abs', (['(window_starts - new_samp)'], {}), '(window_starts - new_samp)\n', (18094, 18120), True, 'import numpy as np\n'), ((18955, 18967), 'numpy.array', 'np.array', (['Ps'], {}), '(Ps)\n', (18963, 18967), True, 'import numpy as np\n'), ((18981, 18993), 'numpy.array', 'np.array', (['Ts'], {}), '(Ts)\n', (18989, 18993), True, 'import numpy as np\n'), ((8522, 8542), 'numpy.abs', 'np.abs', (['(rise - decay)'], {}), '(rise - decay)\n', (8528, 8542), True, 'import numpy as np\n'), ((8652, 8676), 'numpy.vstack', 'np.vstack', (['(rise, decay)'], {}), '((rise, decay))\n', (8661, 8676), True, 'import numpy as np\n'), ((11215, 11228), 'numpy.diff', 'np.diff', (['rise'], {}), '(rise)\n', (11222, 11228), True, 'import numpy as np\n'), ((16484, 16500), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (16498, 16500), True, 'import numpy as np\n'), ((17399, 17411), 'numpy.std', 'np.std', (['temp'], {}), '(temp)\n', (17405, 17411), True, 'import numpy as np\n'), ((18132, 18145), 'numpy.min', 'np.min', (['dists'], {}), '(dists)\n', (18138, 18145), True, 'import numpy as np\n'), ((9866, 9945), 'numpy.mean', 'np.mean', (['(x[Ps[e]] - x[Ps[e] - window_half], x[Ps[e]] - x[Ps[e] + window_half])'], {}), '((x[Ps[e]] - x[Ps[e] - window_half], x[Ps[e]] - x[Ps[e] + window_half]))\n', (9873, 9945), True, 'import numpy as np\n'), ((10208, 10287), 'numpy.mean', 'np.mean', (['(x[Ts[e] - window_half] - x[Ts[e]], x[Ts[e] + window_half] - x[Ts[e]])'], {}), '((x[Ts[e] - window_half] - x[Ts[e]], x[Ts[e] + window_half] - x[Ts[e]]))\n', (10215, 10287), True, 'import numpy as np\n'), ((11431, 11445), 'numpy.diff', 'np.diff', (['decay'], {}), '(decay)\n', (11438, 11445), True, 'import numpy as np\n'), ((17384, 17397), 'numpy.mean', 'np.mean', (['temp'], {}), '(temp)\n', (17391, 17397), True, 'import numpy as np\n'), ((17632, 17656), 'numpy.sum', 'np.sum', (['(window_diff ** 2)'], {}), '(window_diff ** 2)\n', (17638, 17656), True, 'import numpy as np\n'), ((9793, 9807), 'numpy.diff', 'np.diff', (['Edata'], {}), '(Edata)\n', (9800, 9807), True, 'import numpy as np\n'), ((10135, 10149), 'numpy.diff', 'np.diff', (['Edata'], {}), '(Edata)\n', (10142, 10149), True, 'import numpy as np\n')]
|
import gc
import os
from glob import glob
import numpy as np
from PIL import Image
import pickle
from tqdm import tqdm_notebook, tqdm
from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net
from models.linknet import LinkNet34
from models.deeplabv3.deeplabv3plus import DeepLabV3Plus
from backboned_unet import Unet
import segmentation_models_pytorch as smp
from torchvision import transforms
import cv2
from albumentations import CLAHE
import json
from models.Transpose_unet.unet.model import Unet as Unet_t
from models.octave_unet.unet.model import OctaveUnet
from sklearn.model_selection import KFold, StratifiedKFold
import matplotlib.pyplot as plt
import copy
import torch
class Test(object):
def __init__(self, model_type, image_size, mean, std, t=None):
# Models
self.unet = None
        self.image_size = image_size  # model input size
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model_type = model_type
self.t = t
self.mean = mean
self.std = std
def build_model(self):
"""Build generator and discriminator."""
if self.model_type == 'U_Net':
self.unet = U_Net(img_ch=3, output_ch=1)
elif self.model_type == 'AttU_Net':
self.unet = AttU_Net(img_ch=3, output_ch=1)
elif self.model_type == 'unet_resnet34':
# self.unet = Unet(backbone_name='resnet34', classes=1)
self.unet = smp.Unet('resnet34', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_resnet50':
self.unet = smp.Unet('resnet50', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_se_resnext50_32x4d':
self.unet = smp.Unet('se_resnext50_32x4d', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_densenet121':
self.unet = smp.Unet('densenet121', encoder_weights='imagenet', activation=None)
elif self.model_type == 'unet_resnet34_t':
self.unet = Unet_t('resnet34', encoder_weights='imagenet', activation=None, use_ConvTranspose2d=True)
elif self.model_type == 'unet_resnet34_oct':
self.unet = OctaveUnet('resnet34', encoder_weights='imagenet', activation=None)
elif self.model_type == 'pspnet_resnet34':
self.unet = smp.PSPNet('resnet34', encoder_weights='imagenet', classes=1, activation=None)
elif self.model_type == 'linknet':
self.unet = LinkNet34(num_classes=1)
elif self.model_type == 'deeplabv3plus':
self.unet = DeepLabV3Plus(model_backbone='res50_atrous', num_classes=1)
# self.unet = DeepLabV3Plus(num_classes=1)
# print('build model done!')
self.unet.to(self.device)
def test_model(
self,
thresholds_classify,
thresholds_seg,
average_threshold,
stage_cla,
stage_seg,
n_splits,
test_best_model=True,
less_than_sum=2048*2,
seg_average_vote=True,
images_path=None,
masks_path=None
):
"""
Args:
thresholds_classify: list, 各个分类模型的阈值,高于这个阈值的置为1,否则置为0
thresholds_seg: list,各个分割模型的阈值
average_threshold: 分割后使用平均策略时所使用的平均阈值
stage_cla: 第几阶段的权重作为分类结果
stage_seg: 第几阶段的权重作为分割结果
n_splits: list, 测试哪几折的结果进行平均
test_best_model: 是否要使用最优模型测试,若不是的话,则取最新的模型测试
less_than_sum: list, 预测图片中有预测出的正样本总和小于这个值时,则忽略所有
seg_average_vote: bool,True:平均,False:投票
"""
        # For each fold, load the models, run the whole test set, and average
with torch.no_grad():
for index, (image_path, mask_path) in enumerate(tqdm(zip(images_path, masks_path), total=len(images_path))):
img = Image.open(image_path).convert('RGB')
pred_nfolds = 0
for fold in n_splits:
                    # Load the classification model for testing
self.unet = None
self.build_model()
if test_best_model:
unet_path = os.path.join('checkpoints', self.model_type,
self.model_type + '_{}_{}_best.pth'.format(stage_cla, fold))
else:
unet_path = os.path.join('checkpoints', self.model_type,
self.model_type + '_{}_{}.pth'.format(stage_cla, fold))
# print("Load classify weight from %s" % unet_path)
self.unet.load_state_dict(torch.load(unet_path)['state_dict'])
self.unet.eval()
seg_unet = copy.deepcopy(self.unet)
                    # Load the segmentation model for testing
if test_best_model:
unet_path = os.path.join('checkpoints', self.model_type,
self.model_type + '_{}_{}_best.pth'.format(stage_seg, fold))
else:
unet_path = os.path.join('checkpoints', self.model_type,
self.model_type + '_{}_{}.pth'.format(stage_seg, fold))
# print('Load segmentation weight from %s.' % unet_path)
seg_unet.load_state_dict(torch.load(unet_path)['state_dict'])
seg_unet.eval()
pred = self.tta(img, self.unet)
                    # Apply the classification threshold and the pixel-count threshold to decide whether the image contains a mask
pred = np.where(pred > thresholds_classify[fold], 1, 0)
if np.sum(pred) < less_than_sum[fold]:
pred[:] = 0
                    # If a mask was detected, run the segmentation model
if np.sum(pred) > 0:
pred = self.tta(img, seg_unet)
                        # If not using the averaging strategy (i.e. voting), threshold the prediction to 0/1
if not seg_average_vote:
pred = np.where(pred > thresholds_seg[fold], 1, 0)
pred_nfolds += pred
if not seg_average_vote:
vote_model_num = len(n_splits)
vote_ticket = round(vote_model_num / 2.0)
pred = np.where(pred_nfolds > vote_ticket, 1, 0)
# print("Using voting strategy, Ticket / Vote models: %d / %d" % (vote_ticket, vote_model_num))
else:
# print('Using average strategy.')
pred = pred_nfolds / len(n_splits)
pred = np.where(pred > average_threshold, 1, 0)
pred = cv2.resize(pred, (1024, 1024))
mask = Image.open(mask_path)
mask = np.around(np.array(mask.convert('L'))/256.)
self.combine_display(img, mask, pred, 'demo')
def image_transform(self, image):
"""对样本进行预处理
"""
resize = transforms.Resize(self.image_size)
to_tensor = transforms.ToTensor()
normalize = transforms.Normalize(self.mean, self.std)
transform_compose = transforms.Compose([resize, to_tensor, normalize])
return transform_compose(image)
def detection(self, image, model):
"""对输入样本进行检测
Args:
image: 待检测样本,Image
model: 要使用的网络
Return:
pred: 检测结果
"""
image = self.image_transform(image)
image = torch.unsqueeze(image, dim=0)
image = image.float().to(self.device)
pred = torch.sigmoid(model(image))
        # predicted result
pred = pred.view(self.image_size, self.image_size)
pred = pred.detach().cpu().numpy()
return pred
def tta(self, image, model):
"""执行TTA预测
Args:
image: Image图片
model: 要使用的网络
Return:
pred: 最后预测的结果
"""
preds = np.zeros([self.image_size, self.image_size])
        # 768 resolution (disabled)
# image_resize = image.resize((768, 768))
# resize_pred = self.detection(image_resize)
# resize_pred_img = Image.fromarray(resize_pred)
# resize_pred_img = resize_pred_img.resize((1024, 1024))
# preds += np.asarray(resize_pred_img)
        # Horizontal flip
image_hflip = image.transpose(Image.FLIP_LEFT_RIGHT)
hflip_pred = self.detection(image_hflip, model)
hflip_pred_img = Image.fromarray(hflip_pred)
pred_img = hflip_pred_img.transpose(Image.FLIP_LEFT_RIGHT)
preds += np.asarray(pred_img)
# CLAHE
aug = CLAHE(p=1.0)
image_np = np.asarray(image)
clahe_image = aug(image=image_np)['image']
clahe_image = Image.fromarray(clahe_image)
clahe_pred = self.detection(clahe_image, model)
preds += clahe_pred
        # Original image
original_pred = self.detection(image, model)
preds += original_pred
        # Average the three predictions
pred = preds / 3.0
return pred
# dice for threshold selection
def dice_overall(self, preds, targs):
        n = preds.shape[0]  # batch size
preds = preds.view(n, -1)
targs = targs.view(n, -1)
# preds, targs = preds.to(self.device), targs.to(self.device)
preds, targs = preds.cpu(), targs.cpu()
        # Element-wise product gives the set intersection (1 only where both are 1); summing over the second dimension yields a [batch_size] tensor with each image's intersection of ground truth and prediction
intersect = (preds * targs).sum(-1).float()
        # Element-wise sum gives the set union; summing over the second dimension yields a [batch_size] tensor with each image's union of ground truth and prediction
union = (preds + targs).sum(-1).float()
        '''
        There are two cases in which the ground truth and the prediction do not
        overlap: (1) neither has any positive labels, so the union sums to 0;
        (2) the ground truth has labels but the prediction is completely wrong,
        in which case the union is nonzero. Where the union sums to 0, set the
        intersection to 1 and the union to 2, so that 2*intersect/union is 1.
        Otherwise compute 2*intersect/union directly; the factor of 2 is needed
        because the union above does not subtract the intersection, so the
        maximum value is 1.
        '''
u0 = union == 0
intersect[u0] = 1
union[u0] = 2
return (2. * intersect / union).mean()
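    # Worked example (illustrative): if preds and targs are both all zeros for
    # an image, union == 0, so intersect is set to 1 and union to 2, giving a
    # dice of 2*1/2 = 1.0; a half-overlapping prediction with intersection 50
    # and |preds| = |targs| = 100 gives 2*50/200 = 0.5.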
def combine_display(self, image_raw, mask, pred, title_diplay):
plt.suptitle(title_diplay)
plt.subplot(1, 3, 1)
plt.title('image_raw')
plt.imshow(image_raw)
plt.subplot(1, 3, 2)
plt.title('mask')
plt.imshow(mask)
plt.subplot(1, 3, 3)
plt.title('pred')
plt.imshow(pred)
plt.show()
if __name__ == "__main__":
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
# mean = (0.490, 0.490, 0.490)
# std = (0.229, 0.229, 0.229)
model_name = 'unet_resnet34'
    # stage_cla is the training stage whose weights are used for the classification model,
    # stage_seg for the segmentation model; each corresponds to a different image_size.
    # index is the cross-validation fold.
    # image_size TODO
stage_cla, stage_seg = 2, 3
if stage_cla == 1:
image_size = 768
elif stage_cla == 2:
image_size = 1024
with open('checkpoints/'+model_name+'/result_stage2.json', 'r', encoding='utf-8') as json_file:
config_cla = json.load(json_file)
with open('checkpoints/'+model_name+'/result_stage3.json', 'r', encoding='utf-8') as json_file:
config_seg = json.load(json_file)
n_splits = [0] # 0, 1, 2, 3, 4
thresholds_classify, thresholds_seg, less_than_sum = [0 for x in range(5)], [0 for x in range(5)], [0 for x in range(5)]
for x in n_splits:
thresholds_classify[x] = config_cla[str(x)][0]
less_than_sum[x] = config_cla[str(x)][1]
thresholds_seg[x] = config_seg[str(x)][0]
seg_average_vote = False
average_threshold = np.sum(np.asarray(thresholds_seg))/len(n_splits)
test_best_mode = True
print("stage_cla: %d, stage_seg: %d" % (stage_cla, stage_seg))
print('test fold: ', n_splits)
print('thresholds_classify: ', thresholds_classify)
if seg_average_vote:
        print('Using average strategy, average_threshold: %f' % average_threshold)
else:
        print('Using voting strategy, thresholds_seg: ', thresholds_seg)
print('less_than_sum: ', less_than_sum)
    # Sample paths for the test split only
with open('dataset_static_mask.pkl', 'rb') as f:
static = pickle.load(f)
images_path, masks_path, masks_bool = static[0], static[1], static[2]
    # Sample paths for the stage-1 training set only
with open('dataset_static_mask_stage1.pkl', 'rb') as f:
static_stage1 = pickle.load(f)
images_path_stage1, masks_path_stage1, masks_bool_stage1 = static_stage1[0], static_stage1[1], static_stage1[2]
skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=1)
split = skf.split(images_path, masks_bool)
split_stage1 = skf.split(images_path_stage1, masks_bool_stage1)
val_image_nfolds = list()
val_mask_nfolds = list()
for index, ((train_index, val_index), (train_stage1_index, val_stage1_index)) in enumerate(zip(split, split_stage1)):
val_image = [images_path[x] for x in val_index]
val_mask = [masks_path[x] for x in val_index]
val_image_stage1 = [images_path_stage1[x] for x in val_stage1_index]
val_mask_stage1 = [masks_path_stage1[x] for x in val_stage1_index]
val_image_fold = val_image + val_image_stage1
val_mask_fold = val_mask + val_mask_stage1
val_image_nfolds.append(val_image_fold)
val_mask_nfolds.append(val_mask_fold)
val_image_fold0 = val_image_nfolds[0]
val_mask_fold0 = val_mask_nfolds[0]
solver = Test(model_name, image_size, mean, std)
solver.test_model(
thresholds_classify=thresholds_classify,
thresholds_seg=thresholds_seg,
average_threshold=average_threshold,
stage_cla=stage_cla,
stage_seg=stage_seg,
n_splits=n_splits,
test_best_model=test_best_mode,
less_than_sum=less_than_sum,
seg_average_vote=seg_average_vote,
images_path=images_path,
masks_path=masks_path
)
|
[
"matplotlib.pyplot.title",
"numpy.sum",
"matplotlib.pyplot.suptitle",
"pickle.load",
"torchvision.transforms.Normalize",
"torch.no_grad",
"segmentation_models_pytorch.Unet",
"matplotlib.pyplot.imshow",
"torch.load",
"models.deeplabv3.deeplabv3plus.DeepLabV3Plus",
"models.Transpose_unet.unet.model.Unet",
"models.linknet.LinkNet34",
"torchvision.transforms.Compose",
"models.octave_unet.unet.model.OctaveUnet",
"segmentation_models_pytorch.PSPNet",
"albumentations.CLAHE",
"cv2.resize",
"copy.deepcopy",
"matplotlib.pyplot.show",
"numpy.asarray",
"models.network.U_Net",
"torch.cuda.is_available",
"torch.unsqueeze",
"torchvision.transforms.Resize",
"matplotlib.pyplot.subplot",
"json.load",
"models.network.AttU_Net",
"numpy.zeros",
"PIL.Image.open",
"numpy.where",
"sklearn.model_selection.StratifiedKFold",
"PIL.Image.fromarray",
"torchvision.transforms.ToTensor"
] |
[((12300, 12357), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': '(5)', 'shuffle': '(True)', 'random_state': '(1)'}), '(n_splits=5, shuffle=True, random_state=1)\n', (12315, 12357), False, 'from sklearn.model_selection import KFold, StratifiedKFold\n'), ((6888, 6922), 'torchvision.transforms.Resize', 'transforms.Resize', (['self.image_size'], {}), '(self.image_size)\n', (6905, 6922), False, 'from torchvision import transforms\n'), ((6943, 6964), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (6962, 6964), False, 'from torchvision import transforms\n'), ((6985, 7026), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['self.mean', 'self.std'], {}), '(self.mean, self.std)\n', (7005, 7026), False, 'from torchvision import transforms\n'), ((7056, 7106), 'torchvision.transforms.Compose', 'transforms.Compose', (['[resize, to_tensor, normalize]'], {}), '([resize, to_tensor, normalize])\n', (7074, 7106), False, 'from torchvision import transforms\n'), ((7404, 7433), 'torch.unsqueeze', 'torch.unsqueeze', (['image'], {'dim': '(0)'}), '(image, dim=0)\n', (7419, 7433), False, 'import torch\n'), ((7858, 7902), 'numpy.zeros', 'np.zeros', (['[self.image_size, self.image_size]'], {}), '([self.image_size, self.image_size])\n', (7866, 7902), True, 'import numpy as np\n'), ((8350, 8377), 'PIL.Image.fromarray', 'Image.fromarray', (['hflip_pred'], {}), '(hflip_pred)\n', (8365, 8377), False, 'from PIL import Image\n'), ((8462, 8482), 'numpy.asarray', 'np.asarray', (['pred_img'], {}), '(pred_img)\n', (8472, 8482), True, 'import numpy as np\n'), ((8514, 8526), 'albumentations.CLAHE', 'CLAHE', ([], {'p': '(1.0)'}), '(p=1.0)\n', (8519, 8526), False, 'from albumentations import CLAHE\n'), ((8546, 8563), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (8556, 8563), True, 'import numpy as np\n'), ((8637, 8665), 'PIL.Image.fromarray', 'Image.fromarray', (['clahe_image'], {}), '(clahe_image)\n', (8652, 8665), False, 'from PIL import Image\n'), ((9954, 9980), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title_diplay'], {}), '(title_diplay)\n', (9966, 9980), True, 'import matplotlib.pyplot as plt\n'), ((9989, 10009), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(1)'], {}), '(1, 3, 1)\n', (10000, 10009), True, 'import matplotlib.pyplot as plt\n'), ((10018, 10040), 'matplotlib.pyplot.title', 'plt.title', (['"""image_raw"""'], {}), "('image_raw')\n", (10027, 10040), True, 'import matplotlib.pyplot as plt\n'), ((10049, 10070), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image_raw'], {}), '(image_raw)\n', (10059, 10070), True, 'import matplotlib.pyplot as plt\n'), ((10080, 10100), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(2)'], {}), '(1, 3, 2)\n', (10091, 10100), True, 'import matplotlib.pyplot as plt\n'), ((10109, 10126), 'matplotlib.pyplot.title', 'plt.title', (['"""mask"""'], {}), "('mask')\n", (10118, 10126), True, 'import matplotlib.pyplot as plt\n'), ((10135, 10151), 'matplotlib.pyplot.imshow', 'plt.imshow', (['mask'], {}), '(mask)\n', (10145, 10151), True, 'import matplotlib.pyplot as plt\n'), ((10161, 10181), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(3)', '(3)'], {}), '(1, 3, 3)\n', (10172, 10181), True, 'import matplotlib.pyplot as plt\n'), ((10190, 10207), 'matplotlib.pyplot.title', 'plt.title', (['"""pred"""'], {}), "('pred')\n", (10199, 10207), True, 'import matplotlib.pyplot as plt\n'), ((10216, 10232), 'matplotlib.pyplot.imshow', 'plt.imshow', (['pred'], {}), '(pred)\n', 
(10226, 10232), True, 'import matplotlib.pyplot as plt\n'), ((10242, 10252), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10250, 10252), True, 'import matplotlib.pyplot as plt\n'), ((10825, 10845), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (10834, 10845), False, 'import json\n'), ((10972, 10992), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (10981, 10992), False, 'import json\n'), ((11949, 11963), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (11960, 11963), False, 'import pickle\n'), ((12150, 12164), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (12161, 12164), False, 'import pickle\n'), ((1191, 1219), 'models.network.U_Net', 'U_Net', ([], {'img_ch': '(3)', 'output_ch': '(1)'}), '(img_ch=3, output_ch=1)\n', (1196, 1219), False, 'from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net\n'), ((3653, 3668), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3666, 3668), False, 'import torch\n'), ((11395, 11421), 'numpy.asarray', 'np.asarray', (['thresholds_seg'], {}), '(thresholds_seg)\n', (11405, 11421), True, 'import numpy as np\n'), ((909, 934), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (932, 934), False, 'import torch\n'), ((1288, 1319), 'models.network.AttU_Net', 'AttU_Net', ([], {'img_ch': '(3)', 'output_ch': '(1)'}), '(img_ch=3, output_ch=1)\n', (1296, 1319), False, 'from models.network import U_Net, R2U_Net, AttU_Net, R2AttU_Net\n'), ((6594, 6624), 'cv2.resize', 'cv2.resize', (['pred', '(1024, 1024)'], {}), '(pred, (1024, 1024))\n', (6604, 6624), False, 'import cv2\n'), ((6648, 6669), 'PIL.Image.open', 'Image.open', (['mask_path'], {}), '(mask_path)\n', (6658, 6669), False, 'from PIL import Image\n'), ((1462, 1527), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('resnet34', encoder_weights='imagenet', activation=None)\n", (1470, 1527), True, 'import segmentation_models_pytorch as smp\n'), ((4698, 4722), 'copy.deepcopy', 'copy.deepcopy', (['self.unet'], {}), '(self.unet)\n', (4711, 4722), False, 'import copy\n'), ((5523, 5571), 'numpy.where', 'np.where', (['(pred > thresholds_classify[fold])', '(1)', '(0)'], {}), '(pred > thresholds_classify[fold], 1, 0)\n', (5531, 5571), True, 'import numpy as np\n'), ((6212, 6253), 'numpy.where', 'np.where', (['(pred_nfolds > vote_ticket)', '(1)', '(0)'], {}), '(pred_nfolds > vote_ticket, 1, 0)\n', (6220, 6253), True, 'import numpy as np\n'), ((6529, 6569), 'numpy.where', 'np.where', (['(pred > average_threshold)', '(1)', '(0)'], {}), '(pred > average_threshold, 1, 0)\n', (6537, 6569), True, 'import numpy as np\n'), ((1601, 1666), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""resnet50"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('resnet50', encoder_weights='imagenet', activation=None)\n", (1609, 1666), True, 'import segmentation_models_pytorch as smp\n'), ((3813, 3835), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (3823, 3835), False, 'from PIL import Image\n'), ((5595, 5607), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (5601, 5607), True, 'import numpy as np\n'), ((5732, 5744), 'numpy.sum', 'np.sum', (['pred'], {}), '(pred)\n', (5738, 5744), True, 'import numpy as np\n'), ((1750, 1825), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""se_resnext50_32x4d"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('se_resnext50_32x4d', encoder_weights='imagenet', activation=None)\n", 
(1758, 1825), True, 'import segmentation_models_pytorch as smp\n'), ((4592, 4613), 'torch.load', 'torch.load', (['unet_path'], {}), '(unet_path)\n', (4602, 4613), False, 'import torch\n'), ((5323, 5344), 'torch.load', 'torch.load', (['unet_path'], {}), '(unet_path)\n', (5333, 5344), False, 'import torch\n'), ((5946, 5989), 'numpy.where', 'np.where', (['(pred > thresholds_seg[fold])', '(1)', '(0)'], {}), '(pred > thresholds_seg[fold], 1, 0)\n', (5954, 5989), True, 'import numpy as np\n'), ((1902, 1970), 'segmentation_models_pytorch.Unet', 'smp.Unet', (['"""densenet121"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('densenet121', encoder_weights='imagenet', activation=None)\n", (1910, 1970), True, 'import segmentation_models_pytorch as smp\n'), ((2046, 2139), 'models.Transpose_unet.unet.model.Unet', 'Unet_t', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None', 'use_ConvTranspose2d': '(True)'}), "('resnet34', encoder_weights='imagenet', activation=None,\n use_ConvTranspose2d=True)\n", (2052, 2139), True, 'from models.Transpose_unet.unet.model import Unet as Unet_t\n'), ((2213, 2280), 'models.octave_unet.unet.model.OctaveUnet', 'OctaveUnet', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'activation': 'None'}), "('resnet34', encoder_weights='imagenet', activation=None)\n", (2223, 2280), False, 'from models.octave_unet.unet.model import OctaveUnet\n'), ((2357, 2435), 'segmentation_models_pytorch.PSPNet', 'smp.PSPNet', (['"""resnet34"""'], {'encoder_weights': '"""imagenet"""', 'classes': '(1)', 'activation': 'None'}), "('resnet34', encoder_weights='imagenet', classes=1, activation=None)\n", (2367, 2435), True, 'import segmentation_models_pytorch as smp\n'), ((2503, 2527), 'models.linknet.LinkNet34', 'LinkNet34', ([], {'num_classes': '(1)'}), '(num_classes=1)\n', (2512, 2527), False, 'from models.linknet import LinkNet34\n'), ((2601, 2660), 'models.deeplabv3.deeplabv3plus.DeepLabV3Plus', 'DeepLabV3Plus', ([], {'model_backbone': '"""res50_atrous"""', 'num_classes': '(1)'}), "(model_backbone='res50_atrous', num_classes=1)\n", (2614, 2660), False, 'from models.deeplabv3.deeplabv3plus import DeepLabV3Plus\n')]
|
from dataclasses import dataclass
import functions as fx
import glow.gwas.log_reg as lr
import glow.gwas.approx_firth as af
import pandas as pd
from nptyping import Float, NDArray
import numpy as np
import pytest
from typing import Any
@dataclass
class TestData:
phenotypes: NDArray[(Any, ), Float]
covariates: NDArray[(Any, Any), Float]
offset: NDArray[(Any, ), Float]
def _get_test_data(use_offset, use_intercept):
test_file = 'test-data/r/sex2withoffset.txt'
df = pd.read_table(test_file, delimiter='\t').astype('float64')
phenotypes = df['case']
covariates = df.loc[:, 'age':'dia']
if use_intercept:
covariates.loc[:, 'intercept'] = 1
offset = df['offset']
if not use_offset:
offset = offset * 0
return TestData(phenotypes.to_numpy(), covariates.to_numpy(), offset.to_numpy())
def _compare_full_firth_beta(test_data, golden_firth_beta):
beta_init = np.zeros(test_data.covariates.shape[1])
X = test_data.covariates
y = test_data.phenotypes
offset = test_data.offset
test_firth_fit = af._fit_firth(beta_init=beta_init, X=X, y=y, offset=offset)
test_firth_beta = test_firth_fit.beta
assert np.allclose(golden_firth_beta, test_firth_beta)
def test_full_firth():
# table = read.table("sex2withoffset.txt", header=True)
# logistf(case ~ age+oc+vic+vicl+vis+dia+offset(offset), data=table)
golden_firth_beta = [
-1.1715911, # age
0.1568537, # oc
2.4752617, # vic
-2.2125007, # vicl
-0.8604622, # vis
2.7397140, # dia
-0.5679234 # intercept
]
test_data = _get_test_data(use_offset=True, use_intercept=True)
_compare_full_firth_beta(test_data, golden_firth_beta)
def test_full_firth_no_offset():
# logistf(case ~ age+oc+vic+vicl+vis+dia, data=table)
golden_firth_beta = [
-1.10598130, # age
-0.06881673, # oc
2.26887464, # vic
-2.11140816, # vicl
-0.78831694, # vis
3.09601263, # dia
0.12025404 # intercept
]
test_data = _get_test_data(use_offset=False, use_intercept=True)
_compare_full_firth_beta(test_data, golden_firth_beta)
def test_full_firth_no_intercept():
# logistf(case ~ age+oc+vic+vicl+vis+dia+offset(offset)-1, data=table)
golden_firth_beta = [
-1.2513849, # age
-0.3141151, # oc
2.2066573, # vic
-2.2988439, # vicl
-0.9922712, # vis
2.7046574 # dia
]
test_data = _get_test_data(use_offset=True, use_intercept=False)
_compare_full_firth_beta(test_data, golden_firth_beta)
def test_null_firth_fit_no_offset():
golden_firth_beta = [
-1.10598130, # age
-0.06881673, # oc
2.26887464, # vic
-2.11140816, # vicl
-0.78831694, # vis
3.09601263, # dia
0.12025404 # intercept
]
test_data = _get_test_data(use_offset=False, use_intercept=True)
fit = af.perform_null_firth_fit(test_data.phenotypes,
test_data.covariates,
~np.isnan(test_data.phenotypes),
None,
includes_intercept=True)
assert np.allclose(fit, test_data.covariates @ golden_firth_beta)
def _read_regenie_df(file, trait, num_snps):
df = pd.read_table(file, sep=r'\s+')
df = df[df['ID'] <= num_snps]
df['phenotype'] = trait
return df
def compare_corrections_to_regenie(spark,
pvalue_threshold,
output_prefix,
compare_all_cols,
uncorrected,
corrected,
missing=[]):
(genotype_df, phenotype_df, covariate_df, offset_df) = fx.get_input_dfs(spark,
binary=True,
missing=missing)
glowgr_df = lr.logistic_regression(genotype_df,
phenotype_df,
covariate_df,
offset_df,
correction=lr.correction_approx_firth,
pvalue_threshold=pvalue_threshold,
values_column='values').toPandas()
fx.compare_to_regenie(output_prefix, glowgr_df, compare_all_cols)
correction_counts = glowgr_df.correctionSucceeded.value_counts(dropna=False).to_dict()
if uncorrected > 0:
# null in Spark DataFrame converts to nan in pandas
assert correction_counts[np.nan] == uncorrected
if corrected > 0:
assert correction_counts[True] == corrected
assert False not in correction_counts
return glowgr_df
@pytest.mark.min_spark('3')
def test_correct_all_versus_regenie(spark):
compare_corrections_to_regenie(spark,
0.9999,
'test_bin_out_firth_',
compare_all_cols=True,
uncorrected=0,
corrected=200)
@pytest.mark.min_spark('3')
def test_correct_half_versus_regenie(spark):
compare_corrections_to_regenie(spark,
0.5,
'test_bin_out_half_firth_',
compare_all_cols=False,
uncorrected=103,
corrected=97)
@pytest.mark.min_spark('3')
def test_correct_missing_versus_regenie(spark):
compare_corrections_to_regenie(
spark,
0.9999,
'test_bin_out_missing_firth_',
compare_all_cols=True,
uncorrected=0,
corrected=200,
missing=['35_35', '136_136', '77_77', '100_100', '204_204', '474_474'])
|
[
"numpy.allclose",
"numpy.zeros",
"numpy.isnan",
"functions.compare_to_regenie",
"glow.gwas.approx_firth._fit_firth",
"pandas.read_table",
"glow.gwas.log_reg.logistic_regression",
"pytest.mark.min_spark",
"functions.get_input_dfs"
] |
[((4970, 4996), 'pytest.mark.min_spark', 'pytest.mark.min_spark', (['"""3"""'], {}), "('3')\n", (4991, 4996), False, 'import pytest\n'), ((5345, 5371), 'pytest.mark.min_spark', 'pytest.mark.min_spark', (['"""3"""'], {}), "('3')\n", (5366, 5371), False, 'import pytest\n'), ((5725, 5751), 'pytest.mark.min_spark', 'pytest.mark.min_spark', (['"""3"""'], {}), "('3')\n", (5746, 5751), False, 'import pytest\n'), ((923, 962), 'numpy.zeros', 'np.zeros', (['test_data.covariates.shape[1]'], {}), '(test_data.covariates.shape[1])\n', (931, 962), True, 'import numpy as np\n'), ((1073, 1132), 'glow.gwas.approx_firth._fit_firth', 'af._fit_firth', ([], {'beta_init': 'beta_init', 'X': 'X', 'y': 'y', 'offset': 'offset'}), '(beta_init=beta_init, X=X, y=y, offset=offset)\n', (1086, 1132), True, 'import glow.gwas.approx_firth as af\n'), ((1186, 1233), 'numpy.allclose', 'np.allclose', (['golden_firth_beta', 'test_firth_beta'], {}), '(golden_firth_beta, test_firth_beta)\n', (1197, 1233), True, 'import numpy as np\n'), ((3262, 3320), 'numpy.allclose', 'np.allclose', (['fit', '(test_data.covariates @ golden_firth_beta)'], {}), '(fit, test_data.covariates @ golden_firth_beta)\n', (3273, 3320), True, 'import numpy as np\n'), ((3377, 3408), 'pandas.read_table', 'pd.read_table', (['file'], {'sep': '"""\\\\s+"""'}), "(file, sep='\\\\s+')\n", (3390, 3408), True, 'import pandas as pd\n'), ((3887, 3940), 'functions.get_input_dfs', 'fx.get_input_dfs', (['spark'], {'binary': '(True)', 'missing': 'missing'}), '(spark, binary=True, missing=missing)\n', (3903, 3940), True, 'import functions as fx\n'), ((4531, 4596), 'functions.compare_to_regenie', 'fx.compare_to_regenie', (['output_prefix', 'glowgr_df', 'compare_all_cols'], {}), '(output_prefix, glowgr_df, compare_all_cols)\n', (4552, 4596), True, 'import functions as fx\n'), ((491, 531), 'pandas.read_table', 'pd.read_table', (['test_file'], {'delimiter': '"""\t"""'}), "(test_file, delimiter='\\t')\n", (504, 531), True, 'import pandas as pd\n'), ((3116, 3146), 'numpy.isnan', 'np.isnan', (['test_data.phenotypes'], {}), '(test_data.phenotypes)\n', (3124, 3146), True, 'import numpy as np\n'), ((4109, 4290), 'glow.gwas.log_reg.logistic_regression', 'lr.logistic_regression', (['genotype_df', 'phenotype_df', 'covariate_df', 'offset_df'], {'correction': 'lr.correction_approx_firth', 'pvalue_threshold': 'pvalue_threshold', 'values_column': '"""values"""'}), "(genotype_df, phenotype_df, covariate_df, offset_df,\n correction=lr.correction_approx_firth, pvalue_threshold=\n pvalue_threshold, values_column='values')\n", (4131, 4290), True, 'import glow.gwas.log_reg as lr\n')]
|
import cv2
import numpy as np
import os
def load_image(path: str) -> np.ndarray:
"""Загрузка ихображения
:param path: путь к файлу с изображением
:return: загруженное изображение
"""
    if not isinstance(path, str):
        raise TypeError(f'path has type {type(path)}, expected str')
    if not os.path.exists(path):
        raise FileNotFoundError(f'File {path} does not exist.')
image = cv2.imread(path)
return image
def extract_contours(image: np.ndarray) -> np.ndarray:
"""Поиск контуров на изображени
:param image: предварительно обработанное изображение с нанесенными рамками
:return: контуры на изображении
"""
# Диапазон цвета которого не может быть в документе (в нашем случае - синий)
lower_range = np.array([110, 50, 50])
upper_range = np.array([130, 255, 255])
image_mask = cv2.inRange(image, lower_range, upper_range)
thresh = cv2.Canny(image_mask, 10, 250)
contours_of_frames, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
return contours_of_frames
def extract_frames(contours_of_frames: np.ndarray) -> list:
"""Поиск рамок на изображении
:param contours_of_frames: контуры на изображении
:return: полученные координаты рамок
"""
frames = []
    # iterate over all found contours
for contours_of_frame in contours_of_frames:
        rect = cv2.minAreaRect(contours_of_frame)  # fit a minimal-area rectangle
        box = cv2.boxPoints(rect)  # get the four rectangle vertices
        box = np.int0(box)  # round the coordinates to integers
        area = int(rect[1][0] * rect[1][1])  # compute the rectangle area
if area > 250:
frames.append(box[[1, 3]])
return np.array(frames).tolist()
def save_frames(path_to_image_with_fields: str,
create_debug_form: bool = False, path_to_blank_image: str = None) -> (dict, np.ndarray):
"""По полученному изображению с нанесенными на него рамками (прямоугольники синего цвета по контурам мест, где
пользователь будет вводить свои данные) вывести координаты полей, а также изображение с нанесенными на него заново
рамками для проверки. Полученный в ходе работы данной программы массив значений можно использовать напрямую для
подачи в текущую версию основного скрипта в качестве sogl{number}_fields.json.
:param path_to_image_with_fields: путь к изображению с нанесенными рамками, которые выделяют нужные поля
:param create_debug_form: если True, на загруженное пустое изображение наносятся рамки в соответствии с полученными
их координатами. это действие осуществляется для проверки, что все сработало нормально
:param path_to_blank_image: пусть к исходному пустому изображению без нанесенных рамок
:return: словарь с координатами полей, а также изображение с нанесенными заново рамками
"""
image_with_frames = load_image(path_to_image_with_fields)
image_with_frames = cv2.cvtColor(image_with_frames, cv2.COLOR_BGR2HSV)
contours_of_frames = extract_contours(image_with_frames)
frames = extract_frames(contours_of_frames)
dict_with_values = {str(i): frame for i, frame in enumerate(frames)}
if create_debug_form:
        template = load_image(path_to_blank_image)  # blank form
for location in frames:
y = (location[0][1], location[1][1])
x = (location[0][0], location[1][0])
            # draw the frames onto the form
cv2.rectangle(template, (x[0], y[0]), (x[1], y[1]), (0, 255, 0), 3)
return dict_with_values, template
return dict_with_values, None
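if __name__ == '__main__':
    # Minimal usage sketch; the file names below are hypothetical examples,
    # not paths from the original project.
    fields, debug_form = save_frames('form_with_frames.png',
                                     create_debug_form=True,
                                     path_to_blank_image='form_blank.png')
    print(fields)
    if debug_form is not None:
        cv2.imwrite('form_debug.png', debug_form)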
|
[
"cv2.Canny",
"numpy.int0",
"cv2.cvtColor",
"os.path.exists",
"cv2.rectangle",
"cv2.imread",
"cv2.boxPoints",
"numpy.array",
"cv2.minAreaRect",
"cv2.inRange"
] |
[((420, 436), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (430, 436), False, 'import cv2\n'), ((772, 795), 'numpy.array', 'np.array', (['[110, 50, 50]'], {}), '([110, 50, 50])\n', (780, 795), True, 'import numpy as np\n'), ((814, 839), 'numpy.array', 'np.array', (['[130, 255, 255]'], {}), '([130, 255, 255])\n', (822, 839), True, 'import numpy as np\n'), ((858, 902), 'cv2.inRange', 'cv2.inRange', (['image', 'lower_range', 'upper_range'], {}), '(image, lower_range, upper_range)\n', (869, 902), False, 'import cv2\n'), ((917, 947), 'cv2.Canny', 'cv2.Canny', (['image_mask', '(10)', '(250)'], {}), '(image_mask, 10, 250)\n', (926, 947), False, 'import cv2\n'), ((2981, 3031), 'cv2.cvtColor', 'cv2.cvtColor', (['image_with_frames', 'cv2.COLOR_BGR2HSV'], {}), '(image_with_frames, cv2.COLOR_BGR2HSV)\n', (2993, 3031), False, 'import cv2\n'), ((321, 341), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (335, 341), False, 'import os\n'), ((1411, 1445), 'cv2.minAreaRect', 'cv2.minAreaRect', (['contours_of_frame'], {}), '(contours_of_frame)\n', (1426, 1445), False, 'import cv2\n'), ((1494, 1513), 'cv2.boxPoints', 'cv2.boxPoints', (['rect'], {}), '(rect)\n', (1507, 1513), False, 'import cv2\n'), ((1567, 1579), 'numpy.int0', 'np.int0', (['box'], {}), '(box)\n', (1574, 1579), True, 'import numpy as np\n'), ((1745, 1761), 'numpy.array', 'np.array', (['frames'], {}), '(frames)\n', (1753, 1761), True, 'import numpy as np\n'), ((3507, 3574), 'cv2.rectangle', 'cv2.rectangle', (['template', '(x[0], y[0])', '(x[1], y[1])', '(0, 255, 0)', '(3)'], {}), '(template, (x[0], y[0]), (x[1], y[1]), (0, 255, 0), 3)\n', (3520, 3574), False, 'import cv2\n')]
|
import numpy as np
import random
import pickle
class Loader:
@staticmethod
def load_train_set_and_test_set(path):
# loading training set features
with open(path + "/new/train_set_features.pkl", "rb") as f:
train_set_features2 = pickle.load(f)
# reducing feature vector length
features_STDs = np.std(a=train_set_features2, axis=0)
train_set_features = train_set_features2[:, features_STDs > 52.3]
# changing the range of data between 0 and 1
train_set_features = np.divide(train_set_features, train_set_features.max())
# loading training set labels
with open(path + "/new/train_set_labels.pkl", "rb") as f:
train_set_labels = pickle.load(f)
# ------------
# loading test set features
with open(path + "/new/test_set_features.pkl", "rb") as f:
test_set_features2 = pickle.load(f)
# reducing feature vector length
features_STDs = np.std(a=test_set_features2, axis=0)
test_set_features = test_set_features2[:, features_STDs > 47.7]
# changing the range of data between 0 and 1
test_set_features = np.divide(test_set_features, test_set_features.max())
# loading test set labels
with open(path + "/new/test_set_labels.pkl", "rb") as f:
test_set_labels = pickle.load(f)
# ------------
# preparing our training and test sets - joining datasets and lables
train_set = []
test_set = []
for i in range(len(train_set_features)):
label = np.array([0, 0, 0, 0, 0, 0])
label[int(train_set_labels[i])] = 1
label = label.reshape(6, 1)
train_set.append((train_set_features[i].reshape(119, 1), label))
for i in range(len(test_set_features)):
label = np.array([0, 0, 0, 0, 0, 0])
label[int(test_set_labels[i])] = 1
label = label.reshape(6, 1)
test_set.append((test_set_features[i].reshape(119, 1), label))
# shuffle
random.shuffle(train_set)
random.shuffle(test_set)
# print size
# print(len(train_set), np.shape(train_set)) # 1962
# print(len(test_set)) # 662
        # dtype=object is required because each element is a (features, label) tuple
        train_set = np.array(train_set, dtype=object)
        test_set = np.array(test_set, dtype=object)
return train_set, test_set
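# Minimal usage sketch (illustrative; the dataset path is hypothetical and the
# pickle files under <path>/new/ must already exist):
# train_set, test_set = Loader.load_train_set_and_test_set('datasets/fruits')
# print(len(train_set), len(test_set))  # e.g. 1962 and 662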
|
[
"numpy.std",
"random.shuffle",
"pickle.load",
"numpy.array"
] |
[((348, 385), 'numpy.std', 'np.std', ([], {'a': 'train_set_features2', 'axis': '(0)'}), '(a=train_set_features2, axis=0)\n', (354, 385), True, 'import numpy as np\n'), ((991, 1027), 'numpy.std', 'np.std', ([], {'a': 'test_set_features2', 'axis': '(0)'}), '(a=test_set_features2, axis=0)\n', (997, 1027), True, 'import numpy as np\n'), ((2078, 2103), 'random.shuffle', 'random.shuffle', (['train_set'], {}), '(train_set)\n', (2092, 2103), False, 'import random\n'), ((2112, 2136), 'random.shuffle', 'random.shuffle', (['test_set'], {}), '(test_set)\n', (2126, 2136), False, 'import random\n'), ((2278, 2297), 'numpy.array', 'np.array', (['train_set'], {}), '(train_set)\n', (2286, 2297), True, 'import numpy as np\n'), ((2317, 2335), 'numpy.array', 'np.array', (['test_set'], {}), '(test_set)\n', (2325, 2335), True, 'import numpy as np\n'), ((267, 281), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (278, 281), False, 'import pickle\n'), ((735, 749), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (746, 749), False, 'import pickle\n'), ((910, 924), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (921, 924), False, 'import pickle\n'), ((1366, 1380), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1377, 1380), False, 'import pickle\n'), ((1597, 1625), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (1605, 1625), True, 'import numpy as np\n'), ((1860, 1888), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 0])\n', (1868, 1888), True, 'import numpy as np\n')]
|
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
class DataAugmentor:
"""
A class used for data augmentation (partially taken from : https://www.wouterbulten.nl/blog/tech/data-augmentation-using-tensorflow-data-dataset/)
Attributes
----------
batch : tf.Tensor, optional
The batch to augment
batchSize: int
The batch size
seed: int, optional
Random seed
Methods
-------
flip
Flip Augmentation
color
Color Augmentation
gaussian
Gaussian Noise
brightness
Custom Brightness Augmentation
zoom
Crop Augmentation
kerasAug
Inbuilt Keras Augmentations
augment
Wrapper Augmentation Function
"""
def __init__(self, batch=None, batchSize=50, seed=0):
if batch is not None:
self.dataset = batch
self.seed = seed
tf.random.set_seed(self.seed)
np.random.seed(self.seed)
self.batchSize = batchSize
def flip(self, x: tf.Tensor) -> tf.Tensor:
"""Flip augmentation
Args:
x: Image to flip
Returns:
Augmented image
"""
x = tf.image.random_flip_left_right(x, seed=self.seed)
return x
def color(self, x: tf.Tensor) -> tf.Tensor:
"""Color augmentation
Args:
x: Image
Returns:
Augmented image
#"""
x = tf.image.random_hue(x, 0.05, seed=self.seed)
x = tf.image.random_saturation(x, 0.6, 1.2, seed=self.seed)
x = tf.image.random_brightness(x, 0.05, seed=self.seed)
x = tf.image.random_contrast(x, 0.7, 1.0, seed=self.seed)
return x
def gaussian(self, x: tf.Tensor) -> tf.Tensor:
mean = tf.keras.backend.mean(x)
std = tf.keras.backend.std(x)
max_ = tf.keras.backend.max(x)
min_ = tf.keras.backend.min(x)
ptp = max_ - min_
        noise = tf.random.normal(
            shape=tf.shape(x),
            mean=0,
            # the original referenced an undefined self.var; the image's own
            # standard deviation is used here as the noise scale
            stddev=0.3 * std,
            dtype=tf.float32,
            seed=self.seed,
        )
# noise_img = tf.clip_by_value(((x - mean)/std + noise)*std + mean,
# clip_value_min = min_, clip_value_max=max_)
noise_img = x + noise
return noise_img
def brightness(self, x: tf.Tensor) -> tf.Tensor:
max_ = tf.keras.backend.max(x)
min_ = tf.keras.backend.min(x)
brightness_val = 0.1 * np.random.random_sample() - 0.05
noise = tf.constant(brightness_val, shape=x.shape)
noise_img = x + noise
        noise_img = tf.clip_by_value(noise_img, clip_value_min=min_, clip_value_max=max_)
return noise_img
def zoom(self, x: tf.Tensor) -> tf.Tensor:
"""Zoom augmentation
Args:
x: Image
Returns:
Augmented image
"""
        # Generate 15 crop settings, ranging from a 1% to 15% crop.
scales = list(np.arange(0.85, 1.0, 0.01))
boxes = np.zeros((len(scales), 4))
for i, scale in enumerate(scales):
x1 = y1 = 0.5 - (0.5 * scale)
x2 = y2 = 0.5 + (0.5 * scale)
boxes[i] = [x1, y1, x2, y2]
def random_crop(img):
# Create different crops for an image
crops = tf.image.crop_and_resize(
[img],
boxes=boxes,
box_indices=np.zeros(len(scales)),
crop_size=(x.shape[0], x.shape[1]),
)
# Return a random crop
return crops[
tf.random.uniform(
shape=[],
minval=0,
maxval=len(scales),
dtype=tf.int32,
seed=self.seed,
)
]
choice = tf.random.uniform(
shape=[], minval=0.0, maxval=1.0, dtype=tf.float32, seed=self.seed
)
# Only apply cropping 50% of the time
return tf.cond(choice < 0.5, lambda: x, lambda: random_crop(x))
def kerasAug(self, x: tf.Tensor) -> tf.Tensor:
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rotation_range=2,
width_shift_range=0,
height_shift_range=0,
horizontal_flip=False,
shear_range=0,
fill_mode="nearest",
dtype=tf.float32,
)
return datagen.flow(
x, batch_size=self.batchSize, shuffle=False, seed=self.seed
).next()
def augment(self, batch=None):
if batch is not None:
self.dataset = batch
self.dataset = tf.data.Dataset.from_tensor_slices(self.dataset.numpy())
# Add augmentations
augmentations = [self.flip, self.color, self.zoom]
# Add the augmentations to the dataset
for f in augmentations:
            # Apply each augmentation to every element of the dataset.
self.dataset = self.dataset.map(f)
self.dataset = next(iter(self.dataset.batch(self.batchSize)))
return self.dataset
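# --- Minimal usage sketch (illustrative, not from the original source): the
# batch below is random noise standing in for real images, and the shapes are
# assumptions chosen for the demo. ---
if __name__ == "__main__":
    fake_batch = tf.random.uniform((8, 32, 32, 3), dtype=tf.float32)  # fake RGB images
    augmentor = DataAugmentor(batch=fake_batch, batchSize=8, seed=42)
    augmented = augmentor.augment()  # applies flip, color jitter and random crop
    print(augmented.shape)  # -> (8, 32, 32, 3)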
|
[
"tensorflow.random.set_seed",
"tensorflow.keras.backend.min",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"numpy.random.seed",
"numpy.random.random_sample",
"tensorflow.clip_by_value",
"tensorflow.random.uniform",
"tensorflow.image.random_contrast",
"tensorflow.keras.backend.mean",
"tensorflow.keras.backend.max",
"tensorflow.image.random_hue",
"tensorflow.image.random_flip_left_right",
"tensorflow.keras.backend.std",
"tensorflow.constant",
"tensorflow.shape",
"numpy.arange",
"tensorflow.image.random_saturation",
"tensorflow.image.random_brightness"
] |
[((1028, 1057), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['self.seed'], {}), '(self.seed)\n', (1046, 1057), True, 'import tensorflow as tf\n'), ((1066, 1091), 'numpy.random.seed', 'np.random.seed', (['self.seed'], {}), '(self.seed)\n', (1080, 1091), True, 'import numpy as np\n'), ((1318, 1368), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['x'], {'seed': 'self.seed'}), '(x, seed=self.seed)\n', (1349, 1368), True, 'import tensorflow as tf\n'), ((1573, 1617), 'tensorflow.image.random_hue', 'tf.image.random_hue', (['x', '(0.05)'], {'seed': 'self.seed'}), '(x, 0.05, seed=self.seed)\n', (1592, 1617), True, 'import tensorflow as tf\n'), ((1630, 1685), 'tensorflow.image.random_saturation', 'tf.image.random_saturation', (['x', '(0.6)', '(1.2)'], {'seed': 'self.seed'}), '(x, 0.6, 1.2, seed=self.seed)\n', (1656, 1685), True, 'import tensorflow as tf\n'), ((1698, 1749), 'tensorflow.image.random_brightness', 'tf.image.random_brightness', (['x', '(0.05)'], {'seed': 'self.seed'}), '(x, 0.05, seed=self.seed)\n', (1724, 1749), True, 'import tensorflow as tf\n'), ((1762, 1815), 'tensorflow.image.random_contrast', 'tf.image.random_contrast', (['x', '(0.7)', '(1.0)'], {'seed': 'self.seed'}), '(x, 0.7, 1.0, seed=self.seed)\n', (1786, 1815), True, 'import tensorflow as tf\n'), ((1902, 1926), 'tensorflow.keras.backend.mean', 'tf.keras.backend.mean', (['x'], {}), '(x)\n', (1923, 1926), True, 'import tensorflow as tf\n'), ((1941, 1964), 'tensorflow.keras.backend.std', 'tf.keras.backend.std', (['x'], {}), '(x)\n', (1961, 1964), True, 'import tensorflow as tf\n'), ((1980, 2003), 'tensorflow.keras.backend.max', 'tf.keras.backend.max', (['x'], {}), '(x)\n', (2000, 2003), True, 'import tensorflow as tf\n'), ((2019, 2042), 'tensorflow.keras.backend.min', 'tf.keras.backend.min', (['x'], {}), '(x)\n', (2039, 2042), True, 'import tensorflow as tf\n'), ((2514, 2537), 'tensorflow.keras.backend.max', 'tf.keras.backend.max', (['x'], {}), '(x)\n', (2534, 2537), True, 'import tensorflow as tf\n'), ((2553, 2576), 'tensorflow.keras.backend.min', 'tf.keras.backend.min', (['x'], {}), '(x)\n', (2573, 2576), True, 'import tensorflow as tf\n'), ((2657, 2699), 'tensorflow.constant', 'tf.constant', (['brightness_val'], {'shape': 'x.shape'}), '(brightness_val, shape=x.shape)\n', (2668, 2699), True, 'import tensorflow as tf\n'), ((2750, 2811), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x'], {'clip_value_min': 'min_', 'clip_value_max': 'max_'}), '(x, clip_value_min=min_, clip_value_max=max_)\n', (2766, 2811), True, 'import tensorflow as tf\n'), ((3952, 4042), 'tensorflow.random.uniform', 'tf.random.uniform', ([], {'shape': '[]', 'minval': '(0.0)', 'maxval': '(1.0)', 'dtype': 'tf.float32', 'seed': 'self.seed'}), '(shape=[], minval=0.0, maxval=1.0, dtype=tf.float32, seed=\n self.seed)\n', (3969, 4042), True, 'import tensorflow as tf\n'), ((4250, 4443), 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'tf.keras.preprocessing.image.ImageDataGenerator', ([], {'rotation_range': '(2)', 'width_shift_range': '(0)', 'height_shift_range': '(0)', 'horizontal_flip': '(False)', 'shear_range': '(0)', 'fill_mode': '"""nearest"""', 'dtype': 'tf.float32'}), "(rotation_range=2,\n width_shift_range=0, height_shift_range=0, horizontal_flip=False,\n shear_range=0, fill_mode='nearest', dtype=tf.float32)\n", (4297, 4443), True, 'import tensorflow as tf\n'), ((3099, 3125), 'numpy.arange', 'np.arange', (['(0.85)', '(1.0)', '(0.01)'], {}), '(0.85, 1.0, 0.01)\n', (3108, 3125), True, 'import numpy as np\n'), ((2121, 2132), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (2129, 2132), True, 'import tensorflow as tf\n'), ((2608, 2633), 'numpy.random.random_sample', 'np.random.random_sample', ([], {}), '()\n', (2631, 2633), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
project_name = "reco-tut-asr"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
if not os.path.exists(project_path):
get_ipython().system(u'cp /content/drive/MyDrive/mykeys.py /content')
import mykeys
get_ipython().system(u'rm /content/mykeys.py')
path = "/content/" + project_name;
get_ipython().system(u'mkdir "{path}"')
get_ipython().magic(u'cd "{path}"')
import sys; sys.path.append(path)
get_ipython().system(u'git config --global user.email "<EMAIL>"')
get_ipython().system(u'git config --global user.name "reco-tut"')
get_ipython().system(u'git init')
get_ipython().system(u'git remote add origin https://"{mykeys.git_token}":[email protected]/"{account}"/"{project_name}".git')
get_ipython().system(u'git pull origin "{branch}"')
get_ipython().system(u'git checkout main')
else:
get_ipython().magic(u'cd "{project_path}"')
# In[8]:
import random
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# In[25]:
items = pd.read_csv('./data/silver/items.csv')
items.head()
# In[26]:
ratings = pd.read_csv('./data/silver/ratings.csv')  # referenced as `ratings` throughout below
ratings.head()
# In[27]:
cbf = pd.read_csv('./data/gold/cbf.csv')
item_item = pd.read_csv('./data/gold/item-item.csv')
user_user = pd.read_csv('./data/gold/user-user.csv')
pers_bias = pd.read_csv('./data/gold/pers-bias.csv')
mf = pd.read_csv('./data/gold/mf.csv')
# In[28]:
# preprocess
cbf = cbf.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
user_user = user_user.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
item_item = item_item.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
mf = mf.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
pers_bias = pers_bias.apply(lambda col: col.apply(lambda elem: str(elem).replace(',', '.'))).astype(float)
# In[29]:
recs = [cbf, item_item, user_user, pers_bias, mf]
recs_names = ['cbf', 'item_item', 'user_user', 'pers_bias', 'mf']
# ## Metrics
# In[30]:
def get_ratings(user_id):
user_ratings = ratings[user_id]
actual_ratings = user_ratings[~np.isnan(user_ratings)]
return actual_ratings
def get_top_n(user_id, n):
top_n = {}
for rec, rec_name in zip(recs, recs_names):
        top_n_items = rec[user_id].sort_values(ascending=False)[:n].index.values  # indices of the n highest-rated items
top_n[rec_name] = top_n_items
return top_n
def get_popular_items(n):
pop_percentages = ratings.copy()
pop_percentages['popularity'] = ratings.apply(lambda row: np.sum(~np.isnan(row))-1, axis=1)/len(ratings.columns[1::])
pop_percentages = pop_percentages.sort_values(by = 'popularity', ascending=False)
return pop_percentages.item.values[:n]
def get_rmse(user_id):
user_ratings = get_ratings(user_id)
rmse = {}
for rec, rec_name in zip(recs, recs_names):
predicted_ratings = rec.loc[user_ratings.index, user_id]
temp = np.sqrt(np.average((predicted_ratings - user_ratings)**2))
rmse[rec_name] = temp
return rmse
def get_precision_at_n(user_id, n):
top_n = get_top_n(user_id, n)
user_ratings = get_ratings(user_id).index.values
precisions = {}
for rec, rec_name in zip(recs, recs_names):
temp = np.sum(np.isin(top_n[rec_name], user_ratings))/n
precisions[rec_name] = temp
return precisions
# We will use the "FullCat" column in the items catalog to determine the product diversity in the recommendations.
# The recommender with a high number of distinct product categories in its recommendations is said to be product-diverse
def get_product_diversity(user_id, n):
top_n = get_top_n(user_id, n)
product_diversity = {}
for rec_name in top_n:
categories = items.loc[top_n[rec_name]][['FullCat']].values
categories = set([item for sublist in categories for item in sublist])
product_diversity[rec_name] = len(categories)
return product_diversity
# We will use the "Price" column in the items catalog to determine cost diversity in the recommendations.
# The recommender with a high standard deviation in the cost across all its recommendations is said to be cost-diverse
def get_cost_diversity(user_id, n):
top_n = get_top_n(user_id,n)
cost_diversity = {}
for rec_name in top_n:
std_dev = np.std(items.loc[top_n[rec_name]][['Price']].values)
cost_diversity[rec_name] = std_dev
return cost_diversity
# We will use inverse popularity as a measure of serendipity.
# The recommender with the smallest number of recommendations on the "most popular" list will be called the most serendipitous
def get_serendipity(user_id, n):
top_n = get_top_n(user_id,n)
popular_items = get_popular_items(20)
serendipity = {}
for rec, rec_name in zip(recs, recs_names):
popularity = np.sum(np.isin(top_n[rec_name],popular_items))
if int(popularity) == 0:
serendipity[rec_name] = 1
else:
serendipity[rec_name] = 1/popularity
return serendipity
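# For example, if 2 of a recommender's top-n items overlap with the 20 most
# popular items, its serendipity score is 1/2; with no overlap, the score is 1.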
# In[31]:
avg_metrics = {}
for name in recs_names:
avg_metrics[name] = {"rmse": [], "precision_at_n": [], "product_diversity": [], "cost_diversity": [], "serendipity": []}
for user_id in ratings.columns:
if user_id == 'item':
continue
user_id = str(user_id)
rmse = get_rmse(user_id)
precision_at_n = get_precision_at_n(user_id, 10)
product_diversity = get_product_diversity(user_id, 10)
cost_diversity = get_cost_diversity(user_id, 10)
serendipity = get_serendipity(user_id, 10)
for key in avg_metrics:
rec_name = avg_metrics[key]
rec_name['rmse'].append(rmse[key])
rec_name['precision_at_n'].append(precision_at_n[key])
rec_name['product_diversity'].append(product_diversity[key])
rec_name['cost_diversity'].append(cost_diversity[key])
rec_name['serendipity'].append(serendipity[key])
# The Price for certain items is not available. Also, the rmse for certain users turns out to be NaN.
# Ignoring NaNs in the average metric calculation for now. So we are basically narrowing the evaluation down to users who have
# rated at least one item and to items for which the price is known.
for key in avg_metrics:
rec_name = avg_metrics[key]
for metric in rec_name:
temp = rec_name[metric]
temp = [x for x in temp if not np.isnan(x)]
rec_name[metric] = sum(temp) / len(temp)
# In[32]:
avg_metrics
# ## Hybridization
# In[33]:
# Creating a dataframe with ratings from all algorithms and user_ratings as ground truth
users = []
items = []  # NB: shadows the items DataFrame loaded above; it is re-read further below
user_ratings = []
cbf_ratings = []
user_user_ratings = []
item_item_ratings = []
mf_ratings = []
pers_bias_ratings = []
for user_id in ratings.columns:
if user_id == 'item':
continue
user_id = str(user_id)
true_ratings = get_ratings(user_id)
user_ratings.extend(true_ratings.values)
users.extend([user_id]*len(true_ratings))
items.extend(ratings.loc[true_ratings.index].item.values)
cbf_ratings.extend(cbf.loc[true_ratings.index, user_id].values)
item_item_ratings.extend(item_item.loc[true_ratings.index, user_id].values)
user_user_ratings.extend(user_user.loc[true_ratings.index, user_id].values)
pers_bias_ratings.extend(pers_bias.loc[true_ratings.index, user_id].values)
mf_ratings.extend(mf.loc[true_ratings.index, user_id].values)
df = pd.DataFrame({'user': users, 'item': items,'true_rating': user_ratings, 'cbf':cbf_ratings, 'item_item':item_item_ratings, 'user_user': user_user_ratings, 'pers_bias':pers_bias_ratings, 'mf':mf_ratings})
# In[34]:
df = df.dropna()
# In[35]:
df.head()
# ### Linear Combination
# In[39]:
clf = LinearRegression()
# In[40]:
# Split data in 80-20 train and test sets
train = df[0:(int(0.8*len(df)))]
test = df[(int(0.8*len(df)))::]
# In[41]:
train_data = train.drop(['user', 'item','true_rating'], axis=1)
train_labels = train.true_rating.values
model = clf.fit(train_data, train_labels)
# In[42]:
test_data = test.drop(['user', 'item','true_rating'], axis=1)
test_labels = test.true_rating.values
predictions = model.predict(test_data)
# In[44]:
# Avg RMSE predictions
avg_rmse = np.sqrt(np.average((predictions - test_labels)**2))
avg_rmse
# #### Top 5 for three users
# In[46]:
# Pick three users
users = random.sample(list(ratings.columns[1::]), 3)
print(users)
# In[47]:
train_data = df.drop(['user', 'item','true_rating'], axis=1)
train_labels = df.true_rating.values
model = clf.fit(train_data, train_labels)
# In[ ]:
top_5 = {}
for user in users:
    df_preds = df[df.user == user].copy()  # copy so the predictions column can be added safely
preds = model.predict(df_preds.drop(['user', 'item','true_rating'], axis=1))
df_preds['predictions'] = preds
top_5_items = list(df_preds.sort_values(by=['predictions'], ascending=False)[:5].item.values)
top_5[user] = top_5_items
# In[49]:
top_5
# ### Non-linear Combination
# For a non-linear combination of the algorithms, we'll use the DecisionTreeRegressor estimator from scikit-learn
# In[51]:
clf = DecisionTreeRegressor()
# In[52]:
# Split data in 80-20 train and test sets
train = df[0:(int(0.8*len(df)))]
test = df[(int(0.8*len(df)))::]
# In[53]:
train_data = train.drop(['user', 'item','true_rating'], axis=1)
train_labels = train.true_rating.values
model = clf.fit(train_data, train_labels)
# In[54]:
test_data = test.drop(['user', 'item','true_rating'], axis=1)
test_labels = test.true_rating.values
predictions = model.predict(test_data)
# In[55]:
# Avg RMSE predictions
avg_rmse = np.sqrt(np.average((predictions - test_labels)**2))
avg_rmse
# #### Top-5 for 3 users
# In[56]:
# Using the same users as above to compare across the same users
users = ['3430', '112', '1817']
# In[57]:
train_data = df.drop(['user', 'item','true_rating'], axis=1)
train_labels = df.true_rating.values
model = clf.fit(train_data, train_labels)
# In[ ]:
top_5 = {}
for user in users:
    df_preds = df[df.user == user].copy()  # copy so the predictions column can be added safely
preds = model.predict(df_preds.drop(['user', 'item','true_rating'], axis=1))
df_preds['predictions'] = preds
top_5_items = list(df_preds.sort_values(by=['predictions'], ascending=False)[:5].item.values)
top_5[user] = top_5_items
# In[59]:
top_5
# ## Different recommenders based on user type
# This hybridization technique aims to create separate recommender strategies for two separate scenarios: one where users end up on the Nile-River.com landing page via banner ads for school products, and another where users arrive at the landing page via endorsements for office products. For the first scenario, we'll pick a 3:2 ratio of school (inexpensive) products vs. office (expensive) products, and the reverse for the second scenario, i.e. a 2:3 ratio of school to office products. Here we will evaluate only the first scenario.
# In[68]:
# Determine threshold to label an item cheap or expensive: let's set this as the third quartile of the price list
# This is assuming office products are mostly in the expensive bracket
items = pd.read_csv('./data/silver/items.csv') # df converted to list in processing above, so loading back
prices = items.Price.values
price_threshold = np.percentile([x for x in prices if not np.isnan(x)], 75)
# ### Performance
# In[69]:
def get_precision_at_n(user_id, top_n):
user_ratings = get_ratings(user_id).index.values
precision_at_n = np.sum(np.isin(top_n, user_ratings))/ len(top_n)
return precision_at_n
# In[70]:
def get_cost_diversity(top_n):
std_dev = np.std(items.loc[top_n][['Price']].values)
return std_dev
# In[71]:
def get_product_diversity(top_n):
categories = items.loc[top_n][['FullCat']].values
categories = set([item for sublist in categories for item in sublist])
return len(categories)
# In[72]:
def get_serendipity(top_n):
popular_items = get_popular_items(20)
popularity = np.sum(np.isin(top_n,popular_items))
if int(popularity) == 0:
serendipity = 1
else:
serendipity = 1/popularity
return serendipity
# In[73]:
# To pick which items to finally recommend, let's assume that all the items in the top-5 for each recommender are
# equally relevant. We can potentially include some ranking-based selection to pick items that are more relevant AND fit the
# cost criteria. For now, we'll pick at random since we're assuming all items are equally relevant.
def get_mixed_recs(user_id, n, n_cheap, n_exp):
top_n_overall_items = []
top_n_overall_prices = []
mixed_recs = []
for rec, rec_name in zip(recs, recs_names):
        top_n_items = rec[user_id].sort_values(ascending=False)[:n].index.values  # n highest-rated items
top_n_prices = items.loc[top_n_items][['Price']].values
top_n_overall_items.extend(top_n_items)
top_n_overall_prices.extend(top_n_prices)
top_dict = dict(zip(top_n_overall_items, top_n_overall_prices))
top_cheap = dict(filter(lambda elem: elem[1] <= price_threshold, top_dict.items())).keys()
top_exp = dict(filter(lambda elem: elem[1] > price_threshold, top_dict.items())).keys()
mixed_recs = random.sample(list(top_cheap), n_cheap) + random.sample(list(top_exp), n_exp)
return mixed_recs
# In[74]:
avg_metrics = {"precision_at_n": [], "product_diversity": [], "cost_diversity": [], "serendipity": []}
for user_id in ratings.columns:
if user_id == 'item':
continue
user_id = str(user_id)
top_5 = get_mixed_recs(user_id, 5, 3, 2)
avg_metrics["precision_at_n"].append(get_precision_at_n(user_id, top_5))
avg_metrics["cost_diversity"].append(get_cost_diversity(top_5))
avg_metrics["product_diversity"].append(get_product_diversity(top_5))
avg_metrics["serendipity"].append(get_serendipity(top_5))
for metric in avg_metrics:
temp = avg_metrics[metric]
temp = [x for x in temp if not np.isnan(x)]
avg_metrics[metric] = sum(temp) / len(temp)
# In[75]:
avg_metrics
# ### Top-5 for three users
# In[76]:
# Assuming all three users ended up on the landing page through scenario 1, i.e. banner ads for school products
users = ['3430', '112', '1817']
# In[77]:
top_5 = {}
for user_id in users:
# For office products
# top_5[user_id] = get_mixed_recs(user_id, 5, 2, 3)
# For school products
top_5[user_id] = list(ratings.loc[get_mixed_recs(user_id, 5, 3, 2)].item.values)
# In[78]:
top_5
# ## Switching hybridization
# We will not be implementing this hybridization as such, but we will explore whether the strategy of using content-based filtering for new users (users with few or no ratings) or for items with few ratings is even reasonable for this dataset. For this, let's begin by visualizing the number of ratings per item and per user in the dataset.
# In[80]:
item_ratings = ratings.apply(lambda row: np.sum(~np.isnan(row))-1, axis=1)
# In[81]:
plt.hist(item_ratings)
plt.xlabel("Number of ratings")
plt.ylabel("number of items")
# In[82]:
# Number of items with < 10 ratings
count_less_than_10 = np.count_nonzero(item_ratings<10)/len(item_ratings)*100
# In[83]:
count_less_than_10
# In[84]:
user_ratings = []
for user_id in ratings.columns:
if user_id == 'item':
continue
user_id = str(user_id)
user_ratings.append(len(get_ratings(user_id)))
# In[85]:
plt.hist(user_ratings)
plt.xlabel("Number of ratings")
plt.ylabel("number of users")
# In[86]:
# Number of users with < 10 ratings
count_less_than_10 = np.count_nonzero(np.array(user_ratings)<10)/len(user_ratings)*100
# In[87]:
count_less_than_10
|
[
"pandas.DataFrame",
"sys.path.append",
"numpy.isin",
"numpy.average",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.hist",
"numpy.count_nonzero",
"pandas.read_csv",
"numpy.std",
"os.path.exists",
"numpy.isnan",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"os.path.join"
] |
[((145, 183), 'os.path.join', 'os.path.join', (['"""/content"""', 'project_name'], {}), "('/content', project_name)\n", (157, 183), False, 'import os\n'), ((1218, 1256), 'pandas.read_csv', 'pd.read_csv', (['"""./data/silver/items.csv"""'], {}), "('./data/silver/items.csv')\n", (1229, 1256), True, 'import pandas as pd\n'), ((1301, 1341), 'pandas.read_csv', 'pd.read_csv', (['"""./data/silver/ratings.csv"""'], {}), "('./data/silver/ratings.csv')\n", (1312, 1341), True, 'import pandas as pd\n'), ((1384, 1418), 'pandas.read_csv', 'pd.read_csv', (['"""./data/gold/cbf.csv"""'], {}), "('./data/gold/cbf.csv')\n", (1395, 1418), True, 'import pandas as pd\n'), ((1431, 1471), 'pandas.read_csv', 'pd.read_csv', (['"""./data/gold/item-item.csv"""'], {}), "('./data/gold/item-item.csv')\n", (1442, 1471), True, 'import pandas as pd\n'), ((1484, 1524), 'pandas.read_csv', 'pd.read_csv', (['"""./data/gold/user-user.csv"""'], {}), "('./data/gold/user-user.csv')\n", (1495, 1524), True, 'import pandas as pd\n'), ((1537, 1577), 'pandas.read_csv', 'pd.read_csv', (['"""./data/gold/pers-bias.csv"""'], {}), "('./data/gold/pers-bias.csv')\n", (1548, 1577), True, 'import pandas as pd\n'), ((1583, 1616), 'pandas.read_csv', 'pd.read_csv', (['"""./data/gold/mf.csv"""'], {}), "('./data/gold/mf.csv')\n", (1594, 1616), True, 'import pandas as pd\n'), ((7652, 7867), 'pandas.DataFrame', 'pd.DataFrame', (["{'user': users, 'item': items, 'true_rating': user_ratings, 'cbf':\n cbf_ratings, 'item_item': item_item_ratings, 'user_user':\n user_user_ratings, 'pers_bias': pers_bias_ratings, 'mf': mf_ratings}"], {}), "({'user': users, 'item': items, 'true_rating': user_ratings,\n 'cbf': cbf_ratings, 'item_item': item_item_ratings, 'user_user':\n user_user_ratings, 'pers_bias': pers_bias_ratings, 'mf': mf_ratings})\n", (7664, 7867), True, 'import pandas as pd\n'), ((7956, 7974), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (7972, 7974), False, 'from sklearn.linear_model import LinearRegression\n'), ((9304, 9327), 'sklearn.tree.DecisionTreeRegressor', 'DecisionTreeRegressor', ([], {}), '()\n', (9325, 9327), False, 'from sklearn.tree import DecisionTreeRegressor\n'), ((11306, 11344), 'pandas.read_csv', 'pd.read_csv', (['"""./data/silver/items.csv"""'], {}), "('./data/silver/items.csv')\n", (11317, 11344), True, 'import pandas as pd\n'), ((15098, 15120), 'matplotlib.pyplot.hist', 'plt.hist', (['item_ratings'], {}), '(item_ratings)\n', (15106, 15120), True, 'import matplotlib.pyplot as plt\n'), ((15121, 15152), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of ratings"""'], {}), "('Number of ratings')\n", (15131, 15152), True, 'import matplotlib.pyplot as plt\n'), ((15153, 15182), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of items"""'], {}), "('number of items')\n", (15163, 15182), True, 'import matplotlib.pyplot as plt\n'), ((15542, 15564), 'matplotlib.pyplot.hist', 'plt.hist', (['user_ratings'], {}), '(user_ratings)\n', (15550, 15564), True, 'import matplotlib.pyplot as plt\n'), ((15565, 15596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of ratings"""'], {}), "('Number of ratings')\n", (15575, 15596), True, 'import matplotlib.pyplot as plt\n'), ((15597, 15626), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""number of users"""'], {}), "('number of users')\n", (15607, 15626), True, 'import matplotlib.pyplot as plt\n'), ((192, 220), 'os.path.exists', 'os.path.exists', (['project_path'], {}), '(project_path)\n', (206, 220), False, 'import os\n'), ((505, 526), 'sys.path.append', 'sys.path.append', (['path'], {}), '(path)\n', (520, 526), False, 'import sys\n'), ((8465, 8509), 'numpy.average', 'np.average', (['((predictions - test_labels) ** 2)'], {}), '((predictions - test_labels) ** 2)\n', (8475, 8509), True, 'import numpy as np\n'), ((9818, 9862), 'numpy.average', 'np.average', (['((predictions - test_labels) ** 2)'], {}), '((predictions - test_labels) ** 2)\n', (9828, 9862), True, 'import numpy as np\n'), ((11790, 11832), 'numpy.std', 'np.std', (["items.loc[top_n][['Price']].values"], {}), "(items.loc[top_n][['Price']].values)\n", (11796, 11832), True, 'import numpy as np\n'), ((4587, 4639), 'numpy.std', 'np.std', (["items.loc[top_n[rec_name]][['Price']].values"], {}), "(items.loc[top_n[rec_name]][['Price']].values)\n", (4593, 4639), True, 'import numpy as np\n'), ((12164, 12193), 'numpy.isin', 'np.isin', (['top_n', 'popular_items'], {}), '(top_n, popular_items)\n', (12171, 12193), True, 'import numpy as np\n'), ((15254, 15289), 'numpy.count_nonzero', 'np.count_nonzero', (['(item_ratings < 10)'], {}), '(item_ratings < 10)\n', (15270, 15289), True, 'import numpy as np\n'), ((2408, 2430), 'numpy.isnan', 'np.isnan', (['user_ratings'], {}), '(user_ratings)\n', (2416, 2430), True, 'import numpy as np\n'), ((3215, 3266), 'numpy.average', 'np.average', (['((predicted_ratings - user_ratings) ** 2)'], {}), '((predicted_ratings - user_ratings) ** 2)\n', (3225, 3266), True, 'import numpy as np\n'), ((5095, 5134), 'numpy.isin', 'np.isin', (['top_n[rec_name]', 'popular_items'], {}), '(top_n[rec_name], popular_items)\n', (5102, 5134), True, 'import numpy as np\n'), ((11663, 11691), 'numpy.isin', 'np.isin', (['top_n', 'user_ratings'], {}), '(top_n, user_ratings)\n', (11670, 11691), True, 'import numpy as np\n'), ((3527, 3565), 'numpy.isin', 'np.isin', (['top_n[rec_name]', 'user_ratings'], {}), '(top_n[rec_name], user_ratings)\n', (3534, 3565), True, 'import numpy as np\n'), ((11491, 11502), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (11499, 11502), True, 'import numpy as np\n'), ((14093, 14104), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (14101, 14104), True, 'import numpy as np\n'), ((15715, 15737), 'numpy.array', 'np.array', (['user_ratings'], {}), '(user_ratings)\n', (15723, 15737), True, 'import numpy as np\n'), ((6619, 6630), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (6627, 6630), True, 'import numpy as np\n'), ((15058, 15071), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (15066, 15071), True, 'import numpy as np\n'), ((2816, 2829), 'numpy.isnan', 'np.isnan', (['row'], {}), '(row)\n', (2824, 2829), True, 'import numpy as np\n')]
|
"""Main module that contains SimulationOptimization class definition
.. module:: sim_opt.py
:synopsis: DWSIM simulation optimization class
.. moduleauthor:: <NAME> <<EMAIL>>
:Module: sim_opt.py
:Author: <NAME> <<EMAIL>>
"""
import numpy as np
import time
class SimulationOptimization():
"""Class that defines DWSIM simulation optimization objects.
:ivar path2sim: Absolute path to a DWSIM simulation (.dwxmz)
:ivar path2dwsim: Absolute path to the DWSIM installation
:ivar savepath: Absolute path to save the DWSIM simulation (.dwxmz)
:ivar verbose: Boolean that controls display messages during simulation calculation
:ivar x_val: Last simulated degrees of freedom values
:ivar f_val: Last simulated objective functions values
:ivar g_val: Last simulated constraints values
:ivar dof: Lambda function that assign the degrees of freedom of the DWSIM process simulation to be handled by the optimization solver
:ivar f: Lambda function that returns a numpy.array with objective functions values after converging the simulation
:ivar g: Lambda function that returns a numpy.array with constraints values after converging the simulation
:ivar n_dof: Number of degrees of freedom (size of optimization problem)
:ivar n_f: Number of objective functions (still unsupported for n_f>1, *i.e.* multi-objective problem)
:ivar n_g: Number of constraints
"""
def __init__(self, path2sim, dof=np.array([], dtype=object),
path2dwsim = "C:\\Users\\lfsfr\\AppData\\Local\\DWSIM7\\",
savepath = "", verbose = True): # pragma: no cover
self.path2sim = path2sim
self.path2dwsim = path2dwsim
if savepath=="":
self.savepath = path2sim
else:
self.savepath = savepath
self.x_val = np.array([])
self.f_val = np.array([])
self.g_val = np.array([])
self.f = np.array([], dtype=object)
self.n_f = self.f.size
self.g = np.array([], dtype=object)
self.n_g = self.g.size
self.dof = dof
self.n_dof = self.dof.size
self.verbose = verbose
def add_refs(self):
"""This method add reference in the proggraming environment to the DWSIM dlls, so they can be imported.
"""
import pythoncom
pythoncom.CoInitialize()
import clr
# from os import system as System
# from System.IO import Directory, Path, File
# from System import String, Environment
clr.AddReference(self.path2dwsim + "CapeOpen.dll")
clr.AddReference(self.path2dwsim + "DWSIM.Automation.dll")
clr.AddReference(self.path2dwsim + "DWSIM.Interfaces.dll")
clr.AddReference(self.path2dwsim + "DWSIM.GlobalSettings.dll")
clr.AddReference(self.path2dwsim + "DWSIM.SharedClasses.dll")
clr.AddReference(self.path2dwsim + "DWSIM.Thermodynamics.dll")
clr.AddReference(self.path2dwsim + "DWSIM.UnitOperations.dll")
clr.AddReference(self.path2dwsim + "System.Buffers.dll")
try:
clr.AddReference(self.path2dwsim + "System.Buffers2.dll")
except Exception as e:
pass
# print(Exception)
# print("More refs")
clr.AddReference(self.path2dwsim + "DWSIM.Inspector.dll")
clr.AddReference(self.path2dwsim + "DWSIM.MathOps.dll")
clr.AddReference(self.path2dwsim + "TcpComm.dll")
clr.AddReference(self.path2dwsim + "Microsoft.ServiceBus.dll")
clr.AddReference(self.path2dwsim + "System.Buffers.dll")
clr.AddReference(self.path2dwsim + "SkiaSharp.dll")
clr.AddReference(self.path2dwsim + "OxyPlot")
# clr.AddReference(self.path2dwsim + "OxyPlot.WindowsForms")
# clr.AddReference(self.path2dwsim + "DWSIM.ExtensionMethods.Eto")
print("added refs")
def connect(self, interf):
"""This method uses the automation manager object to load the DWSIM flowsheet and store them into self.
Args:
interf (DWSIM.Automation.Automation2): Automation manager object with methods to load, save, and create DWSIM flowsheet simulations.
"""
import sys
        if not hasattr(self, 'flowsheet'):
# load simulation
flowsheet = interf.LoadFlowsheet(self.path2sim)
# add DWSIM objects to Simulation object
self.interface = interf
self.flowsheet = flowsheet
if flowsheet is not None:
print("Simulation was loaded successfully")
def add_dof(self, dof_new, description=[None,None,None,None]):
"""Append a new degree of freedom to the SimulationOptimization object
Args:
dof_new (lambda function): Lambda function that assign the appended degrees of freedom of the DWSIM process simulation
"""
if self.dof.size==0:
self.dof = np.append(self.dof, np.append( dof_new, description ) )
else:
self.dof = np.block( [ [self.dof], [np.append( dof_new, description)] ] )
        self.n_dof += 1
# self.dof.reshape((self.n_dof,2))
def add_fobj(self, func, description=[None,None,None,None]):
"""Append a new objective function to the SimulationOptimization object
Args:
func (lambda function): Lambda function that returns a numpy.array with objective function value after converging the simulation
"""
if self.f.size==0:
self.f = np.append(self.f, np.append( func, description ) )
else:
self.f = np.block( [ [self.f], [np.append( func, description)] ] )
self.n_f += 1
# self.f = np.append(self.f, func)
# self.n_f = self.f.size
def add_constraint(self, g_func, description=[None,None,None,None]):
"""Append a new constraint to the SimulationOptimization object
Args:
g_func (lambda function): Lambda function that returns a numpy.array with constraint value after converging the simulation
"""
if self.g.size==0:
self.g = np.append(self.g, np.append( g_func, description ) )
else:
self.g = np.block( [ [self.g], [np.append( g_func, description)] ] )
self.n_g += 1
# self.g = np.append(self.g, g_func)
# self.n_g = self.g.size
def converge_simulation(self, x):
"""Converge the simulation with degrees of freedom values of ``x``
Args:
x (numpy.array): Array of degrees of freedom values to be simulated
"""
if self.verbose:
print(f"opt_functions calculation at x = {x}")
if x.size != self.n_dof:
print(f"Size of x {x.size} is diferent from n_dof = {self.n_dof}. DO you know what your doing? Only {x.size} values of dof will be assigned.")
for i in range(self.n_dof):
self.dof[i][0](x[i])
# first calculation
error = self.interface.CalculateFlowsheet2(self.flowsheet)
time.sleep(0.1)
# second calculation
error = self.interface.CalculateFlowsheet2(self.flowsheet)
time.sleep(0.1)
res_old = np.array([self.f[0]()])
for i in range(self.n_g):
res_old = np.append(res_old, np.asarray(self.g[i][0]()))
# third+ calculation
for conv_ite in range(3):
error = self.interface.CalculateFlowsheet2(self.flowsheet)
time.sleep(0.1)
res_new = np.array([self.f[0]()])
for i in range(self.n_g):
res_new = np.append(res_new, self.g[i][0]())
try:
variation = np.linalg.norm(res_new-res_old)
except:
variation = 1
if variation > 1e-6:
res_old = res_new
else:
if self.verbose:
print(f" Simulation converged in {conv_ite+3} iterations")
if len(error)>0:
print(f"{error} at x = {x}")
return
# fifth calculation, in case of error
if len(error)>0:
error = self.interface.CalculateFlowsheet2(self.flowsheet)
time.sleep(0.05)
if self.verbose:
print(" Simulation converged in 5 iterations or failed to converge...")
if len(error)>0:
print(f"{error} at x = {x}")
def calculate_optProblem(self, x):
"""Assign degrees of freedom values to the simulation if norm > 1e-10. Converge the simulation and return an array with objectives and constraints values.
Args:
x (numpy.array): Array of degrees of freedom values to be simulated
Returns:
numpy.array: Array of objectives and constraints values calculated at ``x``
"""
try:
delta_x = np.linalg.norm(self.x_val - np.asarray(x))
except:
delta_x = 1
if delta_x > 1e-10:
self.converge_simulation(x)
self.x_val = np.array(x)
self.f_val = np.zeros(self.n_f)
self.g_val = np.zeros(self.n_g)
if self.n_f>1:
for i, ff in enumerate(self.f):
self.f_val[i] = ff[0]()
elif self.n_f==0:
self.f_val = None
else:
self.f_val = np.array([self.f[0]()])
if self.n_g>1:
for i, gg in enumerate(self.g):
self.g_val[i] = gg[0]()
        elif self.n_g==0:
            self.g_val = None
else:
self.g_val = np.array([self.g[0]()])
if self.verbose:
print(f"f = {self.f_val}, g = {self.g_val} at x = {x}")
return np.append(self.f_val, self.g_val)
def fpen_barrier(self,x,pen=1000):
"""Calculates a penalized objective function using barrier method and considering ``f`` and ``g``.
Args:
x (numpy.array): Array of degrees of freedom values to be simulated.
pen (float, optional): Penalization parameter. Defaults to 1000.
Returns:
float: Penalized objective function.
"""
self.calculate_optProblem(x)
fpen = 0
for i in range(self.n_f):
fpen += np.asarray(self.f_val)[i]
for i in range(self.n_g):
fpen += pen*max(0, self.g_val[i])
return fpen
def fpen_quad(self, x, pen=1000):
"""Calculates a penalized objective function using quadratic penalization method and considering ``f`` and ``g``.
Args:
x (numpy.array): Array of degrees of freedom values to be simulated.
pen (float, optional): Penalization parameter. Defaults to 1000.
Returns:
float: Penalized objective function.
"""
self.calculate_optProblem(x)
fpen = 0
for i in range(self.n_f):
fpen += self.f_val[i]
for i in range(self.n_g):
fpen += pen*max(0, self.g_val[i])**2
return fpen
def fpen_exp(self, x, pen=1000):
"""Calculates a penalized objective function using exponential penalization method and considering ``f`` and ``g``.
Args:
x (numpy.array): Array of degrees of freedom values to be simulated.
pen (float, optional): Penalization parameter. Defaults to 1000.
Returns:
float: Penalized objective function.
"""
self.calculate_optProblem(x)
fpen = 0
for i in range(self.n_f):
fpen += self.f_val[i]
for i in range(self.n_g):
            fpen += pen*np.exp(max(0, self.g_val[i]))
return fpen
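# --- Toy illustration of the three penalization schemes above (standalone
# numbers chosen for illustration; not DWSIM output) ---
if __name__ == "__main__":
    f_val, g_viol, pen = 2.0, 0.1, 1000  # objective, constraint violation, penalty
    print(f_val + pen * max(0, g_viol))          # barrier:     102.0
    print(f_val + pen * max(0, g_viol) ** 2)     # quadratic:    12.0
    print(f_val + pen * np.exp(max(0, g_viol)))  # exponential: ~1107.2
    # note: the exponential form adds pen * exp(0) = pen even at zero violation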
|
[
"numpy.asarray",
"numpy.zeros",
"time.sleep",
"pythoncom.CoInitialize",
"numpy.append",
"clr.AddReference",
"numpy.array",
"numpy.linalg.norm"
] |
[((1505, 1531), 'numpy.array', 'np.array', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (1513, 1531), True, 'import numpy as np\n'), ((1882, 1894), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1890, 1894), True, 'import numpy as np\n'), ((1916, 1928), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1924, 1928), True, 'import numpy as np\n'), ((1950, 1962), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1958, 1962), True, 'import numpy as np\n'), ((1980, 2006), 'numpy.array', 'np.array', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (1988, 2006), True, 'import numpy as np\n'), ((2055, 2081), 'numpy.array', 'np.array', (['[]'], {'dtype': 'object'}), '([], dtype=object)\n', (2063, 2081), True, 'import numpy as np\n'), ((2388, 2412), 'pythoncom.CoInitialize', 'pythoncom.CoInitialize', ([], {}), '()\n', (2410, 2412), False, 'import pythoncom\n'), ((2596, 2646), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'CapeOpen.dll')"], {}), "(self.path2dwsim + 'CapeOpen.dll')\n", (2612, 2646), False, 'import clr\n'), ((2655, 2713), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.Automation.dll')"], {}), "(self.path2dwsim + 'DWSIM.Automation.dll')\n", (2671, 2713), False, 'import clr\n'), ((2722, 2780), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.Interfaces.dll')"], {}), "(self.path2dwsim + 'DWSIM.Interfaces.dll')\n", (2738, 2780), False, 'import clr\n'), ((2789, 2851), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.GlobalSettings.dll')"], {}), "(self.path2dwsim + 'DWSIM.GlobalSettings.dll')\n", (2805, 2851), False, 'import clr\n'), ((2860, 2921), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.SharedClasses.dll')"], {}), "(self.path2dwsim + 'DWSIM.SharedClasses.dll')\n", (2876, 2921), False, 'import clr\n'), ((2930, 2992), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.Thermodynamics.dll')"], {}), "(self.path2dwsim + 'DWSIM.Thermodynamics.dll')\n", (2946, 2992), False, 'import clr\n'), ((3001, 3063), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.UnitOperations.dll')"], {}), "(self.path2dwsim + 'DWSIM.UnitOperations.dll')\n", (3017, 3063), False, 'import clr\n'), ((3072, 3128), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'System.Buffers.dll')"], {}), "(self.path2dwsim + 'System.Buffers.dll')\n", (3088, 3128), False, 'import clr\n'), ((3328, 3385), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.Inspector.dll')"], {}), "(self.path2dwsim + 'DWSIM.Inspector.dll')\n", (3344, 3385), False, 'import clr\n'), ((3394, 3449), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'DWSIM.MathOps.dll')"], {}), "(self.path2dwsim + 'DWSIM.MathOps.dll')\n", (3410, 3449), False, 'import clr\n'), ((3458, 3507), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'TcpComm.dll')"], {}), "(self.path2dwsim + 'TcpComm.dll')\n", (3474, 3507), False, 'import clr\n'), ((3516, 3578), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'Microsoft.ServiceBus.dll')"], {}), "(self.path2dwsim + 'Microsoft.ServiceBus.dll')\n", (3532, 3578), False, 'import clr\n'), ((3587, 3643), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'System.Buffers.dll')"], {}), "(self.path2dwsim + 'System.Buffers.dll')\n", (3603, 3643), False, 'import clr\n'), ((3652, 3703), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'SkiaSharp.dll')"], {}), "(self.path2dwsim + 'SkiaSharp.dll')\n", (3668, 3703), False, 'import clr\n'), ((3712, 3757), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'OxyPlot')"], {}), "(self.path2dwsim + 'OxyPlot')\n", (3728, 3757), False, 'import clr\n'), ((7121, 7136), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7131, 7136), False, 'import time\n'), ((7241, 7256), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7251, 7256), False, 'import time\n'), ((9868, 9901), 'numpy.append', 'np.append', (['self.f_val', 'self.g_val'], {}), '(self.f_val, self.g_val)\n', (9877, 9901), True, 'import numpy as np\n'), ((3154, 3211), 'clr.AddReference', 'clr.AddReference', (["(self.path2dwsim + 'System.Buffers2.dll')"], {}), "(self.path2dwsim + 'System.Buffers2.dll')\n", (3170, 3211), False, 'import clr\n'), ((7549, 7564), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (7559, 7564), False, 'import time\n'), ((8308, 8324), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (8318, 8324), False, 'import time\n'), ((9151, 9162), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (9159, 9162), True, 'import numpy as np\n'), ((9188, 9206), 'numpy.zeros', 'np.zeros', (['self.n_f'], {}), '(self.n_f)\n', (9196, 9206), True, 'import numpy as np\n'), ((9232, 9250), 'numpy.zeros', 'np.zeros', (['self.n_g'], {}), '(self.n_g)\n', (9240, 9250), True, 'import numpy as np\n'), ((5022, 5053), 'numpy.append', 'np.append', (['dof_new', 'description'], {}), '(dof_new, description)\n', (5031, 5053), True, 'import numpy as np\n'), ((5626, 5654), 'numpy.append', 'np.append', (['func', 'description'], {}), '(func, description)\n', (5635, 5654), True, 'import numpy as np\n'), ((6225, 6255), 'numpy.append', 'np.append', (['g_func', 'description'], {}), '(g_func, description)\n', (6234, 6255), True, 'import numpy as np\n'), ((7755, 7788), 'numpy.linalg.norm', 'np.linalg.norm', (['(res_new - res_old)'], {}), '(res_new - res_old)\n', (7769, 7788), True, 'import numpy as np\n'), ((10409, 10431), 'numpy.asarray', 'np.asarray', (['self.f_val'], {}), '(self.f_val)\n', (10419, 10431), True, 'import numpy as np\n'), ((9003, 9016), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (9013, 9016), True, 'import numpy as np\n'), ((5121, 5152), 'numpy.append', 'np.append', (['dof_new', 'description'], {}), '(dof_new, description)\n', (5130, 5152), True, 'import numpy as np\n'), ((5718, 5746), 'numpy.append', 'np.append', (['func', 'description'], {}), '(func, description)\n', (5727, 5746), True, 'import numpy as np\n'), ((6319, 6349), 'numpy.append', 'np.append', (['g_func', 'description'], {}), '(g_func, description)\n', (6328, 6349), True, 'import numpy as np\n')]
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Still in experimental stage!
from optparse import OptionParser
import os
import sys
import copy
import numpy as np
import pandas as pd
import scipy as sp
COLUMNS = ["age", "workclass", "fnlwgt", "education", "education_num",
"marital_status", "occupation", "relationship", "race", "gender",
"capital_gain", "capital_loss", "hours_per_week", "native_country",
"income_bracket"]
AGE, WORKCLASS, FNLWGT, EDUCATION, EDUCATION_NUM, MARITAL_STATUS, OCCPATION, \
RELATIONSHIP, RACE, GENDER, CAPITAL_GAIN, CAPITAL_LOSS, HOURS_PER_WEEK, NATIVE_COUNTRY, \
AGE_BUCKETS, LABEL, EDUCATION_OCCUPATION, NATIVECOUNTRY_OCCUPATION, AGEBUCKET_EDUCATION_OCCUPATION = range(19)
LABEL_COLUMN = "label"
CATEGORICAL_COLUMNS = ["workclass", "education", "marital_status", "occupation",
"relationship", "race", "gender", "native_country"]
CONTINUOUS_COLUMNS = ["age", "education_num", "capital_gain", "capital_loss",
"hours_per_week"]
def get_data(train_file_name='train.data', test_file_name='test.data'):
df_train = pd.read_csv(train_file_name,
names=COLUMNS,
skipinitialspace=True,
engine="python")
df_test = pd.read_csv(test_file_name,
names=COLUMNS,
skipinitialspace=True,
skiprows=1, # skip first line: "|1x3 Cross Validator"
engine="python")
df_train = df_train.dropna(how='any', axis=0)
df_test = df_test.dropna(how='any', axis=0)
df_train[LABEL_COLUMN] = (
df_train["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
df_test[LABEL_COLUMN] = (
df_test["income_bracket"].apply(lambda x: ">50K" in x)).astype(int)
return df_train, df_test
def binary_search(val, array, start=0):
"""
binary search implementation
:param val: value to search
:param array: data array to be searched
:param start: 0 if array starts with 0 else 1
:return: location of val in array, or bucket fall in if not in array
"""
low = start
high = len(array) - 1 + start
while low <= high:
        mid = (low + high) // 2  # integer division so mid can index the array in Python 3
if array[mid] == val:
return mid
elif array[mid] > val:
high = mid-1
else:
low = mid+1
return low
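# Worked example of the bucket semantics (boundary values are made up):
#   binary_search(25, [18, 25, 30]) -> 1  (exact match returns its position)
#   binary_search(20, [18, 25, 30]) -> 1  (20 falls in the [18, 25) bucket)
#   binary_search(10, [18, 25, 30]) -> 0  (below the first boundary)
#   binary_search(40, [18, 25, 30]) -> 3  (past the last boundary)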
def bucketized_column(column, boundaries):
"""
transform every value of a column to corresponding bucket according to boundaries
:param column: primitive column
:param boundaries: boundaries to bucketize
:return: bucketized column
"""
_column = copy.deepcopy(column)
for i in range(len(_column)):
_column[i] = binary_search(_column[i], boundaries)
return _column
def discretize_for_lookupTable(df, data_type, lookup_dict, columns, start=0):
"""
    discretize for BigDL's lookupTable's requirement: elements of input should be less than or equal to $nIndex + 1
:param df: data tensor. Type must be numpy.ndarray
:param columns: columns to do discretize
:param start: index that starts from
:return: discretized data tensor
"""
if data_type == 'train':
for col in columns:
total = sorted({}.fromkeys(df[:, col]).keys())
total_dict = {k: i+start
for i, k in enumerate(total)}
for _ in range(len(df[:, col])):
if df[_, col] not in total_dict.keys():
df[_, col] = 1
else:
df[_, col] = total_dict[df[_, col]]
lookup_dict[col] = total_dict
elif data_type == 'test':
for col in columns:
total_dict = lookup_dict[col]
for _ in range(len(df[:, col])):
if df[_, col] not in total_dict.keys():
df[_, col] = 1
else:
df[_, col] = total_dict[df[_, col]]
else:
raise ValueError("Not valid data type")
return df, lookup_dict
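# Worked example (made-up hash codes): a training column [312, 517, 804, 517]
# is re-indexed to [1, 2, 3, 2] with start=1, so its values fit BigDL's lookup
# table; at test time, values unseen during training fall back to index 1.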
def cross_column(columns, hash_backet_size=1e4, scale=0.0):
"""
generate cross column feature from `columns` with hash bucket.
:param columns: columns to use to generate cross column, Type must be ndarray
    :param hash_backet_size: hash bucket size to bucketize cross columns to fixed hash bucket
    :param scale: optional multiplier applied to each hashed bucket id (0.0 leaves the ids unscaled)
    :return: cross column, represented as an ndarray
"""
assert columns.shape[0] > 0 and columns.shape[1] > 0
_crossed_column = np.zeros((columns.shape[0], 1))
for i in range(columns.shape[0]):
_crossed_column[i, 0] = (hash("_".join(map(str, columns[i, :]))) % hash_backet_size
+ hash_backet_size) % hash_backet_size
if scale > 0.0:
_crossed_column[i, 0] *= scale
return _crossed_column
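# Worked example (made-up category codes): cross_column(np.array([[3, 7], [3, 8]]))
# hashes the strings "3_7" and "3_8" into a (2, 1) array of bucket ids in
# [0, hash_backet_size). Note that Python 3 randomizes str hashes per process
# (PYTHONHASHSEED), so bucket assignments are not reproducible across runs
# unless the hash seed is fixed.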
def feature_columns(df, data_type, lookup_dict):
gender_dict = {"Male": 1, "Female": 2}
age_boundaries = [18, 25, 30, 35, 40, 45, 50, 55, 60, 65]
age_bucket = bucketized_column(df[:, AGE], boundaries=age_boundaries)
df[:, AGE_BUCKETS] = age_bucket
assert WORKCLASS == 1 and EDUCATION == 3 and CAPITAL_LOSS == 11 and NATIVE_COUNTRY == 13
education_occupation = cross_column(df[:, [EDUCATION, OCCPATION]], hash_backet_size=int(1e4))
nativecountry_occupation = cross_column(df[:, [NATIVE_COUNTRY, OCCPATION]], hash_backet_size=int(1e4))
agebucket_education_occpation = cross_column(df[:, [AGE_BUCKETS, EDUCATION, OCCPATION]], hash_backet_size=int(1e6))
for i in range(df.shape[0]):
df[i, WORKCLASS] = (hash(df[i, 1]) % 100 + 100) % 100 # workclass
df[i, EDUCATION] = (hash(df[i, 3]) % 1000 + 1000) % 1000 # education
df[i, RELATIONSHIP] = (hash(df[i, 7]) % 100 + 100) % 100 # relationship
df[i, OCCPATION] = (hash(df[i, 6]) % 1000 + 1000) % 1000 # occupation
df[i, NATIVE_COUNTRY] = (hash(df[i, 13]) % 1000 + 1000) % 1000 # native_country
df[i, GENDER] = gender_dict[df[i, 9]] \
if (df[i, 9] in gender_dict.keys()) else -1 # gender
df[i, AGE] = df[i, 0] # age
df[i, EDUCATION_NUM] = df[i, 4] # education_num
df[i, CAPITAL_GAIN] = df[i, 10] # capital_gain
df[i, CAPITAL_LOSS] = df[i, 11] # capital_loss
df[i, HOURS_PER_WEEK] = df[i, 12] # hours_per_week
df, lookup_dict = discretize_for_lookupTable(df, data_type, lookup_dict,
columns=[WORKCLASS, EDUCATION, RELATIONSHIP, OCCPATION, NATIVE_COUNTRY, GENDER], start=1)
df = np.c_[df, education_occupation, nativecountry_occupation, agebucket_education_occpation]
return df, lookup_dict
def make_wide_deep_columns(df):
wide_columns = np.array(df[:, GENDER])
wide_columns = np.c_[wide_columns, df[:, NATIVE_COUNTRY]]
wide_columns = np.c_[wide_columns, df[:, EDUCATION], df[:, OCCPATION]]
wide_columns = np.c_[wide_columns, df[:, WORKCLASS], df[:, RELATIONSHIP]]
wide_columns = np.c_[wide_columns, df[:, AGE_BUCKETS], df[:, EDUCATION_OCCUPATION]]
wide_columns = np.c_[wide_columns, df[:, NATIVECOUNTRY_OCCUPATION], df[:, AGEBUCKET_EDUCATION_OCCUPATION]]
deep_columns = np.array(df[:, WORKCLASS])
deep_columns = np.c_[deep_columns, df[:, EDUCATION], df[:, GENDER]]
deep_columns = np.c_[deep_columns, df[:, RELATIONSHIP], df[:, NATIVE_COUNTRY]]
deep_columns = np.c_[deep_columns, df[:, OCCPATION]]
deep_columns = np.c_[deep_columns, df[:, AGE], df[:, EDUCATION_NUM], df[:, CAPITAL_GAIN]]
deep_columns = np.c_[deep_columns, df[:, CAPITAL_LOSS], df[:, HOURS_PER_WEEK]]
wide_deep_columns = np.c_[wide_columns, deep_columns]
return wide_deep_columns, np.array(df[:, LABEL])
def handle():
df_train, df_test = get_data()
df_train = np.array(df_train)
df_test = np.array(df_test)
df_train, lookup_dict = feature_columns(df_train, 'train', {})
df_test, _ = feature_columns(df_test, 'test', lookup_dict)
train_data, train_label = make_wide_deep_columns(df_train)
test_data, test_label = make_wide_deep_columns(df_test)
np.savetxt("train_tensor.data", train_data, fmt="%d", delimiter=',')
np.savetxt("train_label.data", train_label, fmt="%d")
np.savetxt("test_tensor.data", test_data, fmt="%d", delimiter=',')
np.savetxt("test_label.data", test_label, fmt="%d")
handle()
|
[
"copy.deepcopy",
"pandas.read_csv",
"numpy.savetxt",
"numpy.zeros",
"numpy.array"
] |
[((1669, 1757), 'pandas.read_csv', 'pd.read_csv', (['train_file_name'], {'names': 'COLUMNS', 'skipinitialspace': '(True)', 'engine': '"""python"""'}), "(train_file_name, names=COLUMNS, skipinitialspace=True, engine=\n 'python')\n", (1680, 1757), True, 'import pandas as pd\n'), ((1849, 1948), 'pandas.read_csv', 'pd.read_csv', (['test_file_name'], {'names': 'COLUMNS', 'skipinitialspace': '(True)', 'skiprows': '(1)', 'engine': '"""python"""'}), "(test_file_name, names=COLUMNS, skipinitialspace=True, skiprows=\n 1, engine='python')\n", (1860, 1948), True, 'import pandas as pd\n'), ((3268, 3289), 'copy.deepcopy', 'copy.deepcopy', (['column'], {}), '(column)\n', (3281, 3289), False, 'import copy\n'), ((5112, 5143), 'numpy.zeros', 'np.zeros', (['(columns.shape[0], 1)'], {}), '((columns.shape[0], 1))\n', (5120, 5143), True, 'import numpy as np\n'), ((7328, 7351), 'numpy.array', 'np.array', (['df[:, GENDER]'], {}), '(df[:, GENDER])\n', (7336, 7351), True, 'import numpy as np\n'), ((7786, 7812), 'numpy.array', 'np.array', (['df[:, WORKCLASS]'], {}), '(df[:, WORKCLASS])\n', (7794, 7812), True, 'import numpy as np\n'), ((8381, 8399), 'numpy.array', 'np.array', (['df_train'], {}), '(df_train)\n', (8389, 8399), True, 'import numpy as np\n'), ((8414, 8431), 'numpy.array', 'np.array', (['df_test'], {}), '(df_test)\n', (8422, 8431), True, 'import numpy as np\n'), ((8689, 8757), 'numpy.savetxt', 'np.savetxt', (['"""train_tensor.data"""', 'train_data'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('train_tensor.data', train_data, fmt='%d', delimiter=',')\n", (8699, 8757), True, 'import numpy as np\n'), ((8762, 8815), 'numpy.savetxt', 'np.savetxt', (['"""train_label.data"""', 'train_label'], {'fmt': '"""%d"""'}), "('train_label.data', train_label, fmt='%d')\n", (8772, 8815), True, 'import numpy as np\n'), ((8820, 8886), 'numpy.savetxt', 'np.savetxt', (['"""test_tensor.data"""', 'test_data'], {'fmt': '"""%d"""', 'delimiter': '""","""'}), "('test_tensor.data', test_data, fmt='%d', delimiter=',')\n", (8830, 8886), True, 'import numpy as np\n'), ((8891, 8942), 'numpy.savetxt', 'np.savetxt', (['"""test_label.data"""', 'test_label'], {'fmt': '"""%d"""'}), "('test_label.data', test_label, fmt='%d')\n", (8901, 8942), True, 'import numpy as np\n'), ((8292, 8314), 'numpy.array', 'np.array', (['df[:, LABEL]'], {}), '(df[:, LABEL])\n', (8300, 8314), True, 'import numpy as np\n')]
|
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout
from sklearn import metrics
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import numpy as np
import argparse
import time
import grpc
from protobuf.api_pb2 import ModelParameters, ModelResults, Empty, Optimizer, ActivationFunc
from protobuf.api_pb2_grpc import APIStub
def ParseArgs() -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("-s","--server", action="store", dest="server",
type=str, default="localhost:10000",
help="Server address to connect to")
return parser.parse_args()
def CreateModel(params: ModelParameters, shape) -> Sequential:
model = Sequential()
activation_func = ActivationFunc.Name(params.activation_func).lower()
optimizers = {
Optimizer.Adam: tf.keras.optimizers.Adam,
Optimizer.SGD: tf.keras.optimizers.SGD,
Optimizer.RMSprop: tf.keras.optimizers.RMSprop
}
for i, layer in enumerate(params.layers):
if i == 0:
model.add(Dense(units=layer.num_neurons,
activation=activation_func,
input_shape=(shape[1],)))
else:
model.add(Dense(units=layer.num_neurons,
activation=activation_func))
if params.dropout:
model.add(Dropout(0.25))
model.add(Dense(1, activation="sigmoid"))
optimizer = optimizers[params.optimizer]
model.compile(optimizer=optimizer(params.learning_rate),
loss=tf.keras.losses.binary_crossentropy,
metrics=["accuracy"])
return model
if __name__ == "__main__":
args = ParseArgs()
df = pd.read_csv("https://archive.ics.uci.edu/ml/machine-learning-databases/00547/Algerian_forest_fires_dataset_UPDATE.csv",
skiprows=[0,124,125,126])
df.columns=df.columns.map(lambda x: x.strip())
df.drop(columns=["day", "year"], inplace=True)
df.dropna(inplace=True)
df["Classes"] = df["Classes"].map(lambda x: 1 if x.strip() == "fire" else 0)
X = np.array(df.drop(["Classes"], axis=1))
y = np.array(df["Classes"])
scaler = preprocessing.MinMaxScaler()
X = scaler.fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
print(f"Connecting to {args.server}")
with grpc.insecure_channel(args.server) as channel:
stub = APIStub(channel)
while True:
try:
params = stub.GetModelParams(Empty())
print(params)
model = CreateModel(params, X.shape)
# Train model
model.fit(X_train, y_train,
batch_size=32,
epochs=10,
verbose=1)
y_pred = model.predict(X_test)
y_pred = (y_pred > 0.5).astype("int32")
f1_score = metrics.f1_score(y_test, y_pred)
print(f"F1: {f1_score}")
results = ModelResults()
results.model_id = params.model_id
                results.recall = f1_score  # NB: the proto field is named "recall" but carries the F1 score
                print("Returning results")
_ = stub.ReturnModel(results)
except grpc.RpcError as rpc_error:
if rpc_error.code() == grpc.StatusCode.CANCELLED:
print("No models to evaluate now. Sleeping...")
time.sleep(0.5)
elif rpc_error.code() == grpc.StatusCode.UNAVAILABLE:
print("Server is down")
exit(0)
else:
print(rpc_error)
exit(1)
|
[
"argparse.ArgumentParser",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Dense",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"protobuf.api_pb2.ModelResults",
"sklearn.preprocessing.MinMaxScaler",
"grpc.insecure_channel",
"protobuf.api_pb2.Empty",
"time.sleep",
"sklearn.metrics.f1_score",
"numpy.array",
"tensorflow.keras.models.Sequential",
"protobuf.api_pb2_grpc.APIStub",
"protobuf.api_pb2.ActivationFunc.Name"
] |
[((588, 613), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (611, 613), False, 'import argparse\n'), ((917, 929), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (927, 929), False, 'from tensorflow.keras.models import Sequential\n'), ((1942, 2100), 'pandas.read_csv', 'pd.read_csv', (['"""https://archive.ics.uci.edu/ml/machine-learning-databases/00547/Algerian_forest_fires_dataset_UPDATE.csv"""'], {'skiprows': '[0, 124, 125, 126]'}), "(\n 'https://archive.ics.uci.edu/ml/machine-learning-databases/00547/Algerian_forest_fires_dataset_UPDATE.csv'\n , skiprows=[0, 124, 125, 126])\n", (1953, 2100), True, 'import pandas as pd\n'), ((2375, 2398), 'numpy.array', 'np.array', (["df['Classes']"], {}), "(df['Classes'])\n", (2383, 2398), True, 'import numpy as np\n'), ((2413, 2441), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (2439, 2441), False, 'from sklearn import preprocessing\n'), ((2514, 2567), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(0)'}), '(X, y, test_size=0.2, random_state=0)\n', (2530, 2567), False, 'from sklearn.model_selection import train_test_split\n'), ((1624, 1654), 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), "(1, activation='sigmoid')\n", (1629, 1654), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((2620, 2654), 'grpc.insecure_channel', 'grpc.insecure_channel', (['args.server'], {}), '(args.server)\n', (2641, 2654), False, 'import grpc\n'), ((2682, 2698), 'protobuf.api_pb2_grpc.APIStub', 'APIStub', (['channel'], {}), '(channel)\n', (2689, 2698), False, 'from protobuf.api_pb2_grpc import APIStub\n'), ((953, 996), 'protobuf.api_pb2.ActivationFunc.Name', 'ActivationFunc.Name', (['params.activation_func'], {}), '(params.activation_func)\n', (972, 996), False, 'from protobuf.api_pb2 import ModelParameters, ModelResults, Empty, Optimizer, ActivationFunc\n'), ((1275, 1363), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'layer.num_neurons', 'activation': 'activation_func', 'input_shape': '(shape[1],)'}), '(units=layer.num_neurons, activation=activation_func, input_shape=(\n shape[1],))\n', (1280, 1363), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((1452, 1510), 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'layer.num_neurons', 'activation': 'activation_func'}), '(units=layer.num_neurons, activation=activation_func)\n', (1457, 1510), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((1590, 1603), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1597, 1603), False, 'from tensorflow.keras.layers import Dense, Dropout\n'), ((3214, 3246), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3230, 3246), False, 'from sklearn import metrics\n'), ((3327, 3341), 'protobuf.api_pb2.ModelResults', 'ModelResults', ([], {}), '()\n', (3339, 3341), False, 'from protobuf.api_pb2 import ModelParameters, ModelResults, Empty, Optimizer, ActivationFunc\n'), ((2782, 2789), 'protobuf.api_pb2.Empty', 'Empty', ([], {}), '()\n', (2787, 2789), False, 'from protobuf.api_pb2 import ModelParameters, ModelResults, Empty, Optimizer, ActivationFunc\n'), ((3726, 3741), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3736, 3741), False, 'import time\n')]
|
import pytest
import os
import numpy as np
import pyscal.core as pc
import pyscal.crystal_structures as pcs
def test_q_4():
    atoms, boxdims = pcs.make_crystal('bcc', repetitions=[4, 4, 4])
sys = pc.System()
sys.atoms = atoms
sys.box = boxdims
#sys.get_neighbors(method = 'voronoi')
    sys.find_neighbors(method='cutoff', cutoff=0.9)
sys.calculate_q([4, 6], averaged=True)
q = sys.get_qvals([4, 6], averaged=True)
    assert np.round(np.mean(np.array(q[0])), decimals=2) == 0.51, "Calculated q4 value is wrong!"
    assert np.round(np.mean(np.array(q[1])), decimals=2) == 0.63, "Calculated q6 value is wrong!"
q = sys.get_qvals([4, 6])
    assert np.round(np.mean(np.array(q[0])), decimals=2) == 0.51, "Calculated q4 value is wrong!"
    assert np.round(np.mean(np.array(q[1])), decimals=2) == 0.63, "Calculated q6 value is wrong!"
|
[
"pyscal.crystal_structures.make_crystal",
"numpy.array",
"pyscal.core.System"
] |
[((153, 199), 'pyscal.crystal_structures.make_crystal', 'pcs.make_crystal', (['"""bcc"""'], {'repetitions': '[4, 4, 4]'}), "('bcc', repetitions=[4, 4, 4])\n", (169, 199), True, 'import pyscal.crystal_structures as pcs\n'), ((213, 224), 'pyscal.core.System', 'pc.System', ([], {}), '()\n', (222, 224), True, 'import pyscal.core as pc\n'), ((489, 503), 'numpy.array', 'np.array', (['q[0]'], {}), '(q[0])\n', (497, 503), True, 'import numpy as np\n'), ((589, 603), 'numpy.array', 'np.array', (['q[1]'], {}), '(q[1])\n', (597, 603), True, 'import numpy as np\n'), ((722, 736), 'numpy.array', 'np.array', (['q[0]'], {}), '(q[0])\n', (730, 736), True, 'import numpy as np\n'), ((822, 836), 'numpy.array', 'np.array', (['q[1]'], {}), '(q[1])\n', (830, 836), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 15 22:37:00 2016
@author: <NAME>
"""
import random
import time
import numpy
from solution import solution
def PSO(objf, lb, ub, dim, popSize, iters):
# PSO parameters
vMax = 6
wMax = 0.9
wMin = 0.2
c1 = 2
c2 = 2
s = solution()
if not isinstance(lb, list):
lb = [lb] * dim
if not isinstance(ub, list):
ub = [ub] * dim
######################## Initializations
vel = numpy.zeros((popSize, dim))
pBestScore = numpy.zeros(popSize)
pBestScore.fill(float("inf"))
pBest = numpy.zeros((popSize, dim))
gBest = numpy.zeros(dim)
gBestScore = float("inf")
pos = numpy.zeros((popSize, dim))
for i in range(dim):
pos[:, i] = numpy.random.uniform(0, 1, popSize) * (ub[i] - lb[i]) + lb[i]
convergenceCurve = numpy.zeros(iters)
############################################
print('PSO is optimizing "' + objf.__name__ + '"')
timerStart = time.time()
s.startTime = time.strftime("%Y-%m-%d-%H-%M-%S")
for l in range(0, iters):
for i in range(0, popSize):
# pos[i,:]=checkBounds(pos[i,:],lb,ub)
for j in range(dim):
pos[i, j] = numpy.clip(pos[i, j], lb[j], ub[j])
# Calculate objective function for each particle
fitness = objf(pos[i, :])
if pBestScore[i] > fitness:
pBestScore[i] = fitness
pBest[i, :] = pos[i, :].copy()
if gBestScore > fitness:
gBestScore = fitness
gBest = pos[i, :].copy()
# Update the W of PSO
w = wMax - l * ((wMax - wMin) / iters)
for i in range(0, popSize):
for j in range(0, dim):
r1 = random.random()
r2 = random.random()
vel[i, j] = (
w * vel[i, j]
+ c1 * r1 * (pBest[i, j] - pos[i, j])
+ c2 * r2 * (gBest[j] - pos[i, j])
)
if vel[i, j] > vMax:
vel[i, j] = vMax
if vel[i, j] < -vMax:
vel[i, j] = -vMax
pos[i, j] = pos[i, j] + vel[i, j]
convergenceCurve[l] = gBestScore
        if l % 1 == 0:  # report every iteration; increase the modulus to print less often
print(
[
"At iteration "
+ str(l + 1)
+ " the best fitness is "
+ str(gBestScore)
]
)
timerEnd = time.time()
s.endTime = time.strftime("%Y-%m-%d-%H-%M-%S")
s.executionTime = timerEnd - timerStart
s.convergence = convergenceCurve
s.optimizer = "PSO"
s.objfname = objf.__name__
return s
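# A minimal smoke test (not part of the original module): minimize the sphere
# function with the PSO routine above. Assumes the `solution` helper imported
# at the top of this file is available, as it is for PSO itself.
if __name__ == "__main__":
    def sphere(x):
        # Convex objective with known optimum f(0) = 0
        return numpy.sum(x ** 2)

    result = PSO(sphere, lb=-10, ub=10, dim=5, popSize=20, iters=30)
    print("best fitness:", result.convergence[-1])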
|
[
"numpy.random.uniform",
"solution.solution",
"numpy.zeros",
"time.strftime",
"numpy.clip",
"time.time",
"random.random"
] |
[((303, 313), 'solution.solution', 'solution', ([], {}), '()\n', (311, 313), False, 'from solution import solution\n'), ((485, 512), 'numpy.zeros', 'numpy.zeros', (['(popSize, dim)'], {}), '((popSize, dim))\n', (496, 512), False, 'import numpy\n'), ((531, 551), 'numpy.zeros', 'numpy.zeros', (['popSize'], {}), '(popSize)\n', (542, 551), False, 'import numpy\n'), ((599, 626), 'numpy.zeros', 'numpy.zeros', (['(popSize, dim)'], {}), '((popSize, dim))\n', (610, 626), False, 'import numpy\n'), ((639, 655), 'numpy.zeros', 'numpy.zeros', (['dim'], {}), '(dim)\n', (650, 655), False, 'import numpy\n'), ((698, 725), 'numpy.zeros', 'numpy.zeros', (['(popSize, dim)'], {}), '((popSize, dim))\n', (709, 725), False, 'import numpy\n'), ((857, 875), 'numpy.zeros', 'numpy.zeros', (['iters'], {}), '(iters)\n', (868, 875), False, 'import numpy\n'), ((1000, 1011), 'time.time', 'time.time', ([], {}), '()\n', (1009, 1011), False, 'import time\n'), ((1030, 1064), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M-%S"""'], {}), "('%Y-%m-%d-%H-%M-%S')\n", (1043, 1064), False, 'import time\n'), ((2549, 2560), 'time.time', 'time.time', ([], {}), '()\n', (2558, 2560), False, 'import time\n'), ((2577, 2611), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d-%H-%M-%S"""'], {}), "('%Y-%m-%d-%H-%M-%S')\n", (2590, 2611), False, 'import time\n'), ((771, 806), 'numpy.random.uniform', 'numpy.random.uniform', (['(0)', '(1)', 'popSize'], {}), '(0, 1, popSize)\n', (791, 806), False, 'import numpy\n'), ((1244, 1279), 'numpy.clip', 'numpy.clip', (['pos[i, j]', 'lb[j]', 'ub[j]'], {}), '(pos[i, j], lb[j], ub[j])\n', (1254, 1279), False, 'import numpy\n'), ((1795, 1810), 'random.random', 'random.random', ([], {}), '()\n', (1808, 1810), False, 'import random\n'), ((1832, 1847), 'random.random', 'random.random', ([], {}), '()\n', (1845, 1847), False, 'import random\n')]
|
from trame import get_app_instance
from trame.html import AbstractElement, Template
try:
import numpy as np
from numbers import Number
except ImportError:
# dataframe_to_grid won't work
pass
# Make sure used module is available
_app = get_app_instance()
if "vuetify" not in _app.vue_use:
_app.vue_use += ["vuetify"]
type_mapper = {
"b": ["textColumn"],
"i": [], # ["numericColumn", "numberColumnFilter"],
"u": [], # ["numericColumn", "numberColumnFilter"],
"f": [], # ["numericColumn", "numberColumnFilter"],
"c": [],
"m": [], # ['timedeltaFormat'],
"M": [], # ["dateColumnFilter", "shortDateTimeFormat"],
"O": [],
"S": [],
"U": [],
"V": [],
}
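# The keys above are numpy dtype.kind codes (e.g. "i" signed integer, "f"
# float, "M" datetime64, "O" object); the commented-out values are ag-Grid
# column options that are currently disabled.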
def cast_to_serializable(value):
isoformat = getattr(value, "isoformat", None)
if (isoformat) and callable(isoformat):
return isoformat()
elif isinstance(value, Number):
if np.isnan(value) or np.isinf(value):
            return str(value)
return value
    return str(value)
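# Behaviour sketch: datetime-like values are serialized via isoformat(),
# NaN/inf numbers become strings, finite numbers pass through unchanged, and
# everything else falls back to its string representation.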
def dataframe_to_grid(dataframe, options={}):
"""
Transform a dataframe for use with a VDataTable
:param dataframe: A pandas dataframe
:param options: Control which columns are sortable, filterable, grouped, aligned, etc. A dictionary where keys are the columns from the dataframe and values are Vuetify DataTableHeader objects. See more info |header_doc_link|.
.. |header_doc_link| raw:: html
<a href="https://vuetifyjs.com/en/api/v-data-table/#props-headers" target="_blank">here</a>
>>> headers, rows = vuetify.dataframe_to_grid(dataframe)
>>> VDataTable(
... headers=("table_headers", headers),
... items=("table_rows", rows))
"""
headers = {}
for col_name in dataframe.columns:
headers[col_name] = {"text": col_name, "value": col_name}
if options.get(col_name):
headers[col_name].update(options.get(col_name))
return list(headers.values()), dataframe.applymap(cast_to_serializable).to_dict(
orient="records"
)
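# Example (hypothetical column name): disable sorting for one column via the
# Vuetify DataTableHeader options described in the docstring above:
#   headers, rows = dataframe_to_grid(df, options={"name": {"sortable": False}})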
slot_names = [
"day-label",
"group.header",
"expanded-item",
"item.data-table-expand",
"group.summary",
"body.append",
"foot",
"no-results",
"badge",
"category",
"placeholder",
"icon",
"body",
"selection",
"progress",
"day-month",
"actions",
"group",
"header.<name>",
"label",
"append-outer",
"page-text",
"day-label-header",
"counter",
"extension",
"close",
"prepend-item",
"img",
"loader",
"footer.page-text",
"day-header",
"prepend",
"prev",
"item",
"interval",
"item.data-table-select",
"default",
"divider",
"no-data",
"footer",
"top",
"thumb-label",
"opposite",
"append-item",
"body.prepend",
"appendIcon",
"prepend-inner",
"loading",
"header.data-table-select",
"activator",
"day-body",
"header",
"event",
"item.<name>",
"message",
"footer.prepend",
"prependIcon",
"next",
"input",
"append",
"day",
"action",
]
Template.slot_names.update(slot_names)
class VApp(AbstractElement):
"""
Vuetify's VApp component. See more info and examples |VApp_vuetify_link|.
.. |VApp_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-app" target="_blank">here</a>
:param id: Sets the DOM id on the component
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-app", children, **kwargs)
self._attr_names += [
"id",
]
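# Usage note for the wrappers in this module (illustrative): underscored
# keyword arguments map onto the dashed Vuetify props, e.g.
#   VAppBar(app=True, clipped_left=True)
# corresponds to <v-app-bar app clipped-left> in the template.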
class VAppBar(AbstractElement):
"""
Vuetify's VAppBar component. See more info and examples |VAppBar_vuetify_link|.
.. |VAppBar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-app-bar" target="_blank">here</a>
:param absolute: Applies position: absolute to the component.
:type boolean:
:param app: See description |VAppBar_vuetify_link|.
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param clipped_left: Designates that the application's `v-navigation-drawer` that is positioned on the left is below the app-bar.
:type boolean:
:param clipped_right: Designates that the application's `v-navigation-drawer` that is positioned on the right is below the app-bar.
:type boolean:
:param collapse: Puts the toolbar into a collapsed state reducing its maximum width.
:type boolean:
:param collapse_on_scroll: Puts the app-bar into a collapsed state when scrolling.
:type boolean:
:param color: See description |VAppBar_vuetify_link|.
:type string:
:param dark: See description |VAppBar_vuetify_link|.
:type boolean:
:param dense: Reduces the height of the toolbar content to 48px (96px when using the **prominent** prop).
:type boolean:
:param elevate_on_scroll: Elevates the app-bar when scrolling.
:type boolean:
:param elevation: See description |VAppBar_vuetify_link|.
:type ['number', 'string']:
:param extended: Use this prop to increase the height of the toolbar _without_ using the `extension` slot for adding content. May be used in conjunction with the **extension-height** prop, and any of the other props that affect the height of the toolbar, e.g. **prominent**, **dense**, etc., **WITH THE EXCEPTION** of **height**.
:type boolean:
:param extension_height: Specify an explicit height for the `extension` slot.
:type ['number', 'string']:
:param fade_img_on_scroll: When using the **src** prop or `img` slot, will fade the image when scrolling.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param flat: Removes the toolbar's box-shadow.
:type boolean:
:param floating: Applies **display: inline-flex** to the component.
:type boolean:
:param height: Designates a specific height for the toolbar. Overrides the heights imposed by other props, e.g. **prominent**, **dense**, **extended**, etc.
:type ['number', 'string']:
:param hide_on_scroll: Hides the app-bar when scrolling. Will still show the `extension` slot.
:type boolean:
:param inverted_scroll: Hides the app-bar when scrolling down and displays it when scrolling up.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param prominent: Increases the height of the toolbar content to 128px.
:type boolean:
:param rounded: See description |VAppBar_vuetify_link|.
:type ['boolean', 'string']:
:param scroll_off_screen: Hides the app-bar when scrolling. Will **NOT** show the `extension` slot.
:type boolean:
:param scroll_target: Designates the element to target for scrolling events. Uses `window` by default.
:type string:
:param scroll_threshold: The amount of scroll distance down before **hide-on-scroll** activates.
:type ['string', 'number']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param short: Reduce the height of the toolbar content to 56px (112px when using the **prominent** prop).
:type boolean:
:param shrink_on_scroll: Shrinks a **prominent** toolbar to a **dense** or **short** (default) one when scrolling.
:type boolean:
:param src: Image source. See `v-img` for details
:type ['string', 'object']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param value: Controls whether the component is visible or hidden.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-app-bar", children, **kwargs)
self._attr_names += [
"absolute",
"app",
"bottom",
"clipped_left",
"clipped_right",
"collapse",
"collapse_on_scroll",
"color",
"dark",
"dense",
"elevate_on_scroll",
"elevation",
"extended",
"extension_height",
"fade_img_on_scroll",
"fixed",
"flat",
"floating",
"height",
"hide_on_scroll",
"inverted_scroll",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"outlined",
"prominent",
"rounded",
"scroll_off_screen",
"scroll_target",
"scroll_threshold",
"shaped",
"short",
"shrink_on_scroll",
"src",
"tag",
"tile",
"value",
"width",
]
class VAppBarNavIcon(AbstractElement):
"""
Vuetify's VAppBarNavIcon component. See more info and examples |VAppBarNavIcon_vuetify_link|.
.. |VAppBarNavIcon_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-app-bar-nav-icon" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-app-bar-nav-icon", children, **kwargs)
class VAppBarTitle(AbstractElement):
"""
Vuetify's VAppBarTitle component. See more info and examples |VAppBarTitle_vuetify_link|.
.. |VAppBarTitle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-app-bar-title" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-app-bar-title", children, **kwargs)
class VAlert(AbstractElement):
"""
Vuetify's VAlert component. See more info and examples |VAlert_vuetify_link|.
.. |VAlert_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-alert" target="_blank">here</a>
:param border: Puts a border on the alert. Accepts **top** \| **right** \| **bottom** \| **left**.
:type string:
:param close_icon: Change the default icon used for **dismissible** alerts.
:type string:
:param close_label: See description |VAlert_vuetify_link|.
:type string:
:param color: See description |VAlert_vuetify_link|.
:type string:
:param colored_border: Applies the defined **color** to the alert's border.
:type boolean:
:param dark: See description |VAlert_vuetify_link|.
:type boolean:
:param dense: Decreases component's height.
:type boolean:
:param dismissible: Adds a close icon that can hide the alert.
:type boolean:
:param elevation: See description |VAlert_vuetify_link|.
:type ['number', 'string']:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param icon: Designates a specific icon.
:type ['boolean', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param mode: See description |VAlert_vuetify_link|.
:type string:
:param origin: See description |VAlert_vuetify_link|.
:type string:
:param outlined: Makes the background transparent and applies a thin border.
:type boolean:
:param prominent: Displays a larger vertically centered icon to draw more attention.
:type boolean:
:param rounded: See description |VAlert_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param text: Applies the defined **color** to text and a low opacity background of the same.
:type boolean:
:param tile: Removes the component's border-radius.
:type boolean:
:param transition: See description |VAlert_vuetify_link|.
:type string:
:param type: Specify a **success**, **info**, **warning** or **error** alert. Uses the contextual color and has a pre-defined icon.
:type string:
:param value: Controls whether the component is visible or hidden.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
Events
:param input: The updated bound model
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-alert", children, **kwargs)
self._attr_names += [
"border",
"close_icon",
"close_label",
"color",
"colored_border",
"dark",
"dense",
"dismissible",
"elevation",
"height",
"icon",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"mode",
"origin",
"outlined",
"prominent",
"rounded",
"shaped",
"tag",
"text",
"tile",
"transition",
"type",
"value",
"width",
]
self._event_names += [
"input",
]
class VAutocomplete(AbstractElement):
"""
Vuetify's VAutocomplete component. See more info and examples |VAutocomplete_vuetify_link|.
.. |VAutocomplete_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-autocomplete" target="_blank">here</a>
:param allow_overflow: Allow the menu to overflow off the screen
:type boolean:
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon to the outside the component's input, uses same syntax as `v-icon`
:type string:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param auto_select_first: When searching, will always highlight the first option
:type boolean:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param cache_items: Keeps a local _unique_ copy of all items that have been passed through the **items** prop.
:type boolean:
:param chips: Changes display of selections to chips
:type boolean:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Add input clear functionality, default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VAutocomplete_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_value:
:type function:
:param dark: See description |VAutocomplete_vuetify_link|.
:type boolean:
:param deletable_chips: Adds a remove icon to selected chips
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disable_lookup: Disables keyboard lookup
:type boolean:
:param disabled: Disables the input
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param filter: See description |VAutocomplete_vuetify_link|.
:type function:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hide_no_data: Hides the menu when there are no options to show. Useful for preventing the menu from opening before results are fetched asynchronously. Also has the effect of opening the menu when the `items` array changes if not already open.
:type boolean:
:param hide_selected: Do not display in the select menu items that are already selected
:type boolean:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param item_color: Sets color of selected items
:type string:
:param item_disabled: Set property of **items**'s disabled value
:type ['string', 'array', 'function']:
:param item_text: Set property of **items**'s text value
:type ['string', 'array', 'function']:
:param item_value: See description |VAutocomplete_vuetify_link|.
:type ['string', 'array', 'function']:
:param items: Can be an array of objects or array of strings. When using objects, will look for a text, value and disabled keys. This can be changed using the **item-text**, **item-value** and **item-disabled** props. Objects that have a **header** or **divider** property are considered special cases and generate a list header or divider; these items are not selectable.
:type array:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param menu_props: Pass props through to the `v-menu` component. Accepts either a string for boolean props `menu-props="auto, overflowY"`, or an object `:menu-props="{ auto: true, overflowY: true }"`
:type ['string', 'array', 'object']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Changes select to multiple. Accepts array for value
:type boolean:
:param no_data_text: Display text when there is no data
:type string:
:param no_filter: Do not apply filtering when searching. Useful when data is being filtered server side
:type boolean:
:param open_on_clear: When using the **clearable** prop, once cleared, the select menu will either open or stay open, depending on the current state
:type boolean:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param return_object: Changes the selection behavior to return the object directly rather than the value specified with **item-value**
:type boolean:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param search_input: Search value. Can be used with `.sync` modifier.
:type string:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param small_chips: Changes display of selections to chips with the **small** property
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: See description |VAutocomplete_vuetify_link|.
:type function:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
:param update_list_index: Emitted when menu item is selected using keyboard arrows
:param update_search_input: The `search-input.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-autocomplete", children, **kwargs)
self._attr_names += [
"allow_overflow",
"append_icon",
"append_outer_icon",
"attach",
"auto_select_first",
"autofocus",
"background_color",
"cache_items",
"chips",
"clear_icon",
"clearable",
"color",
"counter",
"counter_value", # JS functions unimplemented
"dark",
"deletable_chips",
"dense",
"disable_lookup",
"disabled",
"eager",
"error",
"error_count",
"error_messages",
"filled",
"filter", # JS functions unimplemented
"flat",
"full_width",
"height",
"hide_details",
"hide_no_data",
"hide_selected",
"hint",
"id",
"item_color",
"item_disabled", # JS functions unimplemented
"item_text", # JS functions unimplemented
"item_value", # JS functions unimplemented
"items",
"label",
"light",
"loader_height",
"loading",
"menu_props",
"messages",
"multiple",
"no_data_text",
"no_filter",
"open_on_clear",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"readonly",
"return_object",
"reverse",
"rounded",
"rules",
"search_input",
"shaped",
"single_line",
"small_chips",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"type",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
("update_list_index", "update:list-index"),
("update_search_input", "update:search-input"),
]
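# Illustrative instantiation, using the ("state_name", default) tuple syntax
# shown in the dataframe_to_grid docstring; the state names here are
# hypothetical:
#   VAutocomplete(
#       label="Search",
#       items=("search_items", []),
#       search_input=("search_query", None),
#   )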
class VAvatar(AbstractElement):
"""
Vuetify's VAvatar component. See more info and examples |VAvatar_vuetify_link|.
.. |VAvatar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-avatar" target="_blank">here</a>
:param color: See description |VAvatar_vuetify_link|.
:type string:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param left: See description |VAvatar_vuetify_link|.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param right: See description |VAvatar_vuetify_link|.
:type boolean:
:param rounded: See description |VAvatar_vuetify_link|.
:type ['boolean', 'string']:
:param size: Sets the height and width of the component.
:type ['number', 'string']:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-avatar", children, **kwargs)
self._attr_names += [
"color",
"height",
"left",
"max_height",
"max_width",
"min_height",
"min_width",
"right",
"rounded",
"size",
"tile",
"width",
]
class VBadge(AbstractElement):
"""
Vuetify's VBadge component. See more info and examples |VBadge_vuetify_link|.
.. |VBadge_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-badge" target="_blank">here</a>
:param avatar: Removes badge padding for the use of the `v-avatar` in the **badge** slot.
:type boolean:
:param bordered: Applies a **2px** by default and **1.5px** border around the badge when using the **dot** property.
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param color: See description |VBadge_vuetify_link|.
:type string:
:param content: Any content you want injected as text into the badge.
:type any:
:param dark: See description |VBadge_vuetify_link|.
:type boolean:
:param dot: Reduce the size of the badge and hide its contents
:type boolean:
:param icon: Designates a specific icon used in the badge.
:type string:
:param inline: Moves the badge to be inline with the wrapping element. Supports the usage of the **left** prop.
:type boolean:
:param label: The **aria-label** used for the badge
:type string:
:param left: Aligns the component towards the left.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mode: See description |VBadge_vuetify_link|.
:type string:
:param offset_x: Offset the badge on the x-axis.
:type ['number', 'string']:
:param offset_y: Offset the badge on the y-axis.
:type ['number', 'string']:
:param origin: See description |VBadge_vuetify_link|.
:type string:
:param overlap: Overlaps the slotted content on top of the component.
:type boolean:
:param tile: Removes the component's border-radius.
:type boolean:
:param transition: See description |VBadge_vuetify_link|.
:type string:
:param value: Controls whether the component is visible or hidden.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-badge", children, **kwargs)
self._attr_names += [
"avatar",
"bordered",
"bottom",
"color",
"content",
"dark",
"dot",
"icon",
"inline",
"label",
"left",
"light",
"mode",
"offset_x",
"offset_y",
"origin",
"overlap",
"tile",
"transition",
"value",
]
class VBanner(AbstractElement):
"""
Vuetify's VBanner component. See more info and examples |VBanner_vuetify_link|.
.. |VBanner_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-banner" target="_blank">here</a>
:param app: When used inside of `v-main`, will calculate top based upon application `v-toolbar` and `v-system-bar`.
:type boolean:
:param color: See description |VBanner_vuetify_link|.
:type string:
:param dark: See description |VBanner_vuetify_link|.
:type boolean:
:param elevation: See description |VBanner_vuetify_link|.
:type ['number', 'string']:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param icon: Designates a specific icon.
:type string:
:param icon_color: Designates a specific icon color.
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param mobile_breakpoint: Sets the designated mobile breakpoint for the component.
:type ['number', 'string']:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param rounded: See description |VBanner_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param single_line: Forces the banner onto a single line.
:type boolean:
:param sticky: See description |VBanner_vuetify_link|.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param value: Controls whether the component is visible or hidden.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-banner", children, **kwargs)
self._attr_names += [
"app",
"color",
"dark",
"elevation",
"height",
"icon",
"icon_color",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"mobile_breakpoint",
"outlined",
"rounded",
"shaped",
"single_line",
"sticky",
"tag",
"tile",
"value",
"width",
]
class VBottomNavigation(AbstractElement):
"""
Vuetify's VBottomNavigation component. See more info and examples |VBottomNavigation_vuetify_link|.
.. |VBottomNavigation_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-bottom-navigation" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param active_class: See description |VBottomNavigation_vuetify_link|.
:type string:
:param app: See description |VBottomNavigation_vuetify_link|.
:type boolean:
:param background_color: Changes the background-color for the component.
:type string:
:param color: See description |VBottomNavigation_vuetify_link|.
:type string:
:param dark: See description |VBottomNavigation_vuetify_link|.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param grow: See description |VBottomNavigation_vuetify_link|.
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param hide_on_scroll: Will transition the navigation off screen when scrolling up.
:type boolean:
:param horizontal: See description |VBottomNavigation_vuetify_link|.
:type boolean:
:param input_value: Controls whether the component is visible or hidden. Supports the **.sync** modifier.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param scroll_target: Designates the element to target for scrolling events. Uses `window` by default.
:type string:
:param scroll_threshold: The amount of scroll distance down before **hide-on-scroll** activates.
:type ['string', 'number']:
:param shift: See description |VBottomNavigation_vuetify_link|.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param value: See description |VBottomNavigation_vuetify_link|.
:type any:
:param width: Sets the width for the component.
:type ['number', 'string']:
Events
:param change: The value of currently selected button. If no value is assigned, will be the current index of the button.
:param update_input_value: The event used for `input-value.sync`.
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-bottom-navigation", children, **kwargs)
self._attr_names += [
"absolute",
"active_class",
"app",
"background_color",
"color",
"dark",
"fixed",
"grow",
"height",
"hide_on_scroll",
"horizontal",
"input_value",
"light",
"mandatory",
"max_height",
"max_width",
"min_height",
"min_width",
"scroll_target",
"scroll_threshold",
"shift",
"tag",
"value",
"width",
]
self._event_names += [
"change",
("update_input_value", "update:input-value"),
]
class VBottomSheet(AbstractElement):
"""
Vuetify's VBottomSheet component. See more info and examples |VBottomSheet_vuetify_link|.
.. |VBottomSheet_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-bottom-sheet" target="_blank">here</a>
:param activator: Designate a custom activator when the `activator` slot is not used. String can be any valid querySelector and Object can be any valid Node.
:type any:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param close_delay: Milliseconds to wait before closing component.
:type ['number', 'string']:
:param content_class: Applies a custom class to the detached element. This is useful because the content is moved to the beginning of the `v-app` component (unless the **attach** prop is provided) and is not targetable by classes passed directly on the component.
:type string:
:param dark: See description |VBottomSheet_vuetify_link|.
:type boolean:
:param disabled: Disables the ability to open the component.
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param fullscreen: Changes layout for fullscreen display.
:type boolean:
:param hide_overlay: Hides the display of the overlay.
:type boolean:
:param inset: Reduces the sheet content maximum width to 70%.
:type boolean:
:param internal_activator: Detaches the menu content inside of the component as opposed to the document.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_width: Sets the maximum width for the component.
:type ['string', 'number']:
:param no_click_animation: Disables the bounce effect when clicking outside of a `v-dialog`'s content when using the **persistent** prop.
:type boolean:
:param open_delay: Milliseconds to wait before opening component.
:type ['number', 'string']:
:param open_on_focus:
:type boolean:
:param open_on_hover: Designates whether component should activate when its activator is hovered.
:type boolean:
:param origin: See description |VBottomSheet_vuetify_link|.
:type string:
:param overlay_color: Sets the overlay color.
:type string:
:param overlay_opacity: Sets the overlay opacity.
:type ['number', 'string']:
:param persistent: Clicking outside of the element or pressing **esc** key will not deactivate it.
:type boolean:
:param retain_focus: Tab focus will return to the first child of the dialog by default. Disable this when using external tools that require focus such as TinyMCE or vue-clipboard.
:type boolean:
:param return_value:
:type any:
:param scrollable: See description |VBottomSheet_vuetify_link|.
:type boolean:
:param transition: See description |VBottomSheet_vuetify_link|.
:type string:
:param value: Controls whether the component is visible or hidden.
:type any:
:param width: Sets the width for the component.
:type ['string', 'number']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-bottom-sheet", children, **kwargs)
self._attr_names += [
"activator",
"attach",
"close_delay",
"content_class",
"dark",
"disabled",
"eager",
"fullscreen",
"hide_overlay",
"inset",
"internal_activator",
"light",
"max_width",
"no_click_animation",
"open_delay",
"open_on_focus",
"open_on_hover",
"origin",
"overlay_color",
"overlay_opacity",
"persistent",
"retain_focus",
"return_value",
"scrollable",
"transition",
"value",
"width",
]
class VBreadcrumbs(AbstractElement):
"""
Vuetify's VBreadcrumbs component. See more info and examples |VBreadcrumbs_vuetify_link|.
.. |VBreadcrumbs_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-breadcrumbs" target="_blank">here</a>
:param dark: See description |VBreadcrumbs_vuetify_link|.
:type boolean:
:param divider: Specifies the dividing character between items.
:type string:
:param items: An array of objects for each breadcrumb.
:type array:
:param large: Increase the font-size of the breadcrumb item text to 16px (14px default).
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-breadcrumbs", children, **kwargs)
self._attr_names += [
"dark",
"divider",
"items",
"large",
"light",
]
class VBreadcrumbsItem(AbstractElement):
"""
Vuetify's VBreadcrumbsItem component. See more info and examples |VBreadcrumbsItem_vuetify_link|.
.. |VBreadcrumbsItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-breadcrumbs-item" target="_blank">here</a>
:param active_class: See description |VBreadcrumbsItem_vuetify_link|.
:type string:
:param append: See description |VBreadcrumbsItem_vuetify_link|.
:type boolean:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param exact: See description |VBreadcrumbsItem_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VBreadcrumbsItem_vuetify_link|.
:type string:
:param exact_path: See description |VBreadcrumbsItem_vuetify_link|.
:type boolean:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param link: Designates that the component is a link. This is automatic when using the **href** or **to** prop.
:type boolean:
:param nuxt: See description |VBreadcrumbsItem_vuetify_link|.
:type boolean:
:param replace: See description |VBreadcrumbsItem_vuetify_link|.
:type boolean:
:param ripple: See description |VBreadcrumbsItem_vuetify_link|.
:type ['boolean', 'object']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param to: See description |VBreadcrumbsItem_vuetify_link|.
:type ['string', 'object']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-breadcrumbs-item", children, **kwargs)
self._attr_names += [
"active_class",
"append",
"disabled",
"exact",
"exact_active_class",
"exact_path",
"href",
"link",
"nuxt",
"replace",
"ripple",
"tag",
"target",
"to",
]
class VBreadcrumbsDivider(AbstractElement):
"""
Vuetify's VBreadcrumbsDivider component. See more info and examples |VBreadcrumbsDivider_vuetify_link|.
.. |VBreadcrumbsDivider_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-breadcrumbs-divider" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-breadcrumbs-divider", children, **kwargs)
class VBtn(AbstractElement):
"""
Vuetify's VBtn component. See more info and examples |VBtn_vuetify_link|.
.. |VBtn_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-btn" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param active_class: See description |VBtn_vuetify_link|.
:type string:
:param append: See description |VBtn_vuetify_link|.
:type boolean:
:param block: Expands the button to 100% of available space.
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param color: See description |VBtn_vuetify_link|.
:type string:
:param dark: See description |VBtn_vuetify_link|.
:type boolean:
:param depressed: Removes the button box shadow.
:type boolean:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param elevation: See description |VBtn_vuetify_link|.
:type ['number', 'string']:
:param exact: See description |VBtn_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VBtn_vuetify_link|.
:type string:
:param exact_path: See description |VBtn_vuetify_link|.
:type boolean:
:param fab: Designates the button as a floating-action-button. Button will become _round_.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param icon: Designates the button as icon. Button will become _round_ and applies the **text** prop.
:type boolean:
:param input_value: Controls the button's active state.
:type any:
:param large: Makes the component large.
:type boolean:
:param left: Aligns the component towards the left. This should be used with the **absolute** or **fixed** props.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param link: Designates that the component is a link. This is automatic when using the **href** or **to** prop.
:type boolean:
:param loading: Adds a loading icon animation.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param nuxt: See description |VBtn_vuetify_link|.
:type boolean:
:param outlined: Makes the background transparent and applies a thin border.
:type boolean:
:param plain: Removes the default background change applied when hovering over the button.
:type boolean:
:param replace: See description |VBtn_vuetify_link|.
:type boolean:
:param retain_focus_on_click: Don't blur on click.
:type boolean:
:param right: Aligns the component towards the right. This should be used with the **absolute** or **fixed** props.
:type boolean:
:param ripple: See description |VBtn_vuetify_link|.
:type ['boolean', 'object']:
:param rounded: Applies a large border radius on the button.
:type boolean:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param small: Makes the component small.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param text: Makes the background transparent. When using the **color** prop, the color will be applied to the button text instead of the background.
:type boolean:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param to: See description |VBtn_vuetify_link|.
:type ['string', 'object']:
:param top: Aligns the content towards the top.
:type boolean:
:param type: Set the button's **type** attribute.
:type string:
:param value: Controls whether the component is visible or hidden.
:type any:
:param width: Sets the width for the component.
:type ['number', 'string']:
:param x_large: Makes the component extra large.
:type boolean:
:param x_small: Makes the component extra small.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-btn", children, **kwargs)
self._attr_names += [
"absolute",
"active_class",
"append",
"block",
"bottom",
"color",
"dark",
"depressed",
"disabled",
"elevation",
"exact",
"exact_active_class",
"exact_path",
"fab",
"fixed",
"height",
"href",
"icon",
"input_value",
"large",
"left",
"light",
"link",
"loading",
"max_height",
"max_width",
"min_height",
"min_width",
"nuxt",
"outlined",
"plain",
"replace",
"retain_focus_on_click",
"right",
"ripple",
"rounded",
"shaped",
"small",
"tag",
"target",
"text",
"tile",
"to",
"top",
"type",
"value",
"width",
"x_large",
"x_small",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
]
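# Illustrative event wiring: events listed in _event_names (plus `click` from
# the parent class) can be bound like any other keyword argument, e.g.
#   VBtn("Save", color="primary", click=on_save)
# where `on_save` is a hypothetical controller callback.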
class VBtnToggle(AbstractElement):
"""
Vuetify's VBtnToggle component. See more info and examples |VBtnToggle_vuetify_link|.
.. |VBtnToggle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-btn-toggle" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param background_color: Changes the background-color for the component.
:type string:
:param borderless: Removes the group's border.
:type boolean:
:param color: See description |VBtnToggle_vuetify_link|.
:type string:
:param dark: See description |VBtnToggle_vuetify_link|.
:type boolean:
:param dense: Reduces the button size and padding.
:type boolean:
:param group: See description |VBtnToggle_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param rounded: Round edge buttons
:type boolean:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's border-radius.
:type boolean:
:param value: The designated model value for the component.
:type any:
Events
:param change: Emitted when the input is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-btn-toggle", children, **kwargs)
self._attr_names += [
"active_class",
"background_color",
"borderless",
"color",
"dark",
"dense",
"group",
"light",
"mandatory",
"max",
"multiple",
"rounded",
"shaped",
"tag",
"tile",
"value",
]
self._event_names += [
"change",
]
class VCalendar(AbstractElement):
"""
Vuetify's VCalendar component. See more info and examples |VCalendar_vuetify_link|.
.. |VCalendar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-calendar" target="_blank">here</a>
:param categories: Specifies what categories to display in the `category` view. This controls the order of the categories as well. If the calendar uses events any categories specified in those events not specified in this value are dynamically rendered in the view unless `category-hide-dynamic` is true.
:type ['array', 'string']:
:param category_days: The number of days to render in the `category` view.
:type ['number', 'string']:
:param category_for_invalid: The category to place events in that have invalid categories. A category is invalid when it is not a string. By default events without a category are not displayed until this value is specified.
:type string:
:param category_hide_dynamic: Sets whether categories specified in an event should be hidden if it's not defined in `categories`.
:type boolean:
:param category_show_all: Set whether the `category` view should show all defined `categories` even if there are no events for a category.
:type boolean:
:param category_text: If categories is a list of objects, you can use this to determine what property to print out as the category text on the calendar. You can provide a function to do some logic or just define the prop name. It's similar to item-text on v-select
:type ['string', 'function']:
:param color: See description |VCalendar_vuetify_link|.
:type string:
:param dark: See description |VCalendar_vuetify_link|.
:type boolean:
:param day_format: Formats day of the month string that appears in a day to a specified locale
:type function:
:param end: The ending date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param event_category: Set property of *event*'s category. Instead of a property a function can be given which takes an event and returns the category.
:type ['string', 'function']:
:param event_color: A background color for all events or a function which accepts an event object passed to the calendar to return a color.
:type ['string', 'function']:
:param event_end: Set property of *event*'s end timestamp.
:type string:
:param event_height: The height of an event in pixels in the `month` view and at the top of the `day` views.
:type number:
:param event_margin_bottom: Margin bottom for event
:type number:
:param event_more: Whether the more 'button' is displayed on a calendar with too many events in a given day. It will say something like '5 more' and when clicked generates a `click:more` event.
:type boolean:
:param event_more_text: The text to display in the more 'button' given the number of hidden events.
:type string:
:param event_name: Set property of *event*'s displayed name, or a function which accepts an event object passed to the calendar as the first argument and a flag signalling whether the name is for a timed event (true) or an event over a day.
:type ['string', 'function']:
:param event_overlap_mode: One of `stack`, `column`, or a custom render function
:type ['string', 'function']:
:param event_overlap_threshold: A value in minutes that's used to determine whether two timed events should be placed in column beside each other or should be treated as slightly overlapping events.
:type ['string', 'number']:
:param event_ripple: Applies the `v-ripple` directive.
:type ['boolean', 'object']:
:param event_start: Set property of *event*'s start timestamp.
:type string:
:param event_text_color: A text color for all events or a function which accepts an event object passed to the calendar to return a color.
:type ['string', 'function']:
:param event_timed: If Dates or milliseconds are used as the start or end timestamp of an event, this prop can be a string to a property on the event that is truthy if the event is a timed event or a function which takes the event and returns a truthy value if the event is a timed event.
:type ['string', 'function']:
:param events: An array of event objects with a property for a start timestamp and optionally a name and end timestamp. If an end timestamp is not given, the value of start will be used. If no name is given, you must provide an implementation for the `event` slot.
:type array:
:param first_interval: The first interval to display in the `day` view. If `intervalMinutes` is set to 60 and this is set to 9 the first time in the view is 9am.
:type ['number', 'string']:
:param first_time: The first time to display in the `day` view. If specified, this overwrites any `firstInterval` value specified. This can be the number of minutes since midnight, a string in the format of `HH:mm`, or an object with number properties hour and minute.
:type ['number', 'string', 'object']:
:param hide_header: If the header at the top of the `day` view should be visible.
:type boolean:
:param interval_count: The number of intervals to display in the `day` view.
:type ['number', 'string']:
:param interval_format: Formats time of day string that appears in the interval gutter of the `day` and `week` view to specified locale
:type function:
:param interval_height: The height of an interval in pixels in the `day` view.
:type ['number', 'string']:
:param interval_minutes: The number of minutes the intervals are in the `day` view. A common interval is 60 minutes so the intervals are an hour.
:type ['number', 'string']:
:param interval_style: Returns CSS styling to apply to the interval.
:type function:
:param interval_width: The width of the interval gutter on the left side in the `day` view.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param locale: The locale of the calendar.
:type string:
:param locale_first_day_of_year: Sets the day that determines the first week of the year, starting with 0 for **Sunday**. For ISO 8601 this should be 4.
:type ['string', 'number']:
:param max_days: The maximum number of days to display in the custom calendar if an `end` day is not set.
:type number:
:param min_weeks: The minimum number of weeks to display in the `month` or `week` view.
:type any:
:param month_format: Formats month string that appears in a day to specified locale
:type function:
:param now: Override the day & time which is considered now. This is in the format of `YYYY-MM-DD hh:mm:ss`. The calendar is styled according to now.
:type string:
:param short_intervals: If true, the intervals in the `day` view will be 9 AM as opposed to 09:00 AM
:type boolean:
:param short_months: Whether the short versions of a month should be used (Jan vs January).
:type boolean:
:param short_weekdays: Whether the short versions of a weekday should be used (Mon vs Monday).
:type boolean:
:param show_interval_label: Checks if a given day and time should be displayed in the interval gutter of the `day` view.
:type function:
:param show_month_on_first: Whether the name of the month should be displayed on the first day of the month.
:type boolean:
:param show_week: Whether week numbers should be displayed when using the `month` view.
:type boolean:
:param start: The starting date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param type: A string which is one of `month`, `week`, `day`, `4day`, `custom-weekly`, `custom-daily`, and `category`. The custom types look at the `start` and `end` dates passed to the component as opposed to the `value`.
:type string:
:param value: A date in the format of `YYYY-MM-DD` which determines what span of time the calendar displays.
:type ['string', 'number', 'date']:
:param weekday_format: Formats day of the week string that appears in the header to specified locale
:type function:
:param weekdays: Specifies which days of the week to display. To display Monday through Friday only, a value of `[1, 2, 3, 4, 5]` can be used. To display a week starting on Monday a value of `[1, 2, 3, 4, 5, 6, 0]` can be used.
:type ['array', 'string']:
Events
:param change: The range of days displayed on the calendar changed. This is triggered on initialization. The event passed is an object with start and end date objects.
:param click_date: The click event on the day of the month link. The event passed is the day & time object.
:param click_day: The click event on a day. The event passed is the day object.
:param click_day_category: The click event on a day in the `category` view. The event passed is the day object.
:param click_event: The click event on a specific event. The event passed is the day & time object.
:param click_interval: The click event at a specific interval label in the `day` view. The event passed is the day & time object.
:param click_more: The click event on the `X more` button on views with too many events in a day.
:param click_time: The click event at a specific time in the `day` view. The event passed is the day & time object.
:param click_time_category: The click event at a specific time in the `category` view. The event passed is the day & time object.
:param contextmenu_date: The right-click event on the day of the month link. The event passed is the day & time object.
:param contextmenu_day: The right-click event on a day. The event passed is the day object.
:param contextmenu_day_category: The right-click event on a day in the `category` view. The event passed is the day object.
:param contextmenu_event: The right-click event on an event. The event passed is the day & time object.
:param contextmenu_interval: The right-click event at a specific interval label in the `day` view. The event passed is the day & time object.
:param contextmenu_time: The right-click event at a specific time in the `day` view. The event passed is the day & time object.
:param contextmenu_time_category: The right-click event at a specific time in the `category` view. The event passed is the day & time object.
:param input: An alias to the `click:date` event used to support v-model.
:param mousedown_day: The mousedown event on a day. The event passed is the day object.
:param mousedown_day_category: The mousedown event on a day in the `category` view. The event passed is the day object.
:param mousedown_event: The mousedown event on an event. The event passed is the day & time object.
:param mousedown_interval: The mousedown event at a specific interval label in the `day` view. The event passed is the day & time object.
:param mousedown_time: The mousedown event at a specific time in the `day` view. The event passed is the day & time object.
:param mousedown_time_category: The mousedown event at a specific time in the `category` view. The event passed is the day & time object.
:param mouseenter_day: The mouseenter event on a day. The event passed is the day object.
:param mouseenter_day_category: The mouseenter event on a day in the `category` view. The event passed is the day object.
:param mouseenter_event: The mouseenter event on an event. The event passed is the day & time object.
:param mouseenter_interval: The mouseenter event at a specific interval label in the `day` view. The event passed is the day & time object.
:param mouseenter_time: The mouseenter event at a specific time in the `day` view. The event passed is the day & time object.
:param mouseenter_time_category: The mouseenter event at a specific time in the `category` view. The event passed is the day & time object.
:param mouseleave_day: The mouseleave event on a day. The event passed is the day object.
:param mouseleave_day_category: The mouseleave event on a day in the `category` view. The event passed is the day object.
:param mouseleave_event: The mouseleave event on an event. The event passed is the day & time object.
:param mouseleave_interval: The mouseleave event at a specific interval label in the `day` view. The event passed is the day & time object.
:param mouseleave_time: The mouseleave event at a specific time in the `day` view. The event passed is the day & time object.
:param mouseleave_time_category: The mouseleave event at a specific time in the `category` view. The event passed is the day & time object.
:param mousemove_day: The mousemove event on a day. The event passed is the day object.
:param mousemove_day_category: The mousemove event on a day in the `category` view. The event passed is the day object.
:param mousemove_event: The mousemove event on an event. The event passed is the day & time object.
:param mousemove_interval: The mousemove event at a specific interval label in the `day` view. The event passed is the day & time object.
:param mousemove_time: The mousemove event at a specific time in the `day` view. The event passed is the day & time object.
:param mousemove_time_category: The mousemove event at a specific time in the `category` view. The event passed is the day & time object.
:param mouseup_day: The mouseup event on a day. The event passed is the day object.
:param mouseup_day_category: The mouseup event on a day in the `category` view. The event passed is the day object.
:param mouseup_event: The mouseup event on an event. The event passed is the day & time object.
:param mouseup_interval: The mouseup event at a specific interval label in the `day` view. The event passed is the day & time object.
:param mouseup_time: The mouseup event at a specific time in the `day` view. The event passed is the day & time object.
:param mouseup_time_category: The mouseup event at a specific time in the `category` view. The event passed is the day & time object.
:param moved: One of the functions `next`, `prev`, and `move` was called. The event passed is the day object calculated for the movement.
:param touchend_day: The touchend event on a day. The event passed is the day object.
:param touchend_day_category: The touchend event on a day in the `category` view. The event passed is the day object.
:param touchend_event: The touchend event on an event. The event passed is the day & time object.
:param touchend_interval: The touchend event at a specific interval label in the `day` view. The event passed is the day & time object.
:param touchend_time: The touchend event at a specific time in the `day` view. The event passed is the day & time object.
:param touchend_time_category: The touchend event at a specific time in the `category` view. The event passed is the day & time object.
:param touchmove_day: The touchmove event on a day. The event passed is the day object.
:param touchmove_day_category: The touchmove event on a day in the `category` view. The event passed is the day object.
:param touchmove_event: The touchmove event on an event. The event passed is the day & time object.
:param touchmove_interval: The touchmove event at a specific interval label in the `day` view. The event passed is the day & time object.
:param touchmove_time: The touchmove event at a specific time in the `day` view. The event passed is the day & time object.
:param touchmove_time_category: The touchmove event at a specific time in the `category` view. The event passed is the day & time object.
:param touchstart_day: The touchstart event on a day. The event passed is the day object.
:param touchstart_day_category: The touchstart event on a day in the `category` view. The event passed is the day object.
:param touchstart_event: The touchstart event on an event. The event passed is the day & time object.
:param touchstart_interval: The touchstart event at a specific interval label in the `day` view. The event passed is the day & time object.
:param touchstart_time: The touchstart event at a specific time in the `day` view. The event passed is the day & time object.
:param touchstart_time_category: The touchstart event at a specific time in the `category` view. The event passed is the day & time object.
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-calendar", children, **kwargs)
self._attr_names += [
"categories",
"category_days",
"category_for_invalid",
"category_hide_dynamic",
"category_show_all",
"category_text", # JS functions unimplemented
"color",
"dark",
"day_format", # JS functions unimplemented
"end",
"event_category", # JS functions unimplemented
"event_color", # JS functions unimplemented
"event_end",
"event_height",
"event_margin_bottom",
"event_more",
"event_more_text",
"event_name", # JS functions unimplemented
"event_overlap_mode", # JS functions unimplemented
"event_overlap_threshold",
"event_ripple",
"event_start",
"event_text_color", # JS functions unimplemented
"event_timed", # JS functions unimplemented
"events",
"first_interval",
"first_time",
"hide_header",
"interval_count",
"interval_format", # JS functions unimplemented
"interval_height",
"interval_minutes",
"interval_style", # JS functions unimplemented
"interval_width",
"light",
"locale",
"locale_first_day_of_year",
"max_days",
"min_weeks",
"month_format", # JS functions unimplemented
"now",
"short_intervals",
"short_months",
"short_weekdays",
"show_interval_label", # JS functions unimplemented
"show_month_on_first",
"show_week",
"start",
"type",
"value",
"weekday_format", # JS functions unimplemented
"weekdays",
]
self._event_names += [
"change",
("click_date", "click:date"),
("click_day", "click:day"),
("click_day_category", "click:day-category"),
("click_event", "click:event"),
("click_interval", "click:interval"),
("click_more", "click:more"),
("click_time", "click:time"),
("click_time_category", "click:time-category"),
("contextmenu_date", "contextmenu:date"),
("contextmenu_day", "contextmenu:day"),
("contextmenu_day_category", "contextmenu:day-category"),
("contextmenu_event", "contextmenu:event"),
("contextmenu_interval", "contextmenu:interval"),
("contextmenu_time", "contextmenu:time"),
("contextmenu_time_category", "contextmenu:time-category"),
"input",
("mousedown_day", "mousedown:day"),
("mousedown_day_category", "mousedown:day-category"),
("mousedown_event", "mousedown:event"),
("mousedown_interval", "mousedown:interval"),
("mousedown_time", "mousedown:time"),
("mousedown_time_category", "mousedown:time-category"),
("mouseenter_day", "mouseenter:day"),
("mouseenter_day_category", "mouseenter:day-category"),
("mouseenter_event", "mouseenter:event"),
("mouseenter_interval", "mouseenter:interval"),
("mouseenter_time", "mouseenter:time"),
("mouseenter_time_category", "mouseenter:time-category"),
("mouseleave_day", "mouseleave:day"),
("mouseleave_day_category", "mouseleave:day-category"),
("mouseleave_event", "mouseleave:event"),
("mouseleave_interval", "mouseleave:interval"),
("mouseleave_time", "mouseleave:time"),
("mouseleave_time_category", "mouseleave:time-category"),
("mousemove_day", "mousemove:day"),
("mousemove_day_category", "mousemove:day-category"),
("mousemove_event", "mousemove:event"),
("mousemove_interval", "mousemove:interval"),
("mousemove_time", "mousemove:time"),
("mousemove_time_category", "mousemove:time-category"),
("mouseup_day", "mouseup:day"),
("mouseup_day_category", "mouseup:day-category"),
("mouseup_event", "mouseup:event"),
("mouseup_interval", "mouseup:interval"),
("mouseup_time", "mouseup:time"),
("mouseup_time_category", "mouseup:time-category"),
"moved",
("touchend_day", "touchend:day"),
("touchend_day_category", "touchend:day-category"),
("touchend_event", "touchend:event"),
("touchend_interval", "touchend:interval"),
("touchend_time", "touchend:time"),
("touchend_time_category", "touchend:time-category"),
("touchmove_day", "touchmove:day"),
("touchmove_day_category", "touchmove:day-category"),
("touchmove_event", "touchmove:event"),
("touchmove_interval", "touchmove:interval"),
("touchmove_time", "touchmove:time"),
("touchmove_time_category", "touchmove:time-category"),
("touchstart_day", "touchstart:day"),
("touchstart_day_category", "touchstart:day-category"),
("touchstart_event", "touchstart:event"),
("touchstart_interval", "touchstart:interval"),
("touchstart_time", "touchstart:time"),
("touchstart_time_category", "touchstart:time-category"),
]
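# Usage sketch for VCalendar (a minimal example under the same trame-app
# assumptions as above; `events`, `focus_date` and `ctrl.on_range_change`
# are hypothetical names):
#
#     calendar = VCalendar(
#         type="week",
#         v_model=("focus_date", "2023-01-02"),
#         events=("events", []),                      # list of {name, start, end} dicts
#         event_color="primary",
#         change=(ctrl.on_range_change, "[$event]"),  # forward the {start, end} range object
#     )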
class VCalendarDaily(AbstractElement):
"""
Vuetify's VCalendarDaily component. See more info and examples |VCalendarDaily_vuetify_link|.
.. |VCalendarDaily_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-calendar-daily" target="_blank">here</a>
:param color: See description |VCalendarDaily_vuetify_link|.
:type string:
:param dark: See description |VCalendarDaily_vuetify_link|.
:type boolean:
:param day_format: Formats day of the month string that appears in a day to a specified locale
:type function:
:param end: The ending date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param first_interval: The first interval to display in the `day` view. If `intervalMinutes` is set to 60 and this is set to 9 the first time in the view is 9am.
:type ['number', 'string']:
:param first_time: The first time to display in the `day` view. If specified, this overwrites any `firstInterval` value specified. This can be the number of minutes since midnight, a string in the format of `HH:mm`, or an object with number properties hour and minute.
:type ['number', 'string', 'object']:
:param hide_header: If the header at the top of the `day` view should be visible.
:type boolean:
:param interval_count: The number of intervals to display in the `day` view.
:type ['number', 'string']:
:param interval_format: Formats time of day string that appears in the interval gutter of the `day` and `week` view to specified locale
:type function:
:param interval_height: The height of an interval in pixels in the `day` view.
:type ['number', 'string']:
:param interval_minutes: The number of minutes the intervals are in the `day` view. A common interval is 60 minutes so the intervals are an hour.
:type ['number', 'string']:
:param interval_style: Returns CSS styling to apply to the interval.
:type function:
:param interval_width: The width of the interval gutter on the left side in the `day` view.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param locale: The locale of the calendar.
:type string:
:param max_days: The maximum number of days to display in the custom calendar if an `end` day is not set.
:type number:
:param now: Override the day & time which is considered now. This is in the format of `YYYY-MM-DD hh:mm:ss`. The calendar is styled according to now.
:type string:
:param short_intervals: If true, the intervals in the `day` view will be 9 AM as opposed to 09:00 AM
:type boolean:
:param short_weekdays: Whether the short versions of a weekday should be used (Mon vs Monday).
:type boolean:
:param show_interval_label: Checks if a given day and time should be displayed in the interval gutter of the `day` view.
:type function:
:param start: The starting date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param weekday_format: Formats day of the week string that appears in the header to specified locale
:type function:
:param weekdays: Specifies which days of the week to display. To display Monday through Friday only, a value of `[1, 2, 3, 4, 5]` can be used. To display a week starting on Monday a value of `[1, 2, 3, 4, 5, 6, 0]` can be used.
:type ['array', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-calendar-daily", children, **kwargs)
self._attr_names += [
"color",
"dark",
"day_format", # JS functions unimplemented
"end",
"first_interval",
"first_time",
"hide_header",
"interval_count",
"interval_format", # JS functions unimplemented
"interval_height",
"interval_minutes",
"interval_style", # JS functions unimplemented
"interval_width",
"light",
"locale",
"max_days",
"now",
"short_intervals",
"short_weekdays",
"show_interval_label", # JS functions unimplemented
"start",
"weekday_format", # JS functions unimplemented
"weekdays",
]
class VCalendarWeekly(AbstractElement):
"""
Vuetify's VCalendarWeekly component. See more info and examples |VCalendarWeekly_vuetify_link|.
.. |VCalendarWeekly_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-calendar-weekly" target="_blank">here</a>
:param color: See description |VCalendarWeekly_vuetify_link|.
:type string:
:param dark: See description |VCalendarWeekly_vuetify_link|.
:type boolean:
:param day_format: Formats day of the month string that appears in a day to a specified locale
:type function:
:param end: The ending date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param hide_header: If the header at the top of the `day` view should be visible.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param locale: The locale of the calendar.
:type string:
:param locale_first_day_of_year: Sets the day that determines the first week of the year, starting with 0 for **Sunday**. For ISO 8601 this should be 4.
:type ['string', 'number']:
:param min_weeks: The minimum number of weeks to display in the `month` or `week` view.
:type any:
:param month_format: Formats month string that appears in a day to specified locale
:type function:
:param now: Override the day & time which is considered now. This is in the format of `YYYY-MM-DD hh:mm:ss`. The calendar is styled according to now.
:type string:
:param short_months: Whether the short versions of a month should be used (Jan vs January).
:type boolean:
:param short_weekdays: Whether the short versions of a weekday should be used (Mon vs Monday).
:type boolean:
:param show_month_on_first: Whether the name of the month should be displayed on the first day of the month.
:type boolean:
:param show_week: Whether week numbers should be displayed when using the `month` view.
:type boolean:
:param start: The starting date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param weekday_format: Formats day of the week string that appears in the header to specified locale
:type function:
:param weekdays: Specifies which days of the week to display. To display Monday through Friday only, a value of `[1, 2, 3, 4, 5]` can be used. To display a week starting on Monday a value of `[1, 2, 3, 4, 5, 6, 0]` can be used.
:type ['array', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-calendar-weekly", children, **kwargs)
self._attr_names += [
"color",
"dark",
"day_format", # JS functions unimplemented
"end",
"hide_header",
"light",
"locale",
"locale_first_day_of_year",
"min_weeks",
"month_format", # JS functions unimplemented
"now",
"short_months",
"short_weekdays",
"show_month_on_first",
"show_week",
"start",
"weekday_format", # JS functions unimplemented
"weekdays",
]
class VCalendarMonthly(AbstractElement):
"""
Vuetify's VCalendarMonthly component. See more info and examples |VCalendarMonthly_vuetify_link|.
.. |VCalendarMonthly_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-calendar-monthly" target="_blank">here</a>
:param color: See description |VCalendarMonthly_vuetify_link|.
:type string:
:param dark: See description |VCalendarMonthly_vuetify_link|.
:type boolean:
:param day_format: Formats day of the month string that appears in a day to a specified locale
:type function:
:param end: The ending date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param hide_header: If the header at the top of the `day` view should be visible.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param locale: The locale of the calendar.
:type string:
:param locale_first_day_of_year: Sets the day that determines the first week of the year, starting with 0 for **Sunday**. For ISO 8601 this should be 4.
:type ['string', 'number']:
:param min_weeks: The minimum number of weeks to display in the `month` or `week` view.
:type any:
:param month_format: Formats month string that appears in a day to specified locale
:type function:
:param now: Override the day & time which is considered now. This is in the format of `YYYY-MM-DD hh:mm:ss`. The calendar is styled according to now.
:type string:
:param short_months: Whether the short versions of a month should be used (Jan vs January).
:type boolean:
:param short_weekdays: Whether the short versions of a weekday should be used (Mon vs Monday).
:type boolean:
:param show_month_on_first: Whether the name of the month should be displayed on the first day of the month.
:type boolean:
:param show_week: Whether week numbers should be displayed when using the `month` view.
:type boolean:
:param start: The starting date on the calendar (inclusive) in the format of `YYYY-MM-DD`. This may be ignored depending on the `type` of the calendar.
:type ['string', 'number', 'date']:
:param weekday_format: Formats day of the week string that appears in the header to specified locale
:type function:
:param weekdays: Specifies which days of the week to display. To display Monday through Friday only, a value of `[1, 2, 3, 4, 5]` can be used. To display a week starting on Monday a value of `[1, 2, 3, 4, 5, 6, 0]` can be used.
:type ['array', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-calendar-monthly", children, **kwargs)
self._attr_names += [
"color",
"dark",
"day_format", # JS functions unimplemented
"end",
"hide_header",
"light",
"locale",
"locale_first_day_of_year",
"min_weeks",
"month_format", # JS functions unimplemented
"now",
"short_months",
"short_weekdays",
"show_month_on_first",
"show_week",
"start",
"weekday_format", # JS functions unimplemented
"weekdays",
]
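# VCalendarDaily, VCalendarWeekly and VCalendarMonthly follow the same
# construction pattern as VCalendar above, restricted to the props in
# their respective _attr_names; e.g. (hypothetical state name):
#
#     weekly = VCalendarWeekly(start=("week_start", "2023-01-02"), show_week=True)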
class VCard(AbstractElement):
"""
Vuetify's VCard component. See more info and examples |VCard_vuetify_link|.
.. |VCard_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-card" target="_blank">here</a>
:param active_class: See description |VCard_vuetify_link|.
:type string:
:param append: See description |VCard_vuetify_link|.
:type boolean:
:param color: See description |VCard_vuetify_link|.
:type string:
:param dark: See description |VCard_vuetify_link|.
:type boolean:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param elevation: See description |VCard_vuetify_link|.
:type ['number', 'string']:
:param exact: See description |VCard_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VCard_vuetify_link|.
:type string:
:param exact_path: See description |VCard_vuetify_link|.
:type boolean:
:param flat: Removes the card's elevation.
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param hover: See description |VCard_vuetify_link|.
:type boolean:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param img: See description |VCard_vuetify_link|.
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param link: Designates that the component is a link. This is automatic when using the **href** or **to** prop.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param nuxt: See description |VCard_vuetify_link|.
:type boolean:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param raised: See description |VCard_vuetify_link|.
:type boolean:
:param replace: See description |VCard_vuetify_link|.
:type boolean:
:param ripple: See description |VCard_vuetify_link|.
:type ['boolean', 'object']:
:param rounded: See description |VCard_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param to: See description |VCard_vuetify_link|.
:type ['string', 'object']:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-card", children, **kwargs)
self._attr_names += [
"active_class",
"append",
"color",
"dark",
"disabled",
"elevation",
"exact",
"exact_active_class",
"exact_path",
"flat",
"height",
"hover",
"href",
"img",
"light",
"link",
"loader_height",
"loading",
"max_height",
"max_width",
"min_height",
"min_width",
"nuxt",
"outlined",
"raised",
"replace",
"ripple",
"rounded",
"shaped",
"tag",
"target",
"tile",
"to",
"width",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
]
class VCardActions(AbstractElement):
"""
Vuetify's VCardActions component. See more info and examples |VCardActions_vuetify_link|.
.. |VCardActions_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-card-actions" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-card-actions", children, **kwargs)
class VCardSubtitle(AbstractElement):
"""
Vuetify's VCardSubtitle component. See more info and examples |VCardSubtitle_vuetify_link|.
.. |VCardSubtitle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-card-subtitle" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-card-subtitle", children, **kwargs)
class VCardText(AbstractElement):
"""
Vuetify's VCardText component. See more info and examples |VCardText_vuetify_link|.
.. |VCardText_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-card-text" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-card-text", children, **kwargs)
class VCardTitle(AbstractElement):
"""
Vuetify's VCardTitle component. See more info and examples |VCardTitle_vuetify_link|.
.. |VCardTitle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-card-title" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-card-title", children, **kwargs)
class VCarousel(AbstractElement):
"""
Vuetify's VCarousel component. See more info and examples |VCarousel_vuetify_link|.
.. |VCarousel_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-carousel" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param continuous: Determines whether the carousel is continuous (wraps from the last item back to the first).
:type boolean:
:param cycle: Determines if the carousel should cycle through images.
:type boolean:
:param dark: See description |VCarousel_vuetify_link|.
:type boolean:
:param delimiter_icon: Sets icon for carousel delimiter
:type string:
:param height: Sets the height for the component
:type ['number', 'string']:
:param hide_delimiter_background: Hides the bottom delimiter background.
:type boolean:
:param hide_delimiters: Hides the carousel's bottom delimiters.
:type boolean:
:param interval: The duration between image cycles. Requires the **cycle** prop.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param next_icon: The displayed icon for forcing pagination to the next item.
:type ['boolean', 'string']:
:param prev_icon: The displayed icon for forcing pagination to the previous item.
:type ['boolean', 'string']:
:param progress: Displays a carousel progress bar. Requires the **cycle** prop and **interval**.
:type boolean:
:param progress_color: Applies specified color to progress bar.
:type string:
:param reverse: Reverse the normal transition direction.
:type boolean:
:param show_arrows: Displays arrows for next/previous navigation.
:type boolean:
:param show_arrows_on_hover: Displays navigation arrows only when the carousel is hovered over.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param touch: Provide a custom **left** and **right** function when swiped left or right.
:type object:
:param touchless: Disable touch support.
:type boolean:
:param value: The designated model value for the component.
:type any:
:param vertical: Uses a vertical transition when changing windows.
:type boolean:
:param vertical_delimiters: Displays carousel delimiters vertically.
:type string:
Events
:param change: Emitted when the component value is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-carousel", children, **kwargs)
self._attr_names += [
"active_class",
"continuous",
"cycle",
"dark",
"delimiter_icon",
"height",
"hide_delimiter_background",
"hide_delimiters",
"interval",
"light",
"mandatory",
"max",
"multiple",
"next_icon",
"prev_icon",
"progress",
"progress_color",
"reverse",
"show_arrows",
"show_arrows_on_hover",
"tag",
"touch",
"touchless",
"value",
"vertical",
"vertical_delimiters",
]
self._event_names += [
"change",
]
class VCarouselItem(AbstractElement):
"""
Vuetify's VCarouselItem component. See more info and examples |VCarouselItem_vuetify_link|.
.. |VCarouselItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-carousel-item" target="_blank">here</a>
:param active_class: See description |VCarouselItem_vuetify_link|.
:type string:
:param append: See description |VCarouselItem_vuetify_link|.
:type boolean:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param exact: See description |VCarouselItem_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VCarouselItem_vuetify_link|.
:type string:
:param exact_path: See description |VCarouselItem_vuetify_link|.
:type boolean:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param link: Designates that the component is a link. This is automatic when using the **href** or **to** prop.
:type boolean:
:param nuxt: See description |VCarouselItem_vuetify_link|.
:type boolean:
:param replace: See description |VCarouselItem_vuetify_link|.
:type boolean:
:param reverse_transition: Sets the reverse transition
:type ['boolean', 'string']:
:param ripple: See description |VCarouselItem_vuetify_link|.
:type ['boolean', 'object']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param to: See description |VCarouselItem_vuetify_link|.
:type ['string', 'object']:
:param transition: See description |VCarouselItem_vuetify_link|.
:type ['boolean', 'string']:
:param value: The value used when the component is selected in a group. If not provided, the index will be used.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-carousel-item", children, **kwargs)
self._attr_names += [
"active_class",
"append",
"disabled",
"eager",
"exact",
"exact_active_class",
"exact_path",
"href",
"link",
"nuxt",
"replace",
"reverse_transition",
"ripple",
"tag",
"target",
"to",
"transition",
"value",
]
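# Usage sketch for VCarousel/VCarouselItem (a minimal example under the
# same trame-app assumptions; `slide` is a hypothetical state name):
#
#     carousel = VCarousel(
#         children=[
#             VCarouselItem(children=[VCard(children=[VCardText(children=[f"Slide {i}"])])])
#             for i in range(3)
#         ],
#         v_model=("slide", 0),  # index of the active item
#         cycle=True,
#         interval=6000,         # ms between cycles; requires the cycle prop
#     )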
class VCheckbox(AbstractElement):
"""
Vuetify's VCheckbox component. See more info and examples |VCheckbox_vuetify_link|.
.. |VCheckbox_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-checkbox" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param background_color: Changes the background-color of the input
:type string:
:param color: See description |VCheckbox_vuetify_link|.
:type string:
:param dark: See description |VCheckbox_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param false_value: Sets value for falsy state
:type any:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param indeterminate: Sets an indeterminate state for the checkbox
:type boolean:
:param indeterminate_icon: The icon used when in an indeterminate state
:type string:
:param input_value: The **v-model** bound value
:type any:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Changes expected model to an array
:type boolean:
:param off_icon: The icon used when inactive
:type string:
:param on_icon: The icon used when active
:type string:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param ripple: See description |VCheckbox_vuetify_link|.
:type ['boolean', 'object']:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param true_value: Sets value for truthy state
:type any:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: Apply a custom value comparator function
:type function:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param update_error: The `error.sync` event
:param update_indeterminate: The **indeterminate.sync** event.
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-checkbox", children, **kwargs)
self._attr_names += [
"append_icon",
"background_color",
"color",
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"false_value",
"hide_details",
"hint",
"id",
"indeterminate",
"indeterminate_icon",
"input_value",
"label",
"light",
"messages",
"multiple",
"off_icon",
"on_icon",
"persistent_hint",
"prepend_icon",
"readonly",
"ripple",
"rules",
"success",
"success_messages",
"true_value",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_prepend", "click:prepend"),
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
("update_indeterminate", "update:indeterminate"),
]
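# Usage sketch for VCheckbox (a minimal example under the same trame-app
# assumptions; `accepted` and `ctrl.on_accept_change` are hypothetical
# names):
#
#     checkbox = VCheckbox(
#         label="I accept the terms",
#         v_model=("accepted", False),
#         hide_details="auto",
#         change=(ctrl.on_accept_change, "[$event]"),
#     )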
class VSimpleCheckbox(AbstractElement):
"""
Vuetify's VSimpleCheckbox component. See more info and examples |VSimpleCheckbox_vuetify_link|.
.. |VSimpleCheckbox_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-simple-checkbox" target="_blank">here</a>
:param color: See description |VSimpleCheckbox_vuetify_link|.
:type string:
:param dark: See description |VSimpleCheckbox_vuetify_link|.
:type boolean:
:param disabled: Disables simple checkbox.
:type boolean:
:param indeterminate: Sets an indeterminate state for the simple checkbox.
:type boolean:
:param indeterminate_icon: The icon used when in an indeterminate state.
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param off_icon: The icon used when inactive.
:type string:
:param on_icon: The icon used when active.
:type string:
:param ripple: See description |VSimpleCheckbox_vuetify_link|.
:type boolean:
:param value: A boolean value that represents whether the simple checkbox is checked.
:type boolean:
Events
:param input: The updated bound model
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-simple-checkbox", children, **kwargs)
self._attr_names += [
"color",
"dark",
"disabled",
"indeterminate",
"indeterminate_icon",
"light",
"off_icon",
"on_icon",
"ripple",
"value",
]
self._event_names += [
"input",
]
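# Usage sketch for VSimpleCheckbox. Unlike VCheckbox it exposes only the
# input event, so a write-back expression keeps state in sync (a minimal
# example; `row_checked` is a hypothetical state name):
#
#     simple = VSimpleCheckbox(
#         value=("row_checked", False),
#         input="row_checked = $event",  # JS expression writing the new boolean back
#     )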
class VChip(AbstractElement):
"""
Vuetify's VChip component. See more info and examples |VChip_vuetify_link|.
.. |VChip_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-chip" target="_blank">here</a>
:param active: Determines whether the chip is visible or not.
:type boolean:
:param active_class: See description |VChip_vuetify_link|.
:type string:
:param append: See description |VChip_vuetify_link|.
:type boolean:
:param close: Adds remove button
:type boolean:
:param close_icon: Change the default icon used for **close** chips
:type string:
:param close_label: See description |VChip_vuetify_link|.
:type string:
:param color: See description |VChip_vuetify_link|.
:type string:
:param dark: See description |VChip_vuetify_link|.
:type boolean:
:param disabled: Disables the chip, making it un-selectable
:type boolean:
:param draggable: Makes the chip draggable
:type boolean:
:param exact: See description |VChip_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VChip_vuetify_link|.
:type string:
:param exact_path: See description |VChip_vuetify_link|.
:type boolean:
:param filter: Displays a selection icon when selected
:type boolean:
:param filter_icon: Change the default icon used for **filter** chips
:type string:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param input_value: Controls the **active** state of the item. This is typically used to highlight the component.
:type any:
:param label: Removes circle edges
:type boolean:
:param large: Makes the component large.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param link: Explicitly define the chip as a link
:type boolean:
:param nuxt: See description |VChip_vuetify_link|.
:type boolean:
:param outlined: Removes background and applies border and text color
:type boolean:
:param pill: Remove `v-avatar` padding
:type boolean:
:param replace: See description |VChip_vuetify_link|.
:type boolean:
:param ripple: See description |VChip_vuetify_link|.
:type ['boolean', 'object']:
:param small: Makes the component small.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param text_color: Applies a specified color to the control text
:type string:
:param to: See description |VChip_vuetify_link|.
:type ['string', 'object']:
:param value: See description |VChip_vuetify_link|.
:type any:
:param x_large: Makes the component extra large.
:type boolean:
:param x_small: Makes the component extra small.
:type boolean:
Events
:param click_close: Emitted when close icon is clicked
:param input: The updated bound model
:param update_active: Emitted when close icon is clicked, sets active to `false`
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-chip", children, **kwargs)
self._attr_names += [
"active",
"active_class",
"append",
"close",
"close_icon",
"close_label",
"color",
"dark",
"disabled",
"draggable",
"exact",
"exact_active_class",
"exact_path",
"filter",
"filter_icon",
"href",
"input_value",
"label",
"large",
"light",
"link",
"nuxt",
"outlined",
"pill",
"replace",
"ripple",
"small",
"tag",
"target",
"text_color",
"to",
"value",
"x_large",
"x_small",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
("click_close", "click:close"),
"input",
("update_active", "update:active"),
]
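# Usage sketch for VChip (a minimal example under the same trame-app
# assumptions; `chip_visible` is a hypothetical state name):
#
#     chip = VChip(
#         children=["Filter: active"],
#         close=True,
#         input_value=("chip_visible", True),
#         update_active="chip_visible = false",  # hide the chip when its close icon is clicked
#     )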
class VChipGroup(AbstractElement):
"""
Vuetify's VChipGroup component. See more info and examples |VChipGroup_vuetify_link|.
.. |VChipGroup_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-chip-group" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param center_active: Forces the selected chip to be centered
:type boolean:
:param color: See description |VChipGroup_vuetify_link|.
:type string:
:param column: Remove horizontal pagination and wrap items as needed
:type boolean:
:param dark: See description |VChipGroup_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param mobile_breakpoint: Sets the designated mobile breakpoint for the component.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param next_icon: Specify the icon to use for the next icon
:type string:
:param prev_icon: Specify the icon to use for the prev icon
:type string:
:param show_arrows: Force the display of the pagination arrows
:type ['boolean', 'string']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param value: The designated model value for the component.
:type any:
Events
:param change: Emitted when the component value is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-chip-group", children, **kwargs)
self._attr_names += [
"active_class",
"center_active",
"color",
"column",
"dark",
"light",
"mandatory",
"max",
"mobile_breakpoint",
"multiple",
"next_icon",
"prev_icon",
"show_arrows",
"tag",
"value",
]
self._event_names += [
"change",
]
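# Usage sketch for VChipGroup wrapping filter chips (a minimal example
# under the same trame-app assumptions; `selected_tags` is a hypothetical
# state name):
#
#     group = VChipGroup(
#         children=[VChip(children=[t], filter=True, value=t) for t in ("cpu", "gpu", "io")],
#         v_model=("selected_tags", []),
#         multiple=True,  # the value prop must be an array when multiple is set
#         column=True,
#     )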
class VColorPicker(AbstractElement):
"""
Vuetify's VColorPicker component. See more info and examples |VColorPicker_vuetify_link|.
.. |VColorPicker_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-color-picker" target="_blank">here</a>
:param canvas_height: Height of canvas
:type ['string', 'number']:
:param dark: See description |VColorPicker_vuetify_link|.
:type boolean:
:param disabled: Disables picker
:type boolean:
:param dot_size: Changes the size of the selection dot on the canvas
:type ['number', 'string']:
:param elevation: See description |VColorPicker_vuetify_link|.
:type ['number', 'string']:
:param flat: Removes elevation
:type boolean:
:param hide_canvas: Hides canvas
:type boolean:
:param hide_inputs: Hides inputs
:type boolean:
:param hide_mode_switch: Hides mode switch
:type boolean:
:param hide_sliders: Hides sliders
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mode: Sets mode of inputs. Available modes are 'rgba', 'hsla', and 'hexa'. Can be synced with the `.sync` modifier.
:type string:
:param show_swatches: Displays color swatches
:type boolean:
:param swatches: Sets the available color swatches to select from - This prop only accepts rgba hex strings
:type array:
:param swatches_max_height: Sets the maximum height of the swatches section
:type ['number', 'string']:
:param value: Current color. This can be either a string representing a hex color, or an object representing a RGBA, HSLA, or HSVA value
:type ['object', 'string']:
:param width: Sets the width of the color picker
:type ['number', 'string']:
Events
:param input: Selected color. Depending on what you passed to the `value` prop this is either a string or an object
:param update_color: Selected color. This is the internal representation of the color, containing all values.
:param update_mode: Selected mode
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-color-picker", children, **kwargs)
self._attr_names += [
"canvas_height",
"dark",
"disabled",
"dot_size",
"elevation",
"flat",
"hide_canvas",
"hide_inputs",
"hide_mode_switch",
"hide_sliders",
"light",
"mode",
"show_swatches",
"swatches",
"swatches_max_height",
"value",
"width",
]
self._event_names += [
"input",
("update_color", "update:color"),
("update_mode", "update:mode"),
]
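# Usage sketch for VColorPicker (a minimal example under the same
# trame-app assumptions; `color` is a hypothetical state name; v_model
# carries a hex string or a color object, per the value prop above):
#
#     picker = VColorPicker(
#         v_model=("color", "#1976D2"),
#         mode="hexa",
#         hide_mode_switch=True,
#         show_swatches=True,
#     )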
class VContent(AbstractElement):
"""
Vuetify's VContent component. See more info and examples |VContent_vuetify_link|.
.. |VContent_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-content" target="_blank">here</a>
:param tag: Specify a custom tag used on the root element.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-content", children, **kwargs)
self._attr_names += [
"tag",
]
class VCombobox(AbstractElement):
"""
Vuetify's VCombobox component. See more info and examples |VCombobox_vuetify_link|.
.. |VCombobox_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-combobox" target="_blank">here</a>
:param allow_overflow: Allow the menu to overflow off the screen
:type boolean:
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon to the outside the component's input, uses same syntax as `v-icon`
:type string:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param auto_select_first: When searching, will always highlight the first option
:type boolean:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param cache_items: Keeps a local _unique_ copy of all items that have been passed through the **items** prop.
:type boolean:
:param chips: Changes display of selections to chips
:type boolean:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Add input clear functionality, default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VCombobox_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_value:
:type function:
:param dark: See description |VCombobox_vuetify_link|.
:type boolean:
:param deletable_chips: Adds a remove icon to selected chips
:type boolean:
:param delimiters: Accepts an array of strings that will trigger a new tag when typing. Does not replace the normal Tab and Enter keys.
:type array:
:param dense: Reduces the input height
:type boolean:
:param disable_lookup: Disables keyboard lookup
:type boolean:
:param disabled: Disables the input
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param filter: See description |VCombobox_vuetify_link|.
:type function:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hide_no_data: Hides the menu when there are no options to show. Useful for preventing the menu from opening before results are fetched asynchronously. Also has the effect of opening the menu when the `items` array changes if not already open.
:type boolean:
:param hide_selected: Do not display in the select menu items that are already selected
:type boolean:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param item_color: Sets color of selected items
:type string:
:param item_disabled: Set property of **items**'s disabled value
:type ['string', 'array', 'function']:
:param item_text: Set property of **items**'s text value
:type ['string', 'array', 'function']:
:param item_value: See description |VCombobox_vuetify_link|.
:type ['string', 'array', 'function']:
:param items: Can be an array of objects or array of strings. When using objects, will look for a text, value and disabled keys. This can be changed using the **item-text**, **item-value** and **item-disabled** props. Objects that have a **header** or **divider** property are considered special cases and generate a list header or divider; these items are not selectable.
:type array:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param menu_props: Pass props through to the `v-menu` component. Accepts either a string for boolean props `menu-props="auto, overflowY"`, or an object `:menu-props="{ auto: true, overflowY: true }"`
:type ['string', 'array', 'object']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Changes select to multiple. Accepts array for value
:type boolean:
:param no_data_text: Display text when there is no data
:type string:
:param no_filter: Do not apply filtering when searching. Useful when data is being filtered server side
:type boolean:
:param open_on_clear: When using the **clearable** prop, once cleared, the select menu will either open or stay open, depending on the current state
:type boolean:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param return_object: Changes the selection behavior to return the object directly rather than the value specified with **item-value**
:type boolean:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param search_input: Search value. Can be used with `.sync` modifier.
:type string:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param small_chips: Changes display of selections to chips with the **small** property
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: See description |VCombobox_vuetify_link|.
:type function:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
:param update_list_index: Emitted when menu item is selected using keyboard arrows
:param update_search_input: The `search-input.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-combobox", children, **kwargs)
self._attr_names += [
"allow_overflow",
"append_icon",
"append_outer_icon",
"attach",
"auto_select_first",
"autofocus",
"background_color",
"cache_items",
"chips",
"clear_icon",
"clearable",
"color",
"counter",
"counter_value", # JS functions unimplemented
"dark",
"deletable_chips",
"delimiters",
"dense",
"disable_lookup",
"disabled",
"eager",
"error",
"error_count",
"error_messages",
"filled",
"filter", # JS functions unimplemented
"flat",
"full_width",
"height",
"hide_details",
"hide_no_data",
"hide_selected",
"hint",
"id",
"item_color",
"item_disabled", # JS functions unimplemented
"item_text", # JS functions unimplemented
"item_value", # JS functions unimplemented
"items",
"label",
"light",
"loader_height",
"loading",
"menu_props",
"messages",
"multiple",
"no_data_text",
"no_filter",
"open_on_clear",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"readonly",
"return_object",
"reverse",
"rounded",
"rules",
"search_input",
"shaped",
"single_line",
"small_chips",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"type",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
("update_list_index", "update:list-index"),
("update_search_input", "update:search-input"),
]
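# --- Usage sketch: VCombobox (illustrative only, kept as comments so the
# module stays import-safe). Assumes this module is imported as `vuetify`
# inside a trame layout; the state names "fruit" / "fruit_items" and the
# handler `on_fruit_change` are hypothetical.
#
#   combo = vuetify.VCombobox(
#       v_model=("fruit", None),                     # two-way bound to state.fruit
#       items=("fruit_items", ["Apple", "Banana"]),  # reactive item list
#       label="Pick or type a fruit",
#       clearable=True,
#       chips=True,
#       change=on_fruit_change,                      # Python callback for the change event
#   )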
class VDataIterator(AbstractElement):
"""
Vuetify's VDataIterator component. See more info and examples |VDataIterator_vuetify_link|.
.. |VDataIterator_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-data-iterator" target="_blank">here</a>
:param checkbox_color:
:type string:
:param custom_filter: Function to filter items
:type function:
:param custom_group: Function used to group items
:type function:
:param custom_sort: Function used to sort items
:type function:
:param dark: See description |VDataIterator_vuetify_link|.
:type boolean:
:param disable_filtering: Disables filtering completely
:type boolean:
:param disable_pagination: Disables pagination completely
:type boolean:
:param disable_sort: Disables sorting completely
:type boolean:
:param expanded: Array of expanded items. Can be used with `.sync` modifier
:type array:
:param footer_props: See description |VDataIterator_vuetify_link|.
:type object:
:param group_by: Changes which item property should be used for grouping items. Currently only supports a single grouping in the format: `group` or `['group']`. When using an array, only the first element is considered. Can be used with `.sync` modifier
:type ['string', 'array']:
:param group_desc: Changes which direction grouping is done. Can be used with `.sync` modifier
:type ['boolean', 'array']:
:param hide_default_footer: Hides default footer
:type boolean:
:param item_key: The property on each item that is used as a unique key
:type string:
:param items: The array of items to display
:type array:
:param items_per_page: Changes how many items per page should be visible. Can be used with `.sync` modifier. Setting this prop to `-1` will display all items on the page
:type number:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loading: If `true` and no items are provided, then a loading text will be shown
:type ['boolean', 'string']:
:param loading_text: Text shown when `loading` is true and no items are provided
:type string:
:param locale: See description |VDataIterator_vuetify_link|.
:type string:
:param mobile_breakpoint: Used to set when to toggle between regular table and mobile view
:type ['number', 'string']:
:param multi_sort: If `true` then one can sort on multiple properties
:type boolean:
:param must_sort: If `true`, sorting cannot be disabled; it will always switch between ascending and descending
:type boolean:
:param no_data_text: Text shown when no items are provided to the component
:type string:
:param no_results_text: Text shown when `search` prop is used and there are no results
:type string:
:param options:
:type DataOptions:
:param page:
:type number:
:param search: Text input used to filter items
:type string:
:param selectable_key: The property on each item that is used to determine if it is selectable or not
:type string:
:param server_items_length: Used only when data is provided by a server. Should be set to the total number of items available on the server so that pagination works correctly
:type number:
:param single_expand: Changes expansion mode to single expand
:type boolean:
:param single_select: Changes selection mode to single select
:type boolean:
:param sort_by: Changes which item property (or properties) should be used for sort order. Can be used with `.sync` modifier
:type ['string', 'array']:
:param sort_desc: Changes which direction sorting is done. Can be used with `.sync` modifier
:type ['boolean', 'array']:
:param value: Used for controlling selected rows
:type array:
Events
:param current_items:
:param input: Array of selected items
:param item_expanded: Event emitted when an item is expanded or closed
:param item_selected: Event emitted when an item is selected or deselected
:param page_count:
:param pagination:
:param toggle_select_all:
:param update_expanded: The `.sync` event for `expanded` prop
:param update_group_by:
:param update_group_desc:
:param update_items_per_page:
:param update_multi_sort:
:param update_must_sort:
:param update_options:
:param update_page:
:param update_sort_by:
:param update_sort_desc:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-data-iterator", children, **kwargs)
self._attr_names += [
"checkbox_color",
"custom_filter", # JS functions unimplemented
"custom_group", # JS functions unimplemented
"custom_sort", # JS functions unimplemented
"dark",
"disable_filtering",
"disable_pagination",
"disable_sort",
"expanded",
"footer_props",
"group_by",
"group_desc",
"hide_default_footer",
"item_key",
"items",
"items_per_page",
"light",
"loading",
"loading_text",
"locale",
"mobile_breakpoint",
"multi_sort",
"must_sort",
"no_data_text",
"no_results_text",
"options",
"page",
"search",
"selectable_key",
"server_items_length",
"single_expand",
"single_select",
"sort_by",
"sort_desc",
"value",
]
self._event_names += [
("current_items", "current-items"),
"input",
("item_expanded", "item-expanded"),
("item_selected", "item-selected"),
("page_count", "page-count"),
"pagination",
("toggle_select_all", "toggle-select-all"),
("update_expanded", "update:expanded"),
("update_group_by", "update:group-by"),
("update_group_desc", "update:group-desc"),
("update_items_per_page", "update:items-per-page"),
("update_multi_sort", "update:multi-sort"),
("update_must_sort", "update:must-sort"),
("update_options", "update:options"),
("update_page", "update:page"),
("update_sort_by", "update:sort-by"),
("update_sort_desc", "update:sort-desc"),
]
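# --- Usage sketch: VDataIterator (illustrative only). A minimal sketch,
# assuming a "cards" state variable holding a list of dicts; the slot markup
# one would normally pass as children is omitted for brevity.
#
#   iterator = vuetify.VDataIterator(
#       items=("cards", [{"name": "a"}, {"name": "b"}]),
#       items_per_page=4,
#       hide_default_footer=True,
#   )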
class VDataFooter(AbstractElement):
"""
Vuetify's VDataFooter component. See more info and examples |VDataFooter_vuetify_link|.
.. |VDataFooter_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-data-footer" target="_blank">here</a>
:param disable_items_per_page: Disables items-per-page dropdown
:type boolean:
:param disable_pagination: Disables pagination buttons
:type boolean:
:param first_icon: First icon
:type string:
:param items_per_page_all_text: Text for 'All' option in items-per-page dropdown
:type string:
:param items_per_page_options: Array of options to show in the items-per-page dropdown
:type array:
:param items_per_page_text: Text for items-per-page dropdown
:type string:
:param last_icon: Last icon
:type string:
:param next_icon: Next icon
:type string:
:param options: DataOptions
:type object:
:param page_text:
:type string:
:param pagination: DataPagination
:type object:
:param prev_icon: Previous icon
:type string:
:param show_current_page: Show current page number between prev/next icons
:type boolean:
:param show_first_last_page: Show first/last icons
:type boolean:
Events
:param update_options: The `.sync` event for `options` prop
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-data-footer", children, **kwargs)
self._attr_names += [
"disable_items_per_page",
"disable_pagination",
"first_icon",
"items_per_page_all_text",
"items_per_page_options",
"items_per_page_text",
"last_icon",
"next_icon",
"options",
"page_text",
"pagination",
"prev_icon",
"show_current_page",
"show_first_last_page",
]
self._event_names += [
("update_options", "update:options"),
]
class VDataTable(AbstractElement):
"""
Vuetify's VDataTable component. See more info and examples |VDataTable_vuetify_link|.
.. |VDataTable_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-data-table" target="_blank">here</a>
:param calculate_widths: Enables calculation of column widths. `widths` property will be available in select scoped slots
:type boolean:
:param caption: Set the caption (using `<caption>`)
:type string:
:param checkbox_color: Set the color of the checkboxes (showSelect must be used)
:type string:
:param custom_filter: Function to filter items
:type function:
:param custom_group: Function used to group items
:type function:
:param custom_sort: Function used to sort items
:type function:
:param dark: See description |VDataTable_vuetify_link|.
:type boolean:
:param dense: Decreases the height of rows
:type boolean:
:param disable_filtering: Disables filtering completely
:type boolean:
:param disable_pagination: Disables pagination completely
:type boolean:
:param disable_sort: Disables sorting completely
:type boolean:
:param expand_icon: Icon used for expand toggle button.
:type string:
:param expanded: Array of expanded items. Can be used with `.sync` modifier
:type array:
:param fixed_header: Fixed header to top of table. **NOTE:** Does not work in IE11
:type boolean:
:param footer_props: See description |VDataTable_vuetify_link|.
:type object:
:param group_by: Changes which item property should be used for grouping items. Currently only supports a single grouping in the format: `group` or `['group']`. When using an array, only the first element is considered. Can be used with `.sync` modifier
:type ['string', 'array']:
:param group_desc: Changes which direction grouping is done. Can be used with `.sync` modifier
:type ['boolean', 'array']:
:param header_props: See description |VDataTable_vuetify_link|.
:type object:
:param headers: An array of objects that each describe a header column. See the example below for a definition of all properties
:type DataTableHeader[]:
:param headers_length: Can be used in combination with `hide-default-header` to specify the number of columns in the table to allow expansion rows and loading bar to function properly
:type number:
:param height: Set an explicit height of table
:type ['number', 'string']:
:param hide_default_footer: Hides default footer
:type boolean:
:param hide_default_header: Hide the default headers
:type boolean:
:param item_class: Property on supplied `items` that contains item's row class or function that takes an item as an argument and returns the class of corresponding row
:type ['string', 'function']:
:param item_key: The property on each item that is used as a unique key
:type string:
:param items: The array of items to display
:type array:
:param items_per_page: Changes how many items per page should be visible. Can be used with `.sync` modifier. Setting this prop to `-1` will display all items on the page
:type number:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: If `true` and no items are provided, then a loading text will be shown
:type ['boolean', 'string']:
:param loading_text: Text shown when `loading` is true and no items are provided
:type string:
:param locale: See description |VDataTable_vuetify_link|.
:type string:
:param mobile_breakpoint: Used to set when to toggle between regular table and mobile view
:type ['number', 'string']:
:param multi_sort: If `true` then one can sort on multiple properties
:type boolean:
:param must_sort: If `true`, sorting cannot be disabled; it will always switch between ascending and descending
:type boolean:
:param no_data_text: Text shown when no items are provided to the component
:type string:
:param no_results_text: Text shown when `search` prop is used and there are no results
:type string:
:param options:
:type DataOptions:
:param page:
:type number:
:param search: Text input used to filter items
:type string:
:param selectable_key: The property on each item that is used to determine if it is selectable or not
:type string:
:param server_items_length: Used only when data is provided by a server. Should be set to the total number of items available on the server so that pagination works correctly
:type number:
:param show_expand: Shows the expand toggle in default rows
:type boolean:
:param show_group_by: Shows the group by toggle in the header and enables grouped rows
:type boolean:
:param show_select: Shows the select checkboxes in both the header and rows (if using default rows)
:type boolean:
:param single_expand: Changes expansion mode to single expand
:type boolean:
:param single_select: Changes selection mode to single select
:type boolean:
:param sort_by: Changes which item property (or properties) should be used for sort order. Can be used with `.sync` modifier
:type ['string', 'array']:
:param sort_desc: Changes which direction sorting is done. Can be used with `.sync` modifier
:type ['boolean', 'array']:
:param value: Used for controlling selected rows
:type array:
Events
:param click_row: Emits when a table row is clicked. This event provides 2 arguments: the first is the item data that was clicked and the second is the other related data provided by the `item` slot. **NOTE:** will not emit when table rows are defined through a slot such as `item` or `body`.
:param contextmenu_row: Emits when a table row is right-clicked. The item for the row is included. **NOTE:** will not emit when table rows are defined through a slot such as `item` or `body`.
:param current_items:
:param dblclick_row: Emits when a table row is double-clicked. The item for the row is included. **NOTE:** will not emit when table rows are defined through a slot such as `item` or `body`.
:param input: Array of selected items
:param item_expanded: Event emitted when an item is expanded or closed
:param item_selected: Event emitted when an item is selected or deselected
:param page_count:
:param pagination:
:param toggle_select_all:
:param update_expanded: The `.sync` event for `expanded` prop
:param update_group_by:
:param update_group_desc:
:param update_items_per_page:
:param update_multi_sort:
:param update_must_sort:
:param update_options:
:param update_page:
:param update_sort_by:
:param update_sort_desc:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-data-table", children, **kwargs)
self.ttsSensitive()
self._attr_names += [
"calculate_widths",
"caption",
"checkbox_color",
"custom_filter", # JS functions unimplemented
"custom_group", # JS functions unimplemented
"custom_sort", # JS functions unimplemented
"dark",
"dense",
"disable_filtering",
"disable_pagination",
"disable_sort",
"expand_icon",
"expanded",
"fixed_header",
"footer_props",
"group_by",
"group_desc",
"header_props",
"headers",
"headers_length",
"height",
"hide_default_footer",
"hide_default_header",
"item_class", # JS functions unimplemented
"item_key",
"items",
"items_per_page",
"light",
"loader_height",
"loading",
"loading_text",
"locale",
"mobile_breakpoint",
"multi_sort",
"must_sort",
"no_data_text",
"no_results_text",
"options",
"page",
"search",
"selectable_key",
"server_items_length",
"show_expand",
"show_group_by",
"show_select",
"single_expand",
"single_select",
"sort_by",
"sort_desc",
"value",
]
self._event_names += [
("click_row", "click:row"),
("contextmenu_row", "contextmenu:row"),
("current_items", "current-items"),
("dblclick_row", "dblclick:row"),
"input",
("item_expanded", "item-expanded"),
("item_selected", "item-selected"),
("page_count", "page-count"),
"pagination",
("toggle_select_all", "toggle-select-all"),
("update_expanded", "update:expanded"),
("update_group_by", "update:group-by"),
("update_group_desc", "update:group-desc"),
("update_items_per_page", "update:items-per-page"),
("update_multi_sort", "update:multi-sort"),
("update_must_sort", "update:must-sort"),
("update_options", "update:options"),
("update_page", "update:page"),
("update_sort_by", "update:sort-by"),
("update_sort_desc", "update:sort-desc"),
]
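# --- Usage sketch: VDataTable (illustrative only). Assumes "table_headers"
# and "table_rows" are state variables defined by the enclosing trame app;
# `on_row_click` is a hypothetical Python handler for the click:row event.
#
#   table = vuetify.VDataTable(
#       headers=("table_headers", [
#           {"text": "Name", "value": "name"},
#           {"text": "Calories", "value": "calories"},
#       ]),
#       items=("table_rows", [{"name": "Cupcake", "calories": 305}]),
#       item_key="name",
#       dense=True,
#       click_row=on_row_click,
#   )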
class VEditDialog(AbstractElement):
"""
Vuetify's VEditDialog component. See more info and examples |VEditDialog_vuetify_link|.
.. |VEditDialog_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-edit-dialog" target="_blank">here</a>
:param cancel_text: Sets the default text for the cancel button when using the **large** prop
:type any:
:param dark: See description |VEditDialog_vuetify_link|.
:type boolean:
:param eager: Will force the component's content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param large: Attaches a submit and cancel button to the dialog
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param persistent: Clicking outside or pressing **esc** key will not dismiss the dialog
:type boolean:
:param return_value:
:type any:
:param save_text: Sets the default text for the save button when using the **large** prop
:type any:
:param transition: See description |VEditDialog_vuetify_link|.
:type string:
Events
:param cancel: Emits when editing is canceled
:param close: Emits when edit-dialog close button is pressed
:param open: Emits when editing is opened
:param save: Emits when edit-dialog save button is pressed
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-edit-dialog", children, **kwargs)
self._attr_names += [
"cancel_text",
"dark",
"eager",
"large",
"light",
"persistent",
"return_value",
"save_text",
"transition",
]
self._event_names += [
"cancel",
"close",
"open",
"save",
]
class VDataTableHeader(AbstractElement):
"""
Vuetify's VDataTableHeader component. See more info and examples |VDataTableHeader_vuetify_link|.
.. |VDataTableHeader_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-data-table-header" target="_blank">here</a>
:param checkbox_color:
:type string:
:param disable_sort: Toggles rendering of sort button
:type boolean:
:param every_item: Indicates if all items in table are selected
:type boolean:
:param headers: Array of header items to display
:type array:
:param mobile: Renders mobile view of headers
:type boolean:
:param options: Options object. Identical to the one on `v-data-table`
:type object:
:param show_group_by: Shows group by button
:type boolean:
:param single_select: Toggles rendering of select-all checkbox
:type boolean:
:param some_items: Indicates if one or more items in table are selected
:type boolean:
:param sort_by_text: Sets the label text used by the default sort-by selector when `v-data-table` is rendering the mobile view
:type string:
:param sort_icon: Icon used for sort button
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-data-table-header", children, **kwargs)
self._attr_names += [
"checkbox_color",
"disable_sort",
"every_item",
"headers",
"mobile",
"options",
"show_group_by",
"single_select",
"some_items",
"sort_by_text",
"sort_icon",
]
class VSimpleTable(AbstractElement):
"""
Vuetify's VSimpleTable component. See more info and examples |VSimpleTable_vuetify_link|.
.. |VSimpleTable_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-simple-table" target="_blank">here</a>
:param dark: See description |VSimpleTable_vuetify_link|.
:type boolean:
:param dense: Decreases paddings to render a dense table
:type boolean:
:param fixed_header: Sets table header to fixed mode
:type boolean:
:param height: Sets the height for the component
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-simple-table", children, **kwargs)
self._attr_names += [
"dark",
"dense",
"fixed_header",
"height",
"light",
]
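# --- Usage sketch: VSimpleTable (illustrative only). The component renders
# plain table markup passed as children; a raw HTML string child is used here,
# which trame inserts verbatim into the template.
#
#   simple = vuetify.VSimpleTable(
#       dense=True,
#       children=[
#           "<thead><tr><th>Name</th><th>Value</th></tr></thead>"
#           "<tbody><tr><td>alpha</td><td>1</td></tr></tbody>",
#       ],
#   )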
class VDatePicker(AbstractElement):
"""
Vuetify's VDatePicker component. See more info and examples |VDatePicker_vuetify_link|.
.. |VDatePicker_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-date-picker" target="_blank">here</a>
:param active_picker: Determines which picker in the date or month picker is being displayed. Allowed values: `'DATE'`, `'MONTH'`, `'YEAR'`
:type string:
:param allowed_dates: Restricts which dates can be selected
:type function:
:param color: See description |VDatePicker_vuetify_link|.
:type string:
:param dark: See description |VDatePicker_vuetify_link|.
:type boolean:
:param day_format: Allows you to customize the format of the day string that appears in the date table. Called with date (ISO 8601 **date** string) arguments.
:type function:
:param disabled: Disables interaction with the picker
:type boolean:
:param elevation: See description |VDatePicker_vuetify_link|.
:type ['number', 'string']:
:param event_color: Sets the color for event dot. It can be string (all events will have the same color) or `object` where attribute is the event date and value is boolean/color/array of colors for specified date or `function` taking date as a parameter and returning boolean/color/array of colors for that date
:type ['array', 'function', 'object', 'string']:
:param events: Array of dates or object defining events or colors or function returning boolean/color/array of colors
:type ['array', 'function', 'object']:
:param first_day_of_week: Sets the first day of the week, starting with 0 for Sunday.
:type ['string', 'number']:
:param flat: Removes elevation
:type boolean:
:param full_width: Forces 100% width
:type boolean:
:param header_color: Defines the header color. If not specified it will use the color defined by the **color** prop or the default picker color
:type string:
:param header_date_format: Allows you to customize the format of the month string that appears in the header of the calendar. Called with date (ISO 8601 **date** string) arguments.
:type function:
:param landscape: Orients picker horizontal
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param locale: Sets the locale. Accepts a string with a BCP 47 language tag.
:type string:
:param locale_first_day_of_year: Sets the day that determines the first week of the year, starting with 0 for **Sunday**. For ISO 8601 this should be 4.
:type ['string', 'number']:
:param max: Maximum allowed date/month (ISO 8601 format)
:type string:
:param min: Minimum allowed date/month (ISO 8601 format)
:type string:
:param month_format: Formatting function used for displaying months in the months table. Called with date (ISO 8601 **date** string) arguments.
:type function:
:param multiple: Allow the selection of multiple dates
:type boolean:
:param next_icon: Sets the icon for next month/year button
:type string:
:param next_month_aria_label:
:type string:
:param next_year_aria_label:
:type string:
:param no_title: Hide the picker title
:type boolean:
:param picker_date: Displayed year/month
:type string:
:param prev_icon: Sets the icon for previous month/year button
:type string:
:param prev_month_aria_label:
:type string:
:param prev_year_aria_label:
:type string:
:param range: Allow the selection of date range
:type boolean:
:param reactive: Updates the picker model when changing months/years automatically
:type boolean:
:param readonly: Makes the picker readonly (doesn't allow selecting a new date)
:type boolean:
:param scrollable: Allows changing displayed month with mouse scroll
:type boolean:
:param selected_items_text: See description |VDatePicker_vuetify_link|.
:type string:
:param show_adjacent_months: Toggles visibility of days from previous and next months
:type boolean:
:param show_current: Toggles visibility of the current date/month outline, or shows the provided date/month as the current one
:type ['boolean', 'string']:
:param show_week: Toggles visibility of the week numbers in the body of the calendar
:type boolean:
:param title_date_format: Allows you to customize the format of the date string that appears in the title of the date picker. Called with date (ISO 8601 **date** string) arguments.
:type function:
:param type: Determines the type of the picker - `date` for date picker, `month` for month picker
:type string:
:param value: Date picker model (ISO 8601 format, YYYY-mm-dd or YYYY-mm)
:type ['array', 'string']:
:param weekday_format: Allows you to customize the format of the weekday string that appears in the body of the calendar. Called with date (ISO 8601 **date** string) arguments.
:type function:
:param width: Width of the picker
:type ['number', 'string']:
:param year_format: Allows you to customize the format of the year string that appears in the header of the calendar. Called with date (ISO 8601 **date** string) arguments.
:type function:
:param year_icon: Sets the icon in the year selection button
:type string:
Events
:param change: Reactive date picker emits `input` even when any part of the date (year/month/day) changes, but `change` event is emitted only when the day (for date pickers) or month (for month pickers) changes. If `range` prop is set, date picker emits `change` when both [from, to] are selected.
:param input: The updated bound model
:param update_active_picker: The `.sync` event for `active-picker` prop
:param update_picker_date: The `.sync` event for `picker-date` prop
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-date-picker", children, **kwargs)
self._attr_names += [
"active_picker",
"allowed_dates", # JS functions unimplemented
"color",
"dark",
"day_format", # JS functions unimplemented
"disabled",
"elevation",
"event_color", # JS functions unimplemented
"events", # JS functions unimplemented
"first_day_of_week",
"flat",
"full_width",
"header_color",
"header_date_format", # JS functions unimplemented
"landscape",
"light",
"locale",
"locale_first_day_of_year",
"max",
"min",
"month_format", # JS functions unimplemented
"multiple",
"next_icon",
"next_month_aria_label",
"next_year_aria_label",
"no_title",
"picker_date",
"prev_icon",
"prev_month_aria_label",
"prev_year_aria_label",
"range",
"reactive",
"readonly",
"scrollable",
"selected_items_text",
"show_adjacent_months",
"show_current",
"show_week",
"title_date_format", # JS functions unimplemented
"type",
"value",
"weekday_format", # JS functions unimplemented
"width",
"year_format", # JS functions unimplemented
"year_icon",
]
self._event_names += [
("click_date", "click:date"),
("click_month", "click:month"),
("click_year", "click:year"),
("dblclick_date", "dblclick:date"),
("dblclick_month", "dblclick:month"),
("dblclick_year", "dblclick:year"),
("mousedown_date", "mousedown:date"),
("mousedown_month", "mousedown:month"),
("mousedown_year", "mousedown:year"),
("mouseenter_date", "mouseenter:date"),
("mouseenter_month", "mouseenter:month"),
("mouseenter_year", "mouseenter:year"),
("mouseleave_date", "mouseleave:date"),
("mouseleave_month", "mouseleave:month"),
("mouseleave_year", "mouseleave:year"),
("mousemove_date", "mousemove:date"),
("mousemove_month", "mousemove:month"),
("mousemove_year", "mousemove:year"),
("mouseover_date", "mouseover:date"),
("mouseover_month", "mouseover:month"),
("mouseover_year", "mouseover:year"),
("mouseout_date", "mouseout:date"),
("mouseout_month", "mouseout:month"),
("mouseout_year", "mouseout:year"),
("mouseup_date", "mouseup:date"),
("mouseup_month", "mouseup:month"),
("mouseup_year", "mouseup:year"),
("focus_date", "focus:date"),
("focus_month", "focus:month"),
("focus_year", "focus:year"),
("click_date", "click:date"),
("click_month", "click:month"),
("click_year", "click:year"),
("dblclick_date", "dblclick:date"),
("dblclick_month", "dblclick:month"),
("dblclick_year", "dblclick:year"),
("mousedown_date", "mousedown:date"),
("mousedown_month", "mousedown:month"),
("mousedown_year", "mousedown:year"),
("mouseenter_date", "mouseenter:date"),
("mouseenter_month", "mouseenter:month"),
("mouseenter_year", "mouseenter:year"),
("mouseleave_date", "mouseleave:date"),
("mouseleave_month", "mouseleave:month"),
("mouseleave_year", "mouseleave:year"),
("mousemove_date", "mousemove:date"),
("mousemove_month", "mousemove:month"),
("mousemove_year", "mousemove:year"),
("mouseover_date", "mouseover:date"),
("mouseover_month", "mouseover:month"),
("mouseover_year", "mouseover:year"),
("mouseout_date", "mouseout:date"),
("mouseout_month", "mouseout:month"),
("mouseout_year", "mouseout:year"),
("mouseup_date", "mouseup:date"),
("mouseup_month", "mouseup:month"),
("mouseup_year", "mouseup:year"),
("focus_date", "focus:date"),
("focus_month", "focus:month"),
("focus_year", "focus:year"),
("click_date", "click:date"),
("click_month", "click:month"),
("click_year", "click:year"),
("dblclick_date", "dblclick:date"),
("dblclick_month", "dblclick:month"),
("dblclick_year", "dblclick:year"),
("mousedown_date", "mousedown:date"),
("mousedown_month", "mousedown:month"),
("mousedown_year", "mousedown:year"),
("mouseenter_date", "mouseenter:date"),
("mouseenter_month", "mouseenter:month"),
("mouseenter_year", "mouseenter:year"),
("mouseleave_date", "mouseleave:date"),
("mouseleave_month", "mouseleave:month"),
("mouseleave_year", "mouseleave:year"),
("mousemove_date", "mousemove:date"),
("mousemove_month", "mousemove:month"),
("mousemove_year", "mousemove:year"),
("mouseover_date", "mouseover:date"),
("mouseover_month", "mouseover:month"),
("mouseover_year", "mouseover:year"),
("mouseout_date", "mouseout:date"),
("mouseout_month", "mouseout:month"),
("mouseout_year", "mouseout:year"),
("mouseup_date", "mouseup:date"),
("mouseup_month", "mouseup:month"),
("mouseup_year", "mouseup:year"),
("focus_date", "focus:date"),
("focus_month", "focus:month"),
("focus_year", "focus:year"),
"change",
"input",
("update_active_picker", "update:active-picker"),
("update_picker_date", "update:picker-date"),
]
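# --- Usage sketch: VDatePicker (illustrative only). Assumes a "date_range"
# state variable; with `range=True` the bound model is a [from, to] list and
# the `change` event fires once both ends are selected.
#
#   picker = vuetify.VDatePicker(
#       v_model=("date_range", []),
#       range=True,
#       min="2023-01-01",
#       max="2023-12-31",
#       change=on_range_change,  # hypothetical Python handler
#   )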
class VDialog(AbstractElement):
"""
Vuetify's VDialog component. See more info and examples |VDialog_vuetify_link|.
.. |VDialog_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-dialog" target="_blank">here</a>
:param activator: Designate a custom activator when the `activator` slot is not used. String can be any valid querySelector and Object can be any valid Node.
:type any:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param close_delay: Milliseconds to wait before closing component.
:type ['number', 'string']:
:param content_class: Applies a custom class to the detached element. This is useful because the content is moved to the beginning of the `v-app` component (unless the **attach** prop is provided) and is not targetable by classes passed directly on the component.
:type string:
:param dark: See description |VDialog_vuetify_link|.
:type boolean:
:param disabled: Disables the ability to open the component.
:type boolean:
:param eager: Will force the component's content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param fullscreen: Changes layout for fullscreen display.
:type boolean:
:param hide_overlay: Hides the display of the overlay.
:type boolean:
:param internal_activator: Detaches the menu content inside of the component as opposed to the document.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_width: Sets the maximum width for the component.
:type ['string', 'number']:
:param no_click_animation: Disables the bounce effect when clicking outside of a `v-dialog`'s content when using the **persistent** prop.
:type boolean:
:param open_delay: Milliseconds to wait before opening component.
:type ['number', 'string']:
:param open_on_focus:
:type boolean:
:param open_on_hover: Designates whether component should activate when its activator is hovered.
:type boolean:
:param origin: See description |VDialog_vuetify_link|.
:type string:
:param overlay_color: Sets the overlay color.
:type string:
:param overlay_opacity: Sets the overlay opacity.
:type ['number', 'string']:
:param persistent: Clicking outside of the element or pressing **esc** key will not deactivate it.
:type boolean:
:param retain_focus: Tab focus will return to the first child of the dialog by default. Disable this when using external tools that require focus such as TinyMCE or vue-clipboard.
:type boolean:
:param return_value:
:type any:
:param scrollable: See description |VDialog_vuetify_link|.
:type boolean:
:param transition: See description |VDialog_vuetify_link|.
:type ['string', 'boolean']:
:param value: Controls whether the component is visible or hidden.
:type any:
:param width: Sets the width for the component.
:type ['string', 'number']:
Events
:param click_outside: Event that fires when clicking outside an active dialog.
:param input: The updated bound model
:param keydown: Event that fires when key is pressed. If dialog is active and not using the **persistent** prop, the **esc** key will deactivate it.
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-dialog", children, **kwargs)
self._attr_names += [
"activator",
"attach",
"close_delay",
"content_class",
"dark",
"disabled",
"eager",
"fullscreen",
"hide_overlay",
"internal_activator",
"light",
"max_width",
"no_click_animation",
"open_delay",
"open_on_focus",
"open_on_hover",
"origin",
"overlay_color",
"overlay_opacity",
"persistent",
"retain_focus",
"return_value",
"scrollable",
"transition",
"value",
"width",
]
self._event_names += [
("click_outside", "click:outside"),
"input",
"keydown",
]
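# --- Usage sketch: VDialog (illustrative only). The dialog's visibility is
# driven by a boolean state variable; child content would normally be added
# via the `children` argument or a `with` block, depending on the trame version.
#
#   dialog = vuetify.VDialog(
#       v_model=("show_dialog", False),
#       max_width=500,
#       persistent=True,  # esc / outside click will not close it
#   )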
class VDivider(AbstractElement):
"""
Vuetify's VDivider component. See more info and examples |VDivider_vuetify_link|.
.. |VDivider_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-divider" target="_blank">here</a>
:param dark: See description |VDivider_vuetify_link|.
:type boolean:
:param inset: Adds indentation (72px) for **normal** dividers, reduces max height for **vertical**.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param vertical: Displays dividers vertically
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-divider", children, **kwargs)
self._attr_names += [
"dark",
"inset",
"light",
"vertical",
]
class VExpansionPanels(AbstractElement):
"""
Vuetify's VExpansionPanels component. See more info and examples |VExpansionPanels_vuetify_link|.
.. |VExpansionPanels_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-expansion-panels" target="_blank">here</a>
:param accordion: Removes the margin around open panels
:type boolean:
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param dark: See description |VExpansionPanels_vuetify_link|.
:type boolean:
:param disabled: Disables the entire expansion-panel
:type boolean:
:param flat: Removes the expansion-panel's elevation and borders
:type boolean:
:param focusable: Makes the expansion-panel headers focusable
:type boolean:
:param hover: Applies a background-color shift on hover to expansion panel headers
:type boolean:
:param inset: Makes the expansion-panel open with an inset style
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param popout: Makes the expansion-panel open with a popout style
:type boolean:
:param readonly: Makes the entire expansion-panel read only.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the border-radius
:type boolean:
:param value: Controls the opened/closed state of content in the expansion-panel. Corresponds to a zero-based index of the currently opened content. If the `multiple` prop (previously `expand` in 1.5.x) is used then it is an array of numbers where each entry corresponds to the index of the opened content. The index order is not relevant.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-expansion-panels", children, **kwargs)
self._attr_names += [
"accordion",
"active_class",
"dark",
"disabled",
"flat",
"focusable",
"hover",
"inset",
"light",
"mandatory",
"max",
"multiple",
"popout",
"readonly",
"tag",
"tile",
"value",
]
class VExpansionPanel(AbstractElement):
"""
Vuetify's VExpansionPanel component. See more info and examples |VExpansionPanel_vuetify_link|.
.. |VExpansionPanel_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-expansion-panel" target="_blank">here</a>
:param active_class: See description |VExpansionPanel_vuetify_link|.
:type string:
:param disabled: Disables the expansion-panel content
:type boolean:
:param readonly: Makes the expansion-panel content read only.
:type boolean:
Events
:param change: Toggles the value of the selected panel
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-expansion-panel", children, **kwargs)
self._attr_names += [
"active_class",
"disabled",
"readonly",
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
]
class VExpansionPanelHeader(AbstractElement):
"""
Vuetify's VExpansionPanelHeader component. See more info and examples |VExpansionPanelHeader_vuetify_link|.
.. |VExpansionPanelHeader_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-expansion-panel-header" target="_blank">here</a>
:param color: See description |VExpansionPanelHeader_vuetify_link|.
:type string:
:param disable_icon_rotate: Removes the icon rotation animation when expanding a panel
:type boolean:
:param expand_icon: Set the expand action icon
:type string:
:param hide_actions: Hide the expand icon in the content header
:type boolean:
:param ripple: See description |VExpansionPanelHeader_vuetify_link|.
:type ['boolean', 'object']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-expansion-panel-header", children, **kwargs)
self._attr_names += [
"color",
"disable_icon_rotate",
"expand_icon",
"hide_actions",
"ripple",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
]
class VExpansionPanelContent(AbstractElement):
"""
Vuetify's VExpansionPanelContent component. See more info and examples |VExpansionPanelContent_vuetify_link|.
.. |VExpansionPanelContent_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-expansion-panel-content" target="_blank">here</a>
:param color: See description |VExpansionPanelContent_vuetify_link|.
:type string:
:param eager: Will force the component's content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-expansion-panel-content", children, **kwargs)
self._attr_names += [
"color",
"eager",
]
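# --- Usage sketch: expansion panels (illustrative only). Shows how the three
# panel classes nest; children are passed as lists, which is one of the
# composition styles trame supports.
#
#   panels = vuetify.VExpansionPanels(multiple=True, children=[
#       vuetify.VExpansionPanel(children=[
#           vuetify.VExpansionPanelHeader(children=["Settings"]),
#           vuetify.VExpansionPanelContent(children=["Panel body goes here"]),
#       ]),
#   ])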
class VFileInput(AbstractElement):
"""
Vuetify's VFileInput component. See more info and examples |VFileInput_vuetify_link|.
.. |VFileInput_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-file-input" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon outside the component's input, uses the same syntax as `v-icon`
:type string:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param chips: Changes display of selections to chips
:type boolean:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Add input clear functionality, default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VFileInput_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_size_string: See description |VFileInput_vuetify_link|.
:type string:
:param counter_string: See description |VFileInput_vuetify_link|.
:type string:
:param counter_value:
:type function:
:param dark: See description |VFileInput_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disables the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hide_input: Displays only the icon, hiding the input (file names)
:type boolean:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Adds the **multiple** attribute to the input, allowing multiple file selections.
:type boolean:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param show_size: Sets the displayed size of selected file(s). When using **true** will default to _1000_ displaying (**kB, MB, GB**) while _1024_ will display (**KiB, MiB, GiB**).
:type ['boolean', 'number']:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param small_chips: Changes display of selections to chips with the **small** property
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param truncate_length: The length of a filename before it is truncated with ellipsis
:type ['number', 'string']:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: See description |VFileInput_vuetify_link|.
:type any:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-file-input", children, **kwargs)
self._attr_names += [
"append_icon",
"append_outer_icon",
"autofocus",
"background_color",
"chips",
"clear_icon",
"clearable",
"color",
"counter",
"counter_size_string",
"counter_string",
"counter_value", # JS functions unimplemented
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"filled",
"flat",
"full_width",
"height",
"hide_details",
"hide_input",
"hint",
"id",
"label",
"light",
"loader_height",
"loading",
"messages",
"multiple",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"reverse",
"rounded",
"rules",
"shaped",
"show_size",
"single_line",
"small_chips",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"truncate_length",
"type",
"validate_on_blur",
"value",
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
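# --- Usage sketch: VFileInput (illustrative only). With `multiple=True` the
# bound model is a list of file objects; "uploaded_files" is a hypothetical
# state name.
#
#   file_input = vuetify.VFileInput(
#       v_model=("uploaded_files", None),
#       multiple=True,
#       show_size=True,      # display file sizes next to the names
#       truncate_length=25,  # shorten long file names with an ellipsis
#   )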
class VFooter(AbstractElement):
"""
Vuetify's VFooter component. See more info and examples |VFooter_vuetify_link|.
.. |VFooter_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-footer" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param app: See description |VFooter_vuetify_link|.
:type boolean:
:param color: See description |VFooter_vuetify_link|.
:type string:
:param dark: See description |VFooter_vuetify_link|.
:type boolean:
:param elevation: See description |VFooter_vuetify_link|.
:type ['number', 'string']:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param inset: Positions the toolbar offset from an application `v-navigation-drawer`
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param padless: Removes all padding from the footer
:type boolean:
:param rounded: See description |VFooter_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-footer", children, **kwargs)
self._attr_names += [
"absolute",
"app",
"color",
"dark",
"elevation",
"fixed",
"height",
"inset",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"outlined",
"padless",
"rounded",
"shaped",
"tag",
"tile",
"width",
]
class VForm(AbstractElement):
"""
Vuetify's VForm component. See more info and examples |VForm_vuetify_link|.
.. |VForm_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-form" target="_blank">here</a>
:param disabled: Puts all children inputs into a disabled state.
:type boolean:
:param lazy_validation: If enabled, **value** will always be _true_ unless there are visible validation errors. You can still call `validate()` to manually trigger validation
:type boolean:
:param readonly: Puts all children inputs into a readonly state.
:type boolean:
:param value: A boolean value representing the validity of the form.
:type boolean:
Events
:param input: The updated bound model
:param submit: Emitted when form is submitted
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-form", children, **kwargs)
self._attr_names += [
"disabled",
"lazy_validation",
"readonly",
"value",
]
self._event_names += [
"input",
"submit",
]
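# --- Usage sketch: VForm (illustrative only). With `lazy_validation=True` the
# bound validity flag stays true until visible errors appear; "form_valid" is
# a hypothetical state name and the child inputs are omitted.
#
#   form = vuetify.VForm(
#       v_model=("form_valid", True),
#       lazy_validation=True,
#       submit=on_submit,  # hypothetical Python handler
#   )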
class VContainer(AbstractElement):
"""
Vuetify's VContainer component. See more info and examples |VContainer_vuetify_link|.
.. |VContainer_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-container" target="_blank">here</a>
:param fluid: Removes viewport maximum-width size breakpoints
:type boolean:
:param id: Sets the DOM id on the component
:type string:
:param tag: Specify a custom tag used on the root element.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-container", children, **kwargs)
self._attr_names += [
"fluid",
"id",
"tag",
]
class VCol(AbstractElement):
"""
Vuetify's VCol component. See more info and examples |VCol_vuetify_link|.
.. |VCol_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-col" target="_blank">here</a>
:param align_self: See description |VCol_vuetify_link|.
:type string:
:param cols: Sets the default number of columns the component extends. Available options are **1 -> 12** and **auto**.
:type ['boolean', 'string', 'number']:
:param lg: Changes the number of columns on large and greater breakpoints.
:type ['boolean', 'string', 'number']:
:param md: Changes the number of columns on medium and greater breakpoints.
:type ['boolean', 'string', 'number']:
:param offset: Sets the default offset for the column.
:type ['string', 'number']:
:param offset_lg: Changes the offset of the component on large and greater breakpoints.
:type ['string', 'number']:
:param offset_md: Changes the offset of the component on medium and greater breakpoints.
:type ['string', 'number']:
:param offset_sm: Changes the offset of the component on small and greater breakpoints.
:type ['string', 'number']:
:param offset_xl: Changes the offset of the component on extra large and greater breakpoints.
:type ['string', 'number']:
:param order: See description |VCol_vuetify_link|.
:type ['string', 'number']:
:param order_lg: Changes the order of the component on large and greater breakpoints.
:type ['string', 'number']:
:param order_md: Changes the order of the component on medium and greater breakpoints.
:type ['string', 'number']:
:param order_sm: Changes the order of the component on small and greater breakpoints.
:type ['string', 'number']:
:param order_xl: Changes the order of the component on extra large and greater breakpoints.
:type ['string', 'number']:
:param sm: Changes the number of columns on small and greater breakpoints.
:type ['boolean', 'string', 'number']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param xl: Changes the number of columns on extra large and greater breakpoints.
:type ['boolean', 'string', 'number']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-col", children, **kwargs)
self._attr_names += [
"align_self",
"cols",
"lg",
"md",
"offset",
"offset_lg",
"offset_md",
"offset_sm",
"offset_xl",
"order",
"order_lg",
"order_md",
"order_sm",
"order_xl",
"sm",
"tag",
"xl",
]
class VRow(AbstractElement):
"""
Vuetify's VRow component. See more info and examples |VRow_vuetify_link|.
.. |VRow_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-row" target="_blank">here</a>
:param align: See description |VRow_vuetify_link|.
:type string:
:param align_content: See description |VRow_vuetify_link|.
:type string:
:param align_content_lg: Changes the **align-content** property on large and greater breakpoints.
:type string:
:param align_content_md: Changes the **align-content** property on medium and greater breakpoints.
:type string:
:param align_content_sm: Changes the **align-content** property on small and greater breakpoints.
:type string:
:param align_content_xl: Changes the **align-content** property on extra large and greater breakpoints.
:type string:
:param align_lg: Changes the **align-items** property on large and greater breakpoints.
:type string:
:param align_md: Changes the **align-items** property on medium and greater breakpoints.
:type string:
:param align_sm: Changes the **align-items** property on small and greater breakpoints.
:type string:
:param align_xl: Changes the **align-items** property on extra large and greater breakpoints.
:type string:
:param dense: Reduces the gutter between `v-col`s.
:type boolean:
:param justify: See description |VRow_vuetify_link|.
:type string:
:param justify_lg: Changes the **justify-content** property on large and greater breakpoints.
:type string:
:param justify_md: Changes the **justify-content** property on medium and greater breakpoints.
:type string:
:param justify_sm: Changes the **justify-content** property on small and greater breakpoints.
:type string:
:param justify_xl: Changes the **justify-content** property on extra large and greater breakpoints.
:type string:
:param no_gutters: Removes the gutter between `v-col`s.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-row", children, **kwargs)
self._attr_names += [
"align",
"align_content",
"align_content_lg",
"align_content_md",
"align_content_sm",
"align_content_xl",
"align_lg",
"align_md",
"align_sm",
"align_xl",
"dense",
"justify",
"justify_lg",
"justify_md",
"justify_sm",
"justify_xl",
"no_gutters",
"tag",
]
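# Illustrative usage sketch (defined, not executed, at import time): the
# standard 12-column grid built from VContainer > VRow > VCol. Breakpoint
# kwargs such as ``md`` are assumed to map onto the Vuetify props of the
# same name documented above.
def _example_grid():
    return VContainer(
        children=[
            VRow(
                children=[
                    # Full width on small screens, 8/4 split from "md" up.
                    VCol(children=["main"], cols=12, md=8),
                    VCol(children=["sidebar"], cols=12, md=4),
                ],
                dense=True,  # tighter gutters between the columns
            ),
        ],
        fluid=True,  # remove the viewport max-width breakpoints
    )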
class VSpacer(AbstractElement):
"""
Vuetify's VSpacer component. See more info and examples |VSpacer_vuetify_link|.
.. |VSpacer_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-spacer" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-spacer", children, **kwargs)
class VLayout(AbstractElement):
"""
Vuetify's VLayout component. See more info and examples |VLayout_vuetify_link|.
.. |VLayout_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-layout" target="_blank">here</a>
:param align_baseline:
:type Boolean:
:param align_center:
:type Boolean:
:param align_content_center:
:type Boolean:
:param align_content_end:
:type Boolean:
:param align_content_space_around:
:type Boolean:
:param align_content_space_between:
:type Boolean:
:param align_content_start:
:type Boolean:
:param align_end:
:type Boolean:
:param align_start:
:type Boolean:
:param column:
:type boolean:
:param d_{type}:
:type Boolean:
:param fill_height:
:type Boolean:
:param id: Sets the DOM id on the component
:type string:
:param justify_center:
:type Boolean:
:param justify_end:
:type Boolean:
:param justify_space_around:
:type Boolean:
:param justify_space_between:
:type Boolean:
:param justify_start:
:type Boolean:
:param reverse:
:type boolean:
:param row:
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type String:
:param wrap:
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-layout", children, **kwargs)
self._attr_names += [
"align_baseline",
"align_center",
"align_content_center",
"align_content_end",
"align_content_space_around",
"align_content_space_between",
"align_content_start",
"align_end",
"align_start",
"column",
"d_{type}",
"fill_height",
"id",
"justify_center",
"justify_end",
"justify_space_around",
"justify_space_between",
"justify_start",
"reverse",
"row",
"tag",
"wrap",
]
class VFlex(AbstractElement):
"""
Vuetify's VFlex component. See more info and examples |VFlex_vuetify_link|.
.. |VFlex_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-flex" target="_blank">here</a>
:param (size)(1_12):
:type boolean:
:param align_self_baseline:
:type boolean:
:param align_self_center:
:type boolean:
:param align_self_end:
:type boolean:
:param align_self_start:
:type boolean:
:param grow:
:type boolean:
:param id: Sets the DOM id on the component
:type string:
:param offset_(size)(0_12):
:type boolean:
:param order_(size)(1_12):
:type boolean:
:param shrink:
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-flex", children, **kwargs)
self._attr_names += [
"sm1",
"sm2",
"sm3",
"sm4",
"sm5",
"sm6",
"sm7",
"sm8",
"sm9",
"sm10",
"sm11",
"sm12",
"md1",
"md2",
"md3",
"md4",
"md5",
"md6",
"md7",
"md8",
"md9",
"md10",
"md11",
"md12",
"lg1",
"lg2",
"lg3",
"lg4",
"lg5",
"lg6",
"lg7",
"lg8",
"lg9",
"lg10",
"lg11",
"lg12",
"xl1",
"xl2",
"xl3",
"xl4",
"xl5",
"xl6",
"xl7",
"xl8",
"xl9",
"xl10",
"xl11",
"xl12",
"align_self_baseline",
"align_self_center",
"align_self_end",
"align_self_start",
"grow",
"id",
"offset_sm0",
"offset_sm1",
"offset_sm2",
"offset_sm3",
"offset_sm4",
"offset_sm5",
"offset_sm6",
"offset_sm7",
"offset_sm8",
"offset_sm9",
"offset_sm10",
"offset_sm11",
"offset_sm12",
"offset_md0",
"offset_md1",
"offset_md2",
"offset_md3",
"offset_md4",
"offset_md5",
"offset_md6",
"offset_md7",
"offset_md8",
"offset_md9",
"offset_md10",
"offset_md11",
"offset_md12",
"offset_lg0",
"offset_lg1",
"offset_lg2",
"offset_lg3",
"offset_lg4",
"offset_lg5",
"offset_lg6",
"offset_lg7",
"offset_lg8",
"offset_lg9",
"offset_lg10",
"offset_lg11",
"offset_lg12",
"offset_xl0",
"offset_xl1",
"offset_xl2",
"offset_xl3",
"offset_xl4",
"offset_xl5",
"offset_xl6",
"offset_xl7",
"offset_xl8",
"offset_xl9",
"offset_xl10",
"offset_xl11",
"offset_xl12",
"order_sm1",
"order_sm2",
"order_sm3",
"order_sm4",
"order_sm5",
"order_sm6",
"order_sm7",
"order_sm8",
"order_sm9",
"order_sm10",
"order_sm11",
"order_sm12",
"order_md1",
"order_md2",
"order_md3",
"order_md4",
"order_md5",
"order_md6",
"order_md7",
"order_md8",
"order_md9",
"order_md10",
"order_md11",
"order_md12",
"order_lg1",
"order_lg2",
"order_lg3",
"order_lg4",
"order_lg5",
"order_lg6",
"order_lg7",
"order_lg8",
"order_lg9",
"order_lg10",
"order_lg11",
"order_lg12",
"order_xl1",
"order_xl2",
"order_xl3",
"order_xl4",
"order_xl5",
"order_xl6",
"order_xl7",
"order_xl8",
"order_xl9",
"order_xl10",
"order_xl11",
"order_xl12",
"shrink",
"tag",
]
class VHover(AbstractElement):
"""
Vuetify's VHover component. See more info and examples |VHover_vuetify_link|.
.. |VHover_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-hover" target="_blank">here</a>
:param close_delay: Milliseconds to wait before closing component.
:type ['number', 'string']:
:param disabled: Turns off hover functionality
:type boolean:
:param open_delay: Milliseconds to wait before opening component.
:type ['number', 'string']:
:param value: Controls whether the component is visible or hidden.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-hover", children, **kwargs)
self._attr_names += [
"close_delay",
"disabled",
"open_delay",
"value",
]
class VIcon(AbstractElement):
"""
Vuetify's VIcon component. See more info and examples |VIcon_vuetify_link|.
.. |VIcon_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-icon" target="_blank">here</a>
:param color: See description |VIcon_vuetify_link|.
:type string:
:param dark: See description |VIcon_vuetify_link|.
:type boolean:
:param dense: Makes icon smaller (20px)
:type boolean:
:param disabled: Disable the input
:type boolean:
:param large: Makes the component large.
:type boolean:
:param left: Applies appropriate margins to the icon inside of a button when placed to the **left** of another element or text
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param right: Applies appropriate margins to the icon inside of a button when placed to the **right** of another element or text
:type boolean:
:param size: Specifies a custom font size for the icon
:type ['number', 'string']:
:param small: Makes the component small.
:type boolean:
:param tag: Specifies a custom tag to be used
:type string:
:param x_large: Makes the component extra large.
:type boolean:
:param x_small: Makes the component extra small.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-icon", children, **kwargs)
self._attr_names += [
"color",
"dark",
"dense",
"disabled",
"large",
"left",
"light",
"right",
"size",
"small",
"tag",
"x_large",
"x_small",
]
class VImg(AbstractElement):
"""
Vuetify's VImg component. See more info and examples |VImg_vuetify_link|.
.. |VImg_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-img" target="_blank">here</a>
:param alt: Alternate text for screen readers. Leave empty for decorative images
:type string:
:param aspect_ratio: Calculated as `width/height`, so for a 1920x1080px image this will be `1.7778`. Will be calculated automatically if omitted
:type ['string', 'number']:
:param contain: Prevents the image from being cropped if it doesn't fit
:type boolean:
:param content_class: Apply a custom class to the responsive content div.
:type string:
:param dark: See description |VImg_vuetify_link|.
:type boolean:
:param eager: Will force the component's content to render on mount. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param gradient: See description |VImg_vuetify_link|.
:type string:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param lazy_src: See description |VImg_vuetify_link|.
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param options: See description |VImg_vuetify_link|.
:type object:
:param position: See description |VImg_vuetify_link|.
:type string:
:param sizes: See description |VImg_vuetify_link|.
:type string:
:param src: The image URL. This prop is mandatory
:type ['string', 'object']:
:param srcset: See description |VImg_vuetify_link|.
:type string:
:param transition: The transition to use when switching from `lazy-src` to `src`
:type ['boolean', 'string']:
:param width: Sets the width for the component.
:type ['number', 'string']:
Events
:param error: Emitted when there is an error
:param load: Emitted when image is loaded
:param loadstart: Emitted when the image starts to load
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-img", children, **kwargs)
self._attr_names += [
"alt",
"aspect_ratio",
"contain",
"content_class",
"dark",
"eager",
"gradient",
"height",
"lazy_src",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"options",
"position",
"sizes",
"src",
"srcset",
"transition",
"width",
]
self._event_names += [
"error",
"load",
"loadstart",
]
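# Illustrative usage sketch (defined, not executed, at import time): a
# fixed-ratio image. As the docstring notes, ``aspect_ratio`` is
# width/height, so a 1920x1080 source gives 1920 / 1080 ~= 1.7778. The
# URLs are placeholders, not real assets.
def _example_img():
    return VImg(
        src="https://example.com/hero.jpg",       # placeholder URL
        lazy_src="https://example.com/tiny.jpg",  # low-res preview while loading
        aspect_ratio=1920 / 1080,
        contain=True,  # avoid cropping when the container ratio differs
    )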
class VInput(AbstractElement):
"""
Vuetify's VInput component. See more info and examples |VInput_vuetify_link|.
.. |VInput_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-input" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param background_color: Changes the background-color of the input
:type string:
:param color: See description |VInput_vuetify_link|.
:type string:
:param dark: See description |VInput_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value, etc.) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type boolean:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-input", children, **kwargs)
self._attr_names += [
"append_icon",
"background_color",
"color",
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"height",
"hide_details",
"hint",
"id",
"label",
"light",
"loading",
"messages",
"persistent_hint",
"prepend_icon",
"readonly",
"rules",
"success",
"success_messages",
"validate_on_blur",
"value",
]
self._event_names += [
"change",
("click_append", "click:append"),
("click_prepend", "click:prepend"),
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
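# Note on the event table above: plain strings are Vue events whose names
# are already valid Python identifiers, while ("click_append",
# "click:append") pairs alias a Python-safe snake_case name to the real
# Vue event name containing ":". A sketch of how such a table can be
# normalized (the actual dispatch lives in AbstractElement, not here):
def _normalized_event_names(element):
    for entry in element._event_names:
        if isinstance(entry, tuple):
            python_name, vue_event = entry
        else:
            python_name = vue_event = entry
        yield python_name, vue_event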
class VItem(AbstractElement):
"""
Vuetify's VItem component. See more info and examples |VItem_vuetify_link|.
.. |VItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-item" target="_blank">here</a>
:param active_class: See description |VItem_vuetify_link|.
:type string:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param value: The value used when the component is selected in a group. If not provided, the index will be used.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-item", children, **kwargs)
self._attr_names += [
"active_class",
"disabled",
"value",
]
class VItemGroup(AbstractElement):
"""
Vuetify's VItemGroup component. See more info and examples |VItemGroup_vuetify_link|.
.. |VItemGroup_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-item-group" target="_blank">here</a>
:param active_class: See description |VItemGroup_vuetify_link|.
:type string:
:param dark: See description |VItemGroup_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param value: The designated model value for the component.
:type any:
Events
:param change: Emitted when the component value is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-item-group", children, **kwargs)
self._attr_names += [
"active_class",
"dark",
"light",
"mandatory",
"max",
"multiple",
"tag",
"value",
]
self._event_names += [
"change",
]
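# Illustrative usage sketch (defined, not executed, at import time): a
# group that always keeps a selection and allows several at once. Per the
# docstring above, ``value`` must be an array when ``multiple`` is set.
def _example_item_group():
    return VItemGroup(
        children=[VItem(value="a"), VItem(value="b"), VItem(value="c")],
        mandatory=True,
        multiple=True,
        value=["a"],
    )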
class VLazy(AbstractElement):
"""
Vuetify's VLazy component. See more info and examples |VLazy_vuetify_link|.
.. |VLazy_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-lazy" target="_blank">here</a>
:param height: Sets the height for the component.
:type ['number', 'string']:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param options: See description |VLazy_vuetify_link|.
:type object:
:param tag: Specify a custom tag used on the root element.
:type string:
:param transition: See description |VLazy_vuetify_link|.
:type string:
:param value: Controls whether the component is visible or hidden.
:type any:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-lazy", children, **kwargs)
self._attr_names += [
"height",
"max_height",
"max_width",
"min_height",
"min_width",
"options",
"tag",
"transition",
"value",
"width",
]
class VListItemActionText(AbstractElement):
"""
Vuetify's VListItemActionText component. See more info and examples |VListItemActionText_vuetify_link|.
.. |VListItemActionText_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-action-text" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-action-text", children, **kwargs)
class VListItemContent(AbstractElement):
"""
Vuetify's VListItemContent component. See more info and examples |VListItemContent_vuetify_link|.
.. |VListItemContent_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-content" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-content", children, **kwargs)
class VListItemTitle(AbstractElement):
"""
Vuetify's VListItemTitle component. See more info and examples |VListItemTitle_vuetify_link|.
.. |VListItemTitle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-title" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-title", children, **kwargs)
class VListItemSubtitle(AbstractElement):
"""
Vuetify's VListItemSubtitle component. See more info and examples |VListItemSubtitle_vuetify_link|.
.. |VListItemSubtitle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-subtitle" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-subtitle", children, **kwargs)
class VList(AbstractElement):
"""
Vuetify's VList component. See more info and examples |VList_vuetify_link|.
.. |VList_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list" target="_blank">here</a>
:param color: See description |VList_vuetify_link|.
:type string:
:param dark: See description |VList_vuetify_link|.
:type boolean:
:param dense: Lowers max height of list tiles
:type boolean:
:param disabled: Disables all children `v-list-item` components
:type boolean:
:param elevation: See description |VList_vuetify_link|.
:type ['number', 'string']:
:param expand: Will only collapse when explicitly closed
:type boolean:
:param flat: Remove the highlighted background on active `v-list-item`s
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param nav: See description |VList_vuetify_link|.
:type boolean:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param rounded: Rounds the `v-list-item` edges
:type boolean:
:param shaped: Provides an alternative active style for `v-list-item`.
:type boolean:
:param subheader: Removes top padding. Used when previous sibling is a header
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param three_line: See description |VList_vuetify_link|.
:type boolean:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param two_line: See description |VList_vuetify_link|.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list", children, **kwargs)
self._attr_names += [
"color",
"dark",
"dense",
"disabled",
"elevation",
"expand",
"flat",
"height",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"nav",
"outlined",
"rounded",
"shaped",
"subheader",
"tag",
"three_line",
"tile",
"two_line",
"width",
]
class VListGroup(AbstractElement):
"""
Vuetify's VListGroup component. See more info and examples |VListGroup_vuetify_link|.
.. |VListGroup_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-group" target="_blank">here</a>
:param active_class: See description |VListGroup_vuetify_link|.
:type string:
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param color: See description |VListGroup_vuetify_link|.
:type string:
:param disabled: Disables all children `v-list-item` components
:type boolean:
:param eager: Will force the component's content to render on mount. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param group: Assign a route namespace. Accepts a string or regexp for determining active state
:type ['string', 'regexp']:
:param no_action: Removes left padding assigned for action icons from group items
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param ripple: See description |VListGroup_vuetify_link|.
:type ['boolean', 'object']:
:param sub_group: Designate the component as nested list group
:type boolean:
:param value: Expands / collapses the list-group
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-group", children, **kwargs)
self._attr_names += [
"active_class",
"append_icon",
"color",
"disabled",
"eager",
"group",
"no_action",
"prepend_icon",
"ripple",
"sub_group",
"value",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
]
class VListItem(AbstractElement):
"""
Vuetify's VListItem component. See more info and examples |VListItem_vuetify_link|.
.. |VListItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item" target="_blank">here</a>
:param active_class: See description |VListItem_vuetify_link|.
:type string:
:param append: See description |VListItem_vuetify_link|.
:type boolean:
:param color: Applies specified color to the control when in an **active** state or **input-value** is **true** - it can be the name of material color (for example `success` or `purple`) or css color (`#033` or `rgba(255, 0, 0, 0.5)`)
:type string:
:param dark: See description |VListItem_vuetify_link|.
:type boolean:
:param dense: Lowers max height of list tiles
:type boolean:
:param disabled: Disables the component
:type boolean:
:param exact: See description |VListItem_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VListItem_vuetify_link|.
:type string:
:param exact_path: See description |VListItem_vuetify_link|.
:type boolean:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param inactive: If set, the list tile will not be rendered as a link even if it has to/href prop or @click handler
:type boolean:
:param input_value: Controls the **active** state of the item. This is typically used to highlight the component
:type any:
:param light: Applies the light theme variant to the component.
:type boolean:
:param link: Designates that the component is a link. This is automatic when using the **href** or **to** prop.
:type boolean:
:param nuxt: See description |VListItem_vuetify_link|.
:type boolean:
:param replace: See description |VListItem_vuetify_link|.
:type boolean:
:param ripple: See description |VListItem_vuetify_link|.
:type ['boolean', 'object']:
:param selectable: See description |VListItem_vuetify_link|.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param three_line: See description |VListItem_vuetify_link|.
:type boolean:
:param to: See description |VListItem_vuetify_link|.
:type ['string', 'object']:
:param two_line: See description |VListItem_vuetify_link|.
:type boolean:
:param value: See description |VListItem_vuetify_link|.
:type any:
Events
:param keydown:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item", children, **kwargs)
self._attr_names += [
"active_class",
"append",
"color",
"dark",
"dense",
"disabled",
"exact",
"exact_active_class",
"exact_path",
"href",
"inactive",
"input_value",
"light",
"link",
"nuxt",
"replace",
"ripple",
"selectable",
"tag",
"target",
"three_line",
"to",
"two_line",
"value",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
"keydown",
]
class VListItemAction(AbstractElement):
"""
Vuetify's VListItemAction component. See more info and examples |VListItemAction_vuetify_link|.
.. |VListItemAction_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-action" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-action", children, **kwargs)
class VListItemAvatar(AbstractElement):
"""
Vuetify's VListItemAvatar component. See more info and examples |VListItemAvatar_vuetify_link|.
.. |VListItemAvatar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-avatar" target="_blank">here</a>
:param color: See description |VListItemAvatar_vuetify_link|.
:type string:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param horizontal: Uses an alternative horizontal style.
:type boolean:
:param left: See description |VListItemAvatar_vuetify_link|.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param right: See description |VListItemAvatar_vuetify_link|.
:type boolean:
:param rounded: See description |VListItemAvatar_vuetify_link|.
:type ['boolean', 'string']:
:param size: Sets the height and width of the component.
:type ['number', 'string']:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-avatar", children, **kwargs)
self._attr_names += [
"color",
"height",
"horizontal",
"left",
"max_height",
"max_width",
"min_height",
"min_width",
"right",
"rounded",
"size",
"tile",
"width",
]
class VListItemIcon(AbstractElement):
"""
Vuetify's VListItemIcon component. See more info and examples |VListItemIcon_vuetify_link|.
.. |VListItemIcon_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-icon" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-icon", children, **kwargs)
class VListItemGroup(AbstractElement):
"""
Vuetify's VListItemGroup component. See more info and examples |VListItemGroup_vuetify_link|.
.. |VListItemGroup_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-list-item-group" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param color: See description |VListItemGroup_vuetify_link|.
:type string:
:param dark: See description |VListItemGroup_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param value: Sets the active list-item inside the list-group
:type any:
Events
:param change: Emitted when the component value is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-list-item-group", children, **kwargs)
self._attr_names += [
"active_class",
"color",
"dark",
"light",
"mandatory",
"max",
"multiple",
"tag",
"value",
]
self._event_names += [
"change",
]
class VMain(AbstractElement):
"""
Vuetify's VMain component. See more info and examples |VMain_vuetify_link|.
.. |VMain_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-main" target="_blank">here</a>
:param tag: Specify a custom tag used on the root element.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-main", children, **kwargs)
self._attr_names += [
"tag",
]
class VMenu(AbstractElement):
"""
Vuetify's VMenu component. See more info and examples |VMenu_vuetify_link|.
.. |VMenu_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-menu" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param activator: Designate a custom activator when the `activator` slot is not used. String can be any valid querySelector and Object can be any valid Node.
:type any:
:param allow_overflow: Removes overflow re-positioning for the content
:type boolean:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param auto: Centers list on selected element
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param close_delay: Milliseconds to wait before closing component. Only works with the **open-on-hover** prop
:type ['number', 'string']:
:param close_on_click: Designates if menu should close on outside-activator click
:type boolean:
:param close_on_content_click: Designates if menu should close when its content is clicked
:type boolean:
:param content_class: Applies a custom class to the detached element. This is useful because the content is moved to the beginning of the `v-app` component (unless the **attach** prop is provided) and is not targetable by classes passed directly on the component.
:type string:
:param dark: See description |VMenu_vuetify_link|.
:type boolean:
:param disable_keys: Removes all keyboard interaction
:type boolean:
:param disabled: Disables the menu
:type boolean:
:param eager: Will force the component's content to render on mount. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param internal_activator: Detaches the menu content inside of the component as opposed to the document.
:type boolean:
:param left: Aligns the component towards the left.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the max height of the menu content
:type ['number', 'string']:
:param max_width: Sets the maximum width for the content
:type ['number', 'string']:
:param min_width: Sets the minimum width for the content
:type ['number', 'string']:
:param nudge_bottom: Nudge the content to the bottom
:type ['number', 'string']:
:param nudge_left: Nudge the content to the left
:type ['number', 'string']:
:param nudge_right: Nudge the content to the right
:type ['number', 'string']:
:param nudge_top: Nudge the content to the top
:type ['number', 'string']:
:param nudge_width: Nudge the content width
:type ['number', 'string']:
:param offset_overflow: Causes the component to flip to the opposite side when repositioned due to overflow
:type boolean:
:param offset_x: Offset the menu on the x-axis. Works in conjunction with direction left/right
:type boolean:
:param offset_y: Offset the menu on the y-axis. Works in conjunction with direction top/bottom
:type boolean:
:param open_delay: Milliseconds to wait before opening component. Only works with the **open-on-hover** prop
:type ['number', 'string']:
:param open_on_click: Designates whether menu should open on activator click
:type boolean:
:param open_on_focus:
:type boolean:
:param open_on_hover: Designates whether menu should open on activator hover
:type boolean:
:param origin: See description |VMenu_vuetify_link|.
:type string:
:param position_x: Used to position the content when not using an activator slot
:type number:
:param position_y: Used to position the content when not using an activator slot
:type number:
:param return_value: The value that is updated when the menu is closed - must be primitive. Dot notation is supported
:type any:
:param right: Aligns the component towards the right.
:type boolean:
:param rounded: See description |VMenu_vuetify_link|.
:type ['boolean', 'string']:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param top: Aligns the content towards the top.
:type boolean:
:param transition: See description |VMenu_vuetify_link|.
:type ['boolean', 'string']:
:param value: Controls whether the component is visible or hidden.
:type any:
:param z_index: The z-index used for the component
:type ['number', 'string']:
Events
:param input: The updated bound model
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-menu", children, **kwargs)
self._attr_names += [
"absolute",
"activator",
"allow_overflow",
"attach",
"auto",
"bottom",
"close_delay",
"close_on_click",
"close_on_content_click",
"content_class",
"dark",
"disable_keys",
"disabled",
"eager",
"fixed",
"internal_activator",
"left",
"light",
"max_height",
"max_width",
"min_width",
"nudge_bottom",
"nudge_left",
"nudge_right",
"nudge_top",
"nudge_width",
"offset_overflow",
"offset_x",
"offset_y",
"open_delay",
"open_on_click",
"open_on_focus",
"open_on_hover",
"origin",
"position_x",
"position_y",
"return_value",
"right",
"rounded",
"tile",
"top",
"transition",
"value",
"z_index",
]
self._event_names += [
"input",
]
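# Illustrative usage sketch (defined, not executed, at import time): a
# dropdown that opens below its activator and closes when an entry is
# clicked. The activator itself is supplied through a slot, which is
# outside the scope of this sketch.
def _example_menu():
    return VMenu(
        children=[_example_nav_list()],
        offset_y=True,               # offset along the y-axis, below the activator
        close_on_content_click=True,
        nudge_bottom=4,              # small gap between activator and content
    )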
class VNavigationDrawer(AbstractElement):
"""
Vuetify's VNavigationDrawer component. See more info and examples |VNavigationDrawer_vuetify_link|.
.. |VNavigationDrawer_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-navigation-drawer" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param app: See description |VNavigationDrawer_vuetify_link|.
:type boolean:
:param bottom: Expands from the bottom of the screen on mobile devices
:type boolean:
:param clipped: A clipped drawer rests under the application toolbar. **Note:** requires the **clipped-left** or **clipped-right** prop on `v-app-bar` to work as intended
:type boolean:
:param color: See description |VNavigationDrawer_vuetify_link|.
:type string:
:param dark: See description |VNavigationDrawer_vuetify_link|.
:type boolean:
:param disable_resize_watcher: Disables the resize watcher that would otherwise automatically open/close the drawer when resizing between mobile and desktop widths.
:type boolean:
:param disable_route_watcher: Disables opening of navigation drawer when route changes
:type boolean:
:param expand_on_hover: Collapses the drawer to a **mini-variant** until hovering with the mouse
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param floating: A floating drawer has no visible container (no border-right)
:type boolean:
:param height: Sets the height of the navigation drawer
:type ['number', 'string']:
:param hide_overlay: Hides the display of the overlay.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mini_variant: Condenses the navigation drawer width; also accepts the **.sync** modifier. With this, the drawer will re-open when clicked
:type boolean:
:param mini_variant_width: Designates the width assigned when the `mini` prop is turned on
:type ['number', 'string']:
:param mobile_breakpoint: Sets the designated mobile breakpoint for the component. This will apply alternate styles for mobile devices such as the `temporary` prop, or activate the `bottom` prop when the breakpoint value is met. Setting the value to `0` will disable this functionality.
:type ['number', 'string']:
:param overlay_color: Sets the overlay color.
:type string:
:param overlay_opacity: Sets the overlay opacity.
:type ['number', 'string']:
:param permanent: The drawer remains visible regardless of screen size
:type boolean:
:param right: Places the navigation drawer on the right
:type boolean:
:param src: See description |VNavigationDrawer_vuetify_link|.
:type ['string', 'object']:
:param stateless: Remove all automated state functionality (resize, mobile, route) and manually control the drawer state
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param temporary: A temporary drawer sits above its application and uses a scrim (overlay) to darken the background
:type boolean:
:param touchless: Disable mobile touch functionality
:type boolean:
:param value: Controls whether the component is visible or hidden.
:type any:
:param width: Sets the width for the component.
:type ['number', 'string']:
Events
:param input: The updated bound model
:param transitionend: Emits event object when transition is complete.
:param update_mini_variant: The `mini-variant.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-navigation-drawer", children, **kwargs)
self._attr_names += [
"absolute",
"app",
"bottom",
"clipped",
"color",
"dark",
"disable_resize_watcher",
"disable_route_watcher",
"expand_on_hover",
"fixed",
"floating",
"height",
"hide_overlay",
"light",
"mini_variant",
"mini_variant_width",
"mobile_breakpoint",
"overlay_color",
"overlay_opacity",
"permanent",
"right",
"src",
"stateless",
"tag",
"temporary",
"touchless",
"value",
"width",
]
self._event_names += [
"input",
"transitionend",
("update_mini_variant", "update:mini-variant"),
]
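# Illustrative usage sketch (defined, not executed, at import time): an
# application drawer clipped under the app bar that collapses to a mini
# variant until hovered. As the docstring notes, ``clipped`` requires the
# matching ``clipped-left``/``clipped-right`` prop on ``v-app-bar``.
def _example_drawer():
    return VNavigationDrawer(
        children=[_example_nav_list()],
        app=True,
        clipped=True,
        expand_on_hover=True,
        mini_variant_width=56,
    )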
class VOverflowBtn(AbstractElement):
"""
Vuetify's VOverflowBtn component. See more info and examples |VOverflowBtn_vuetify_link|.
.. |VOverflowBtn_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-overflow-btn" target="_blank">here</a>
:param allow_overflow: Allow the menu to overflow off the screen
:type boolean:
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon to the outside the component's input, uses same syntax as `v-icon`
:type string:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param auto_select_first: When searching, will always highlight the first option
:type boolean:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param cache_items: Keeps a local _unique_ copy of all items that have been passed through the **items** prop.
:type boolean:
:param chips: Changes display of selections to chips
:type boolean:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Add input clear functionality, default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VOverflowBtn_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_value:
:type function:
:param dark: See description |VOverflowBtn_vuetify_link|.
:type boolean:
:param deletable_chips: Adds a remove icon to selected chips
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disable_lookup: Disables keyboard lookup
:type boolean:
:param disabled: Disables the input
:type boolean:
:param eager: Will force the component's content to render on mount. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param editable: Creates an editable button
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param filter: See description |VOverflowBtn_vuetify_link|.
:type function:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value, etc.) to display
:type ['boolean', 'string']:
:param hide_no_data: Hides the menu when there are no options to show. Useful for preventing the menu from opening before results are fetched asynchronously. Also has the effect of opening the menu when the `items` array changes if not already open.
:type boolean:
:param hide_selected: Do not display in the select menu items that are already selected
:type boolean:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param item_color: Sets color of selected items
:type string:
:param item_disabled: Set property of **items**'s disabled value
:type ['string', 'array', 'function']:
:param item_text: Set property of **items**'s text value
:type ['string', 'array', 'function']:
:param item_value: See description |VOverflowBtn_vuetify_link|.
:type ['string', 'array', 'function']:
:param items: Can be an array of objects or array of strings. When using objects, will look for the **text**, **value** and **disabled** keys. This can be changed using the **item-text**, **item-value** and **item-disabled** props. Objects that have a **header** or **divider** property are considered special cases and generate a list header or divider; these items are not selectable.
:type array:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param menu_props: Pass props through to the `v-menu` component. Accepts either a string for boolean props `menu-props="auto, overflowY"`, or an object `:menu-props="{ auto: true, overflowY: true }"`
:type ['string', 'array', 'object']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Changes select to multiple. Accepts array for value
:type boolean:
:param no_data_text: Display text when there is no data
:type string:
:param no_filter: Do not apply filtering when searching. Useful when data is being filtered server side
:type boolean:
:param open_on_clear: When using the **clearable** prop, once cleared, the select menu will either open or stay open, depending on the current state
:type boolean:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param return_object: Changes the selection behavior to return the object directly rather than the value specified with **item-value**
:type boolean:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param search_input: Search value. Can be used with `.sync` modifier.
:type string:
:param segmented: Creates a segmented button
:type boolean:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param small_chips: Changes display of selections to chips with the **small** property
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: See description |VOverflowBtn_vuetify_link|.
:type function:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
:param update_list_index: Emitted when menu item is selected using keyboard arrows
:param update_search_input: The `search-input.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-overflow-btn", children, **kwargs)
self._attr_names += [
"allow_overflow",
"append_icon",
"append_outer_icon",
"attach",
"auto_select_first",
"autofocus",
"background_color",
"cache_items",
"chips",
"clear_icon",
"clearable",
"color",
"counter",
"counter_value", # JS functions unimplemented
"dark",
"deletable_chips",
"dense",
"disable_lookup",
"disabled",
"eager",
"editable",
"error",
"error_count",
"error_messages",
"filled",
"filter", # JS functions unimplemented
"flat",
"full_width",
"height",
"hide_details",
"hide_no_data",
"hide_selected",
"hint",
"id",
"item_color",
"item_disabled", # JS functions unimplemented
"item_text", # JS functions unimplemented
"item_value", # JS functions unimplemented
"items",
"label",
"light",
"loader_height",
"loading",
"menu_props",
"messages",
"multiple",
"no_data_text",
"no_filter",
"open_on_clear",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"readonly",
"return_object",
"reverse",
"rounded",
"rules",
"search_input",
"segmented",
"shaped",
"single_line",
"small_chips",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"type",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
("update_list_index", "update:list-index"),
("update_search_input", "update:search-input"),
]
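# Illustrative usage sketch (defined, not executed, at import time): an
# editable overflow button fed a plain list of strings. Per the docstring
# above, ``menu_props`` accepts either a comma-separated string or an
# object of props forwarded to the underlying v-menu.
def _example_overflow_btn():
    return VOverflowBtn(
        items=["Apple", "Banana", "Cherry"],
        label="Fruit",
        editable=True,
        menu_props={"auto": True, "overflowY": True},
    )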
class VOverlay(AbstractElement):
"""
Vuetify's VOverlay component. See more info and examples |VOverlay_vuetify_link|.
.. |VOverlay_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-overlay" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param color: See description |VOverlay_vuetify_link|.
:type string:
:param dark: See description |VOverlay_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param opacity: Sets the overlay opacity
:type ['number', 'string']:
:param value: Controls whether the component is visible or hidden.
:type any:
:param z_index: The z-index used for the component
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-overlay", children, **kwargs)
self._attr_names += [
"absolute",
"color",
"dark",
"light",
"opacity",
"value",
"z_index",
]
class VPagination(AbstractElement):
"""
Vuetify's VPagination component. See more info and examples |VPagination_vuetify_link|.
.. |VPagination_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-pagination" target="_blank">here</a>
:param circle: Shape pagination elements as circles
:type boolean:
:param color: See description |VPagination_vuetify_link|.
:type string:
:param current_page_aria_label:
:type string:
:param dark: See description |VPagination_vuetify_link|.
:type boolean:
:param disabled: Disables component
:type boolean:
:param length: The length of the pagination component
:type number:
:param light: Applies the light theme variant to the component.
:type boolean:
:param next_aria_label:
:type string:
:param next_icon: Specify the icon to use for the next icon
:type string:
:param page_aria_label:
:type string:
:param prev_icon: Specify the icon to use for the prev icon
:type string:
:param previous_aria_label:
:type string:
:param total_visible: Specify the max total visible pagination numbers
:type ['number', 'string']:
:param value: Current selected page
:type number:
:param wrapper_aria_label:
:type string:
Events
:param input: The updated bound model
:param next: Emitted when going to next item
:param previous: Emitted when going to previous item
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-pagination", children, **kwargs)
self._attr_names += [
"circle",
"color",
"current_page_aria_label",
"dark",
"disabled",
"length",
"light",
"next_aria_label",
"next_icon",
"page_aria_label",
"prev_icon",
"previous_aria_label",
"total_visible",
"value",
"wrapper_aria_label",
]
self._event_names += [
"input",
"next",
"previous",
]
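# Illustrative usage sketch (defined, not executed, at import time): a
# 20-page pager that renders at most 7 page buttons at a time and starts
# on page 1.
def _example_pagination():
    return VPagination(
        length=20,
        total_visible=7,
        value=1,
        circle=True,
    )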
class VSheet(AbstractElement):
"""
Vuetify's VSheet component. See more info and examples |VSheet_vuetify_link|.
.. |VSheet_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-sheet" target="_blank">here</a>
:param color: See description |VSheet_vuetify_link|.
:type string:
:param dark: See description |VSheet_vuetify_link|.
:type boolean:
:param elevation: See description |VSheet_vuetify_link|.
:type ['number', 'string']:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param rounded: See description |VSheet_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-sheet", children, **kwargs)
self._attr_names += [
"color",
"dark",
"elevation",
"height",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"outlined",
"rounded",
"shaped",
"tag",
"tile",
"width",
]
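# Usage sketch (illustrative): VSheet is a styled container, so props alone
# usually suffice; children can be nested with the context-manager pattern
# trame elements support:
#
#   with VSheet(elevation=2, rounded=True, max_width=400):
#       VSubheader("Settings")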
class VParallax(AbstractElement):
"""
Vuetify's VParallax component. See more info and examples |VParallax_vuetify_link|.
.. |VParallax_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-parallax" target="_blank">here</a>
:param alt: Attaches an alt property to the parallax image
:type string:
:param height: Sets the height for the component
:type ['string', 'number']:
:param src: The image to parallax
:type string:
:param srcset: See description |VParallax_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-parallax", children, **kwargs)
self._attr_names += [
"alt",
"height",
"src",
"srcset",
]
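# Usage sketch (illustrative; the image URL is a placeholder):
#
#   VParallax(src="https://example.com/hero.jpg", height=400, alt="Hero image")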
class VProgressCircular(AbstractElement):
"""
Vuetify's VProgressCircular component. See more info and examples |VProgressCircular_vuetify_link|.
.. |VProgressCircular_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-progress-circular" target="_blank">here</a>
:param button: Deprecated - Pending removal
:type boolean:
:param color: See description |VProgressCircular_vuetify_link|.
:type string:
:param indeterminate: Constantly animates, use when loading progress is unknown.
:type boolean:
:param rotate: Rotates the circle start point in deg
:type ['number', 'string']:
:param size: Sets the diameter of the circle in pixels
:type ['number', 'string']:
:param value: The percentage value for current progress
:type ['number', 'string']:
:param width: Sets the stroke of the circle in pixels
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-progress-circular", children, **kwargs)
self._attr_names += [
"button",
"color",
"indeterminate",
"rotate",
"size",
"value",
"width",
]
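# Usage sketch (illustrative): an indeterminate spinner next to a determinate
# one driven by a hypothetical "progress" state variable:
#
#   VProgressCircular(indeterminate=True, color="primary")
#   VProgressCircular(rotate=-90, size=64, width=6, value=("progress", 40))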
class VProgressLinear(AbstractElement):
"""
Vuetify's VProgressLinear component. See more info and examples |VProgressLinear_vuetify_link|.
.. |VProgressLinear_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-progress-linear" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param active: Reduce the height to 0, hiding component
:type boolean:
:param background_color: Background color, set to component's color if null
:type string:
    :param background_opacity: Background opacity; if null, it defaults to 0.3 when no background color is specified and to 1 otherwise
:type ['number', 'string']:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param buffer_value: The percentage value for the buffer
:type ['number', 'string']:
:param color: See description |VProgressLinear_vuetify_link|.
:type string:
:param dark: See description |VProgressLinear_vuetify_link|.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param height: Sets the height for the component
:type ['number', 'string']:
:param indeterminate: Constantly animates, use when loading progress is unknown.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
    :param query: Animates like the **indeterminate** prop, but in reverse
:type boolean:
:param reverse: Displays reversed progress (right to left in LTR mode and left to right in RTL)
:type boolean:
:param rounded: Adds a border radius to the progress component
:type boolean:
:param stream: An alternative style for portraying loading that works in tandem with **buffer-value**
:type boolean:
:param striped: Adds a stripe background to the filled portion of the progress component
:type boolean:
:param top: Aligns the content towards the top.
:type boolean:
:param value: The designated model value for the component.
:type ['number', 'string']:
Events
:param change: Emitted when the component value is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-progress-linear", children, **kwargs)
self._attr_names += [
"absolute",
"active",
"background_color",
"background_opacity",
"bottom",
"buffer_value",
"color",
"dark",
"fixed",
"height",
"indeterminate",
"light",
"query",
"reverse",
"rounded",
"stream",
"striped",
"top",
"value",
]
self._event_names += [
"change",
]
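# Usage sketch (illustrative): a buffered bar; "loaded" and "buffered" are
# hypothetical state variables bound with trame's tuple syntax:
#
#   VProgressLinear(value=("loaded", 30), buffer_value=("buffered", 60),
#                   stream=True, color="teal")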
class VRadioGroup(AbstractElement):
"""
Vuetify's VRadioGroup component. See more info and examples |VRadioGroup_vuetify_link|.
.. |VRadioGroup_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-radio-group" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param background_color: Changes the background-color of the input
:type string:
:param column: Displays radio buttons in column
:type boolean:
:param dark: See description |VRadioGroup_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param name: Sets the component's name attribute
:type string:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param row: Displays radio buttons in row
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: Apply a custom value comparator function
:type function:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-radio-group", children, **kwargs)
self._attr_names += [
"active_class",
"append_icon",
"background_color",
"column",
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"hide_details",
"hint",
"id",
"label",
"light",
"mandatory",
"max",
"messages",
"multiple",
"name",
"persistent_hint",
"prepend_icon",
"readonly",
"row",
"rules",
"success",
"success_messages",
"tag",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"change",
("click_append", "click:append"),
("click_prepend", "click:prepend"),
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
class VRadio(AbstractElement):
"""
Vuetify's VRadio component. See more info and examples |VRadio_vuetify_link|.
.. |VRadio_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-radio" target="_blank">here</a>
:param active_class: See description |VRadio_vuetify_link|.
:type string:
:param color: See description |VRadio_vuetify_link|.
:type string:
:param dark: See description |VRadio_vuetify_link|.
:type boolean:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param id: Sets the DOM id on the component
:type string:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param name: Sets the component's name attribute
:type string:
:param off_icon: The icon used when inactive
:type string:
:param on_icon: The icon used when active
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param ripple: See description |VRadio_vuetify_link|.
:type ['boolean', 'object']:
:param value: The value used when the component is selected in a group. If not provided, the index will be used.
:type any:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-radio", children, **kwargs)
self._attr_names += [
"active_class",
"color",
"dark",
"disabled",
"id",
"label",
"light",
"name",
"off_icon",
"on_icon",
"readonly",
"ripple",
"value",
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_prepend", "click:prepend"),
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
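# Usage sketch (illustrative): VRadio children are grouped under a
# VRadioGroup whose v_model carries the selected radio's value; "choice" is
# a hypothetical state variable:
#
#   with VRadioGroup(v_model=("choice", "a"), row=True, label="Pick one"):
#       VRadio(label="Option A", value="a")
#       VRadio(label="Option B", value="b")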
class VRangeSlider(AbstractElement):
"""
Vuetify's VRangeSlider component. See more info and examples |VRangeSlider_vuetify_link|.
.. |VRangeSlider_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-range-slider" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param background_color: Changes the background-color of the input
:type string:
:param color: See description |VRangeSlider_vuetify_link|.
:type string:
:param dark: See description |VRangeSlider_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param inverse_label: Reverse the label position. Works with **rtl**.
:type boolean:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param max: Sets the maximum allowed value
:type ['number', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param min: Sets the minimum allowed value
:type ['number', 'string']:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param step: If greater than 0, sets step interval for ticks
:type ['number', 'string']:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param thumb_color: Sets the thumb and thumb label color
:type string:
:param thumb_label: Show thumb label. If `true` it shows label when using slider. If set to `'always'` it always shows label.
:type ['boolean', 'string']:
:param thumb_size: Controls the size of the thumb label.
:type ['number', 'string']:
:param tick_labels: When provided with Array<string>, will attempt to map the labels to each step in index order
:type array:
:param tick_size: Controls the size of **ticks**
:type ['number', 'string']:
:param ticks: Show track ticks. If `true` it shows ticks when using slider. If set to `'always'` it always shows ticks.
:type ['boolean', 'string']:
:param track_color: Sets the track's color
:type string:
:param track_fill_color: Sets the track's fill color
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param vertical: Changes slider direction to vertical
:type boolean:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param end: Slider value emitted at the end of slider movement
:param input: The updated bound model
:param start: Slider value emitted at start of slider movement
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-range-slider", children, **kwargs)
self._attr_names += [
"append_icon",
"background_color",
"color",
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"height",
"hide_details",
"hint",
"id",
"inverse_label",
"label",
"light",
"loader_height",
"loading",
"max",
"messages",
"min",
"persistent_hint",
"prepend_icon",
"readonly",
"rules",
"step",
"success",
"success_messages",
"thumb_color",
"thumb_label",
"thumb_size",
"tick_labels",
"tick_size",
"ticks",
"track_color",
"track_fill_color",
"validate_on_blur",
"value",
"vertical",
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_prepend", "click:prepend"),
"end",
"input",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
"start",
("update_error", "update:error"),
]
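# Usage sketch (illustrative): the range slider's model is a two-element
# array, here a hypothetical "window" state variable:
#
#   VRangeSlider(v_model=("window", [20, 80]), min=0, max=100,
#                thumb_label="always")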
class VRating(AbstractElement):
"""
Vuetify's VRating component. See more info and examples |VRating_vuetify_link|.
.. |VRating_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-rating" target="_blank">here</a>
    :param background_color: The color used for empty icons
:type string:
:param clearable: Allows for the component to be cleared. Triggers when the icon containing the current value is clicked.
:type boolean:
:param close_delay: Milliseconds to wait before closing component.
:type ['number', 'string']:
:param color: See description |VRating_vuetify_link|.
:type string:
:param dark: See description |VRating_vuetify_link|.
:type boolean:
:param dense: Icons have a smaller size
:type boolean:
:param empty_icon: The icon displayed when empty
:type string:
:param full_icon: The icon displayed when full
:type string:
:param half_icon: The icon displayed when half (requires **half-increments** prop)
:type string:
:param half_increments: Allows the selection of half increments
:type boolean:
:param hover: Provides visual feedback when hovering over icons
:type boolean:
:param icon_label: The **aria-label** used for icons
:type string:
:param large: Makes the component large.
:type boolean:
    :param length: The number of ratings to show
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param open_delay: Milliseconds to wait before opening component.
:type ['number', 'string']:
:param readonly: Removes all hover effects and pointer events
:type boolean:
:param ripple: See description |VRating_vuetify_link|.
:type ['boolean', 'object']:
:param size: Sets the height and width of the component.
:type ['number', 'string']:
:param small: Makes the component small.
:type boolean:
:param value: The rating value
:type number:
:param x_large: Makes the component extra large.
:type boolean:
:param x_small: Makes the component extra small.
:type boolean:
Events
:param input: Emits the rating number when this value changes
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-rating", children, **kwargs)
self._attr_names += [
"background_color",
"clearable",
"close_delay",
"color",
"dark",
"dense",
"empty_icon",
"full_icon",
"half_icon",
"half_increments",
"hover",
"icon_label",
"large",
"length",
"light",
"open_delay",
"readonly",
"ripple",
"size",
"small",
"value",
"x_large",
"x_small",
]
self._event_names += [
"input",
]
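# Usage sketch (illustrative): a hoverable, half-increment rating bound to a
# hypothetical "stars" state variable:
#
#   VRating(v_model=("stars", 3.5), half_increments=True, hover=True, length=5)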
class VResponsive(AbstractElement):
"""
Vuetify's VResponsive component. See more info and examples |VResponsive_vuetify_link|.
.. |VResponsive_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-responsive" target="_blank">here</a>
:param aspect_ratio: Sets a base aspect ratio, calculated as width/height. This will only set a **minimum** height, the component can still grow if it has a lot of content.
:type ['string', 'number']:
:param content_class: Apply a custom class to the responsive content div.
:type string:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-responsive", children, **kwargs)
self._attr_names += [
"aspect_ratio",
"content_class",
"height",
"max_height",
"max_width",
"min_height",
"min_width",
"width",
]
class VSelect(AbstractElement):
"""
Vuetify's VSelect component. See more info and examples |VSelect_vuetify_link|.
.. |VSelect_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-select" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon to the outside the component's input, uses same syntax as `v-icon`
:type string:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param cache_items: Keeps a local _unique_ copy of all items that have been passed through the **items** prop.
:type boolean:
:param chips: Changes display of selections to chips
:type boolean:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Add input clear functionality, default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VSelect_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_value:
:type function:
:param dark: See description |VSelect_vuetify_link|.
:type boolean:
:param deletable_chips: Adds a remove icon to selected chips
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disable_lookup: Disables keyboard lookup
:type boolean:
:param disabled: Disables the input
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hide_selected: Do not display in the select menu items that are already selected
:type boolean:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param item_color: Sets color of selected items
:type string:
:param item_disabled: Set property of **items**'s disabled value
:type ['string', 'array', 'function']:
:param item_text: Set property of **items**'s text value
:type ['string', 'array', 'function']:
:param item_value: See description |VSelect_vuetify_link|.
:type ['string', 'array', 'function']:
:param items: Can be an array of objects or array of strings. When using objects, will look for a text, value and disabled keys. This can be changed using the **item-text**, **item-value** and **item-disabled** props. Objects that have a **header** or **divider** property are considered special cases and generate a list header or divider; these items are not selectable.
:type array:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param menu_props: Pass props through to the `v-menu` component. Accepts either a string for boolean props `menu-props="auto, overflowY"`, or an object `:menu-props="{ auto: true, overflowY: true }"`
:type ['string', 'array', 'object']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Changes select to multiple. Accepts array for value
:type boolean:
:param no_data_text: Display text when there is no data
:type string:
:param open_on_clear: When using the **clearable** prop, once cleared, the select menu will either open or stay open, depending on the current state
:type boolean:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param return_object: Changes the selection behavior to return the object directly rather than the value specified with **item-value**
:type boolean:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param small_chips: Changes display of selections to chips with the **small** property
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: See description |VSelect_vuetify_link|.
:type function:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
:param update_list_index: Emitted when menu item is selected using keyboard arrows
:param update_search_input: The `search-input.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-select", children, **kwargs)
self.ttsSensitive()
self._attr_names += [
"append_icon",
"append_outer_icon",
"attach",
"autofocus",
"background_color",
"cache_items",
"chips",
"clear_icon",
"clearable",
"color",
"counter",
"counter_value", # JS functions unimplemented
"dark",
"deletable_chips",
"dense",
"disable_lookup",
"disabled",
"eager",
"error",
"error_count",
"error_messages",
"filled",
"flat",
"full_width",
"height",
"hide_details",
"hide_selected",
"hint",
"id",
"item_color",
"item_disabled", # JS functions unimplemented
"item_text", # JS functions unimplemented
"item_value", # JS functions unimplemented
"items",
"label",
"light",
"loader_height",
"loading",
"menu_props",
"messages",
"multiple",
"no_data_text",
"open_on_clear",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"readonly",
"return_object",
"reverse",
"rounded",
"rules",
"shaped",
"single_line",
"small_chips",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"type",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
("update_list_index", "update:list-index"),
("update_search_input", "update:search-input"),
]
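# Usage sketch (illustrative): items may be plain strings or dicts; with
# dicts, item_text / item_value select the display and model fields. The
# "fruit"/"fruits" state variables and their contents are hypothetical:
#
#   VSelect(
#       v_model=("fruit", None),
#       items=("fruits", [{"text": "Apple", "value": "a"},
#                         {"text": "Banana", "value": "b"}]),
#       item_text="text", item_value="value", label="Fruit",
#   )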
class VSkeletonLoader(AbstractElement):
"""
Vuetify's VSkeletonLoader component. See more info and examples |VSkeletonLoader_vuetify_link|.
.. |VSkeletonLoader_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-skeleton-loader" target="_blank">here</a>
:param boilerplate: Remove the loading animation from the skeleton
:type boolean:
:param dark: See description |VSkeletonLoader_vuetify_link|.
:type boolean:
:param elevation: See description |VSkeletonLoader_vuetify_link|.
:type ['number', 'string']:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loading: Applies a loading animation with a on-hover loading cursor. A value of **false** will only work when there is content in the `default` slot.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param tile: Removes the component's border-radius
:type boolean:
:param transition: See description |VSkeletonLoader_vuetify_link|.
:type string:
:param type: A string delimited list of skeleton components to create such as `type="text@3"` or `type="card, list-item"`. Will recursively generate a corresponding skeleton from the provided string. Also supports short-hand for multiple elements such as **article@3** and **paragraph@2** which will generate 3 _article_ skeletons and 2 _paragraph_ skeletons. Please see below for a list of available pre-defined options.
:type string:
:param types: A custom types object that will be combined with the pre-defined options. For a list of available pre-defined options, see the **type** prop.
:type object:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-skeleton-loader", children, **kwargs)
self._attr_names += [
"boilerplate",
"dark",
"elevation",
"height",
"light",
"loading",
"max_height",
"max_width",
"min_height",
"min_width",
"tile",
"transition",
"type",
"types",
"width",
]
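# Usage sketch (illustrative): the "type" string composes predefined skeleton
# shapes, here a card followed by three list items; "busy" is a hypothetical
# state variable:
#
#   VSkeletonLoader(type="card, list-item@3", loading=("busy", True))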
class VSlider(AbstractElement):
"""
Vuetify's VSlider component. See more info and examples |VSlider_vuetify_link|.
.. |VSlider_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slider" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param background_color: Changes the background-color of the input
:type string:
:param color: See description |VSlider_vuetify_link|.
:type string:
:param dark: See description |VSlider_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param inverse_label: Reverse the label position. Works with **rtl**.
:type boolean:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param max: Sets the maximum allowed value
:type ['number', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param min: Sets the minimum allowed value
:type ['number', 'string']:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param step: If greater than 0, sets step interval for ticks
:type ['number', 'string']:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param thumb_color: Sets the thumb and thumb label color
:type string:
:param thumb_label: Show thumb label. If `true` it shows label when using slider. If set to `'always'` it always shows label.
:type ['boolean', 'string']:
:param thumb_size: Controls the size of the thumb label.
:type ['number', 'string']:
:param tick_labels: When provided with Array<string>, will attempt to map the labels to each step in index order
:type array:
:param tick_size: Controls the size of **ticks**
:type ['number', 'string']:
:param ticks: Show track ticks. If `true` it shows ticks when using slider. If set to `'always'` it always shows ticks.
:type ['boolean', 'string']:
:param track_color: Sets the track's color
:type string:
:param track_fill_color: Sets the track's fill color
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param vertical: Changes slider direction to vertical
:type boolean:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param end: Slider value emitted at the end of slider movement
:param input: The updated bound model
:param start: Slider value emitted at start of slider movement
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slider", children, **kwargs)
self._attr_names += [
"append_icon",
"background_color",
"color",
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"height",
"hide_details",
"hint",
"id",
"inverse_label",
"label",
"light",
"loader_height",
"loading",
"max",
"messages",
"min",
"persistent_hint",
"prepend_icon",
"readonly",
"rules",
"step",
"success",
"success_messages",
"thumb_color",
"thumb_label",
"thumb_size",
"tick_labels",
"tick_size",
"ticks",
"track_color",
"track_fill_color",
"validate_on_blur",
"value",
"vertical",
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_prepend", "click:prepend"),
"end",
"input",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
"start",
("update_error", "update:error"),
]
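# Usage sketch (illustrative): a stepped slider with permanent ticks and a
# thumb label, bound to a hypothetical "volume" state variable:
#
#   VSlider(v_model=("volume", 50), min=0, max=100, step=10,
#           ticks="always", thumb_label=True)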
class VSlideGroup(AbstractElement):
"""
Vuetify's VSlideGroup component. See more info and examples |VSlideGroup_vuetify_link|.
.. |VSlideGroup_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slide-group" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param center_active: Forces the selected component to be centered
:type boolean:
:param dark: See description |VSlideGroup_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param mobile_breakpoint: Sets the designated mobile breakpoint for the component.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param next_icon: The appended slot when arrows are shown
:type string:
:param prev_icon: The prepended slot when arrows are shown
:type string:
:param show_arrows: See description |VSlideGroup_vuetify_link|.
:type ['boolean', 'string']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param value: The designated model value for the component.
:type any:
Events
:param change: Emitted when the component value is changed by user interaction
:param click_next: Emitted when the next is clicked
:param click_prev: Emitted when the prev is clicked
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slide-group", children, **kwargs)
self._attr_names += [
"active_class",
"center_active",
"dark",
"light",
"mandatory",
"max",
"mobile_breakpoint",
"multiple",
"next_icon",
"prev_icon",
"show_arrows",
"tag",
"value",
]
self._event_names += [
"change",
("click_next", "click:next"),
("click_prev", "click:prev"),
]
class VSlideItem(AbstractElement):
"""
Vuetify's VSlideItem component. See more info and examples |VSlideItem_vuetify_link|.
.. |VSlideItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slide-item" target="_blank">here</a>
:param active_class: See description |VSlideItem_vuetify_link|.
:type string:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param value: The value used when the component is selected in a group. If not provided, the index will be used.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slide-item", children, **kwargs)
self._attr_names += [
"active_class",
"disabled",
"value",
]
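# Usage sketch (illustrative): VSlideItem children inside a VSlideGroup form
# a horizontally scrollable, selectable strip; "selected" is a hypothetical
# state variable:
#
#   with VSlideGroup(v_model=("selected", 0), show_arrows=True, mandatory=True):
#       for i in range(10):
#           VSlideItem(f"Item {i}", value=i)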
class VSnackbar(AbstractElement):
"""
Vuetify's VSnackbar component. See more info and examples |VSnackbar_vuetify_link|.
.. |VSnackbar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-snackbar" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param app: Respects boundaries of—and will not overlap with—other `app` components like `v-app-bar`, `v-navigation-drawer`, and `v-footer`.
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param centered: Positions the snackbar in the center of the screen, (x and y axis).
:type boolean:
:param color: See description |VSnackbar_vuetify_link|.
:type string:
:param content_class: Apply a custom class to the snackbar content
:type string:
:param dark: See description |VSnackbar_vuetify_link|.
:type boolean:
:param elevation: See description |VSnackbar_vuetify_link|.
:type ['number', 'string']:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param left: Aligns the component towards the left.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param multi_line: Gives the snackbar a larger minimum height.
:type boolean:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param right: Aligns the component towards the right.
:type boolean:
:param rounded: See description |VSnackbar_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param text: Applies the defined **color** to text and a low opacity background of the same.
:type boolean:
:param tile: Removes the component's **border-radius**.
:type boolean:
    :param timeout: Time (in milliseconds) to wait until snackbar is automatically hidden. Use `-1` to keep open indefinitely (`0` in version < 2.3). It is recommended for this number to be between `4000` and `10000`. Changes to this property will reset the timeout.
:type ['number', 'string']:
:param top: Aligns the content towards the top.
:type boolean:
:param transition: See description |VSnackbar_vuetify_link|.
:type ['boolean', 'string']:
:param value: Controls whether the component is visible or hidden.
:type any:
:param vertical: Stacks snackbar content on top of the actions (button).
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
Events
:param input: The updated bound model
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-snackbar", children, **kwargs)
self._attr_names += [
"absolute",
"app",
"bottom",
"centered",
"color",
"content_class",
"dark",
"elevation",
"height",
"left",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"multi_line",
"outlined",
"right",
"rounded",
"shaped",
"tag",
"text",
"tile",
"timeout",
"top",
"transition",
"value",
"vertical",
"width",
]
self._event_names += [
"input",
]
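# Usage sketch (illustrative): visibility follows a hypothetical boolean
# "show_message" state variable; the snackbar hides itself after 4 s:
#
#   VSnackbar("Saved!", v_model=("show_message", False), timeout=4000,
#             bottom=True)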
class VSparkline(AbstractElement):
"""
Vuetify's VSparkline component. See more info and examples |VSparkline_vuetify_link|.
.. |VSparkline_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-sparkline" target="_blank">here</a>
:param auto_draw: Trace the length of the line when first rendered
:type boolean:
:param auto_draw_duration: Amount of time (in ms) to run the trace animation
:type number:
:param auto_draw_easing: The easing function to use for the trace animation
:type string:
:param auto_line_width: Automatically expand bars to use space efficiently
:type boolean:
:param color: See description |VSparkline_vuetify_link|.
:type string:
:param fill: Using the **fill** property allows you to better customize the look and feel of your sparkline.
:type boolean:
:param gradient: An array of colors to use as a linear-gradient
:type array:
:param gradient_direction: The direction the gradient should run
:type string:
:param height: Height of the SVG trendline or bars
:type ['string', 'number']:
:param label_size: The label font size
:type ['number', 'string']:
:param labels: An array of string labels that correspond to the same index as its data counterpart
:type array:
:param line_width: The thickness of the line, in px
:type ['string', 'number']:
:param padding: Low `smooth` or high `line-width` values may result in cropping, increase padding to compensate
:type ['string', 'number']:
:param show_labels: Show labels below each data point
:type boolean:
:param smooth: Number of px to use as a corner radius. `true` defaults to 8, `false` is 0
:type ['boolean', 'number', 'string']:
:param type: Choose between a trendline or bars
:type string:
:param value: An array of numbers.
:type array:
:param width: Width of the SVG trendline or bars
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-sparkline", children, **kwargs)
self._attr_names += [
"auto_draw",
"auto_draw_duration",
"auto_draw_easing",
"auto_line_width",
"color",
"fill",
"gradient",
"gradient_direction",
"height",
"label_size",
"labels",
"line_width",
"padding",
"show_labels",
"smooth",
"type",
"value",
"width",
]
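# Usage sketch (illustrative): a smoothed, auto-drawn trendline over a
# hypothetical "samples" state variable holding a list of numbers:
#
#   VSparkline(value=("samples", [1, 4, 2, 8, 5]), smooth=10, auto_draw=True,
#              line_width=2)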
class VSpeedDial(AbstractElement):
"""
Vuetify's VSpeedDial component. See more info and examples |VSpeedDial_vuetify_link|.
.. |VSpeedDial_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-speed-dial" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param direction: Direction in which speed-dial content will show. Possible values are `top`, `bottom`, `left`, `right`.
:type string:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param left: Aligns the component towards the left.
:type boolean:
:param mode: See description |VSpeedDial_vuetify_link|.
:type string:
:param open_on_hover: Opens speed-dial on hover
:type boolean:
:param origin: See description |VSpeedDial_vuetify_link|.
:type string:
:param right: Aligns the component towards the right.
:type boolean:
:param top: Aligns the content towards the top.
:type boolean:
:param transition: See description |VSpeedDial_vuetify_link|.
:type string:
:param value: Controls whether the component is visible or hidden.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-speed-dial", children, **kwargs)
self._attr_names += [
"absolute",
"bottom",
"direction",
"fixed",
"left",
"mode",
"open_on_hover",
"origin",
"right",
"top",
"transition",
"value",
]
class VStepper(AbstractElement):
"""
Vuetify's VStepper component. See more info and examples |VStepper_vuetify_link|.
.. |VStepper_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-stepper" target="_blank">here</a>
:param alt_labels: Places the labels beneath the step
:type boolean:
:param color: See description |VStepper_vuetify_link|.
:type string:
:param dark: See description |VStepper_vuetify_link|.
:type boolean:
:param elevation: See description |VStepper_vuetify_link|.
:type ['number', 'string']:
:param flat: Removes the stepper's elevation.
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param non_linear: Allow user to jump to any step
:type boolean:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param rounded: See description |VStepper_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param value: The designated model value for the component.
:type any:
:param vertical: Display steps vertically
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
Events
:param change: Emitted when step is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-stepper", children, **kwargs)
self._attr_names += [
"alt_labels",
"color",
"dark",
"elevation",
"flat",
"height",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"non_linear",
"outlined",
"rounded",
"shaped",
"tag",
"tile",
"value",
"vertical",
"width",
]
self._event_names += [
"change",
]
class VStepperContent(AbstractElement):
"""
Vuetify's VStepperContent component. See more info and examples |VStepperContent_vuetify_link|.
.. |VStepperContent_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-stepper-content" target="_blank">here</a>
:param step: Sets step to associate the content to
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-stepper-content", children, **kwargs)
self._attr_names += [
"step",
]
class VStepperStep(AbstractElement):
"""
Vuetify's VStepperStep component. See more info and examples |VStepperStep_vuetify_link|.
.. |VStepperStep_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-stepper-step" target="_blank">here</a>
:param color: See description |VStepperStep_vuetify_link|.
:type string:
:param complete: Marks step as complete
:type boolean:
:param complete_icon: Icon to display when step is marked as completed
:type string:
:param edit_icon: Icon to display when step is editable
:type string:
:param editable: Marks step as editable
:type boolean:
:param error_icon: Icon to display when step has an error
:type string:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param step: Content to display inside step circle
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-stepper-step", children, **kwargs)
self._attr_names += [
"color",
"complete",
"complete_icon",
"edit_icon",
"editable",
"error_icon",
"rules",
"step",
]
self._event_names += [
# click, #Implemented in AbstractElement parent class
]
class VStepperHeader(AbstractElement):
"""
Vuetify's VStepperHeader component. See more info and examples |VStepperHeader_vuetify_link|.
.. |VStepperHeader_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-stepper-header" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-stepper-header", children, **kwargs)
class VStepperItems(AbstractElement):
"""
Vuetify's VStepperItems component. See more info and examples |VStepperItems_vuetify_link|.
.. |VStepperItems_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-stepper-items" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-stepper-items", children, **kwargs)
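# Usage sketch (illustrative): VStepperStep headers live in a VStepperHeader
# and pair with VStepperContent panes inside VStepperItems; "step" is a
# hypothetical state variable selecting the active pane:
#
#   with VStepper(v_model=("step", 1)):
#       with VStepperHeader():
#           VStepperStep("Select", step=1, editable=True)
#           VStepperStep("Confirm", step=2)
#       with VStepperItems():
#           with VStepperContent(step=1):
#               VSheet("first pane", height=200)
#           with VStepperContent(step=2):
#               VSheet("second pane", height=200)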
class VSubheader(AbstractElement):
"""
Vuetify's VSubheader component. See more info and examples |VSubheader_vuetify_link|.
.. |VSubheader_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-subheader" target="_blank">here</a>
:param dark: See description |VSubheader_vuetify_link|.
:type boolean:
:param inset: Adds indentation (72px)
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-subheader", children, **kwargs)
self._attr_names += [
"dark",
"inset",
"light",
]
class VSwitch(AbstractElement):
"""
Vuetify's VSwitch component. See more info and examples |VSwitch_vuetify_link|.
.. |VSwitch_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-switch" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param background_color: Changes the background-color of the input
:type string:
:param color: See description |VSwitch_vuetify_link|.
:type string:
:param dark: See description |VSwitch_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param false_value: Sets value for falsy state
:type any:
:param flat: Display component without elevation. Default elevation for thumb is 4dp, `flat` resets it
:type boolean:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param input_value: The **v-model** bound value
:type any:
:param inset: Enlarge the `v-switch` track to encompass the thumb
:type boolean:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loading: Displays circular progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - primary, secondary, success, info, warning, error) or a Boolean which uses the component color (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param multiple: Changes expected model to an array
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param ripple: See description |VSwitch_vuetify_link|.
:type ['boolean', 'object']:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param true_value: Sets value for truthy state
:type any:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
:param value_comparator: Apply a custom value comparator function
:type function:
Events
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_prepend: Emitted when prepended icon is clicked
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-switch", children, **kwargs)
self._attr_names += [
"append_icon",
"background_color",
"color",
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"false_value",
"flat",
"hide_details",
"hint",
"id",
"input_value",
"inset",
"label",
"light",
"loading",
"messages",
"multiple",
"persistent_hint",
"prepend_icon",
"readonly",
"ripple",
"rules",
"success",
"success_messages",
"true_value",
"validate_on_blur",
"value",
"value_comparator", # JS functions unimplemented
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_prepend", "click:prepend"),
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
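# Usage sketch (illustrative): a toggle with custom truthy/falsy values,
# bound to a hypothetical "mode" state variable:
#
#   VSwitch(v_model=("mode", "off"), true_value="on", false_value="off",
#           label="Advanced mode", inset=True)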
class VSystemBar(AbstractElement):
"""
Vuetify's VSystemBar component. See more info and examples |VSystemBar_vuetify_link|.
.. |VSystemBar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-system-bar" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param app: See description |VSystemBar_vuetify_link|.
:type boolean:
:param color: See description |VSystemBar_vuetify_link|.
:type string:
:param dark: See description |VSystemBar_vuetify_link|.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param height: Sets the height for the component.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param lights_out: Reduces the system bar opacity.
:type boolean:
:param window: Increases the system bar height to 32px (24px default).
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-system-bar", children, **kwargs)
self._attr_names += [
"absolute",
"app",
"color",
"dark",
"fixed",
"height",
"light",
"lights_out",
"window",
]
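# Usage sketch (illustrative): a dark system bar pinned as part of the
# application layout; any child widgets render inside the bar.
#
#     VSystemBar(app=True, dark=True, color="primary")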
class VTabs(AbstractElement):
"""
Vuetify's VTabs component. See more info and examples |VTabs_vuetify_link|.
.. |VTabs_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tabs" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param align_with_title: Make `v-tabs` lined up with the toolbar title
:type boolean:
:param background_color: Changes the background color of the component.
:type string:
:param center_active: Forces the selected tab to be centered
:type boolean:
:param centered: Centers the tabs
:type boolean:
:param color: See description |VTabs_vuetify_link|.
:type string:
:param dark: See description |VTabs_vuetify_link|.
:type boolean:
:param fixed_tabs: `v-tabs-item` min-width 160px, max-width 360px
:type boolean:
:param grow: Force `v-tab` components to take up all available space
:type boolean:
:param height: Sets the height of the tabs bar
:type ['number', 'string']:
:param hide_slider: Hides the generated `v-tabs-slider`
:type boolean:
:param icons_and_text: Will stack icon and text vertically
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mobile_breakpoint: Sets the designated mobile breakpoint for the component.
:type ['string', 'number']:
:param next_icon: Right pagination icon
:type string:
:param optional: Does not require an active item. Useful when using `v-tab` as a `router-link`
:type boolean:
:param prev_icon: Left pagination icon
:type string:
:param right: Aligns tabs to the right
:type boolean:
:param show_arrows: Show pagination arrows if the tab items overflow their container. For mobile devices, arrows will only display when using this prop.
:type ['boolean', 'string']:
:param slider_color: Changes the background color of an auto-generated `v-tabs-slider`
:type string:
:param slider_size: Changes the size of the slider, **height** for horizontal, **width** for vertical.
:type ['number', 'string']:
:param value: The designated model value for the component.
:type any:
:param vertical: Stacks tabs on top of each other vertically.
:type boolean:
Events
:param change: Emitted when tab is changed by user interaction. Returns a string if **href** attribute is set and number if it is not.
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tabs", children, **kwargs)
self._attr_names += [
"active_class",
"align_with_title",
"background_color",
"center_active",
"centered",
"color",
"dark",
"fixed_tabs",
"grow",
"height",
"hide_slider",
"icons_and_text",
"light",
"mobile_breakpoint",
"next_icon",
"optional",
"prev_icon",
"right",
"show_arrows",
"slider_color",
"slider_size",
"value",
"vertical",
]
self._event_names += [
"change",
]
class VTab(AbstractElement):
"""
Vuetify's VTab component. See more info and examples |VTab_vuetify_link|.
.. |VTab_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tab" target="_blank">here</a>
:param active_class: See description |VTab_vuetify_link|.
:type string:
:param append: See description |VTab_vuetify_link|.
:type boolean:
:param dark: See description |VTab_vuetify_link|.
:type boolean:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param exact: See description |VTab_vuetify_link|.
:type boolean:
:param exact_active_class: See description |VTab_vuetify_link|.
:type string:
:param exact_path: See description |VTab_vuetify_link|.
:type boolean:
:param href: Designates the component as anchor and applies the **href** attribute.
:type ['string', 'object']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param link: Designates that the component is a link. This is automatic when using the **href** or **to** prop.
:type boolean:
:param nuxt: See description |VTab_vuetify_link|.
:type boolean:
:param replace: See description |VTab_vuetify_link|.
:type boolean:
:param ripple: See description |VTab_vuetify_link|.
:type ['boolean', 'object']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param target: Designates the target attribute. This should only be applied when using the **href** prop.
:type string:
:param to: See description |VTab_vuetify_link|.
:type ['string', 'object']:
Events
:param change: Emitted when tab becomes active
:param keydown: Emitted when **enter** key is pressed
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tab", children, **kwargs)
self._attr_names += [
"active_class",
"append",
"dark",
"disabled",
"exact",
"exact_active_class",
"exact_path",
"href",
"light",
"link",
"nuxt",
"replace",
"ripple",
"tag",
"target",
"to",
]
self._event_names += [
"change",
# click, #Implemented in AbstractElement parent class
"keydown",
]
class VTabItem(AbstractElement):
"""
Vuetify's VTabItem component. See more info and examples |VTabItem_vuetify_link|.
.. |VTabItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tab-item" target="_blank">here</a>
:param active_class: See description |VTabItem_vuetify_link|.
:type string:
:param disabled: Removes the ability to click or target the component.
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param id: Sets the DOM id on the component
:type string:
:param reverse_transition: Sets the reverse transition
:type ['boolean', 'string']:
:param transition: See description |VTabItem_vuetify_link|.
:type ['boolean', 'string']:
:param value: Sets the value of the tab. If not provided, the index will be used.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tab-item", children, **kwargs)
self._attr_names += [
"active_class",
"disabled",
"eager",
"id",
"reverse_transition",
"transition",
"value",
]
class VTabsItems(AbstractElement):
"""
Vuetify's VTabsItems component. See more info and examples |VTabsItems_vuetify_link|.
.. |VTabsItems_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tabs-items" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param continuous: If `true`, window will "wrap around" from the last item to the first, and from the first item to the last
:type boolean:
:param dark: See description |VTabsItems_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param mandatory: Forces a value to always be selected (if available).
:type boolean:
:param max: Sets a maximum number of selections that can be made.
:type ['number', 'string']:
:param multiple: Allow multiple selections. The **value** prop must be an _array_.
:type boolean:
:param next_icon: Icon used for the "next" button if `show-arrows` is `true`
:type ['boolean', 'string']:
:param prev_icon: Icon used for the "prev" button if `show-arrows` is `true`
:type ['boolean', 'string']:
:param reverse: Reverse the normal transition direction.
:type boolean:
:param show_arrows: Display the "next" and "prev" buttons
:type boolean:
:param show_arrows_on_hover: Display the "next" and "prev" buttons on hover. `show-arrows` MUST ALSO be set.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param touch: Provide a custom **left** and **right** function when swiped left or right.
:type object:
:param touchless: Disable touch support.
:type boolean:
:param value: The designated model value for the component.
:type any:
:param vertical: Uses a vertical transition when changing windows.
:type boolean:
Events
:param change: Emitted when user swipes between tabs.
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tabs-items", children, **kwargs)
self._attr_names += [
"active_class",
"continuous",
"dark",
"light",
"mandatory",
"max",
"multiple",
"next_icon",
"prev_icon",
"reverse",
"show_arrows",
"show_arrows_on_hover",
"tag",
"touch",
"touchless",
"value",
"vertical",
]
self._event_names += [
"change",
]
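# Usage sketch (illustrative): VTabs renders the headers and VTabsItems the
# matching panes; binding both to the same state key keeps them in sync.
# "active_tab" is an example state name, and the with-block child pattern
# follows trame's HTML builder convention.
#
#     with VTabs(v_model=("active_tab", 0), grow=True):
#         VTab("Settings")
#         VTab("Logs")
#     with VTabsItems(v_model=("active_tab", 0)):
#         with VTabItem():
#             ...  # settings pane content
#         with VTabItem():
#             ...  # logs pane content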
class VTabsSlider(AbstractElement):
"""
Vuetify's VTabsSlider component. See more info and examples |VTabsSlider_vuetify_link|.
.. |VTabsSlider_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tabs-slider" target="_blank">here</a>
:param color: See description |VTabsSlider_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tabs-slider", children, **kwargs)
self._attr_names += [
"color",
]
class VTextarea(AbstractElement):
"""
Vuetify's VTextarea component. See more info and examples |VTextarea_vuetify_link|.
.. |VTextarea_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-textarea" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon outside the component's input, using the same syntax as `v-icon`
:type string:
:param auto_grow: Automatically grow the textarea depending on amount of text
:type boolean:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Adds input clear functionality; the default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VTextarea_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_value:
:type function:
:param dark: See description |VTextarea_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays a linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param no_resize: Remove resize handle
:type boolean:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param row_height: Height value for each row. Requires the use of the **auto-grow** prop.
:type ['number', 'string']:
:param rows: Default row count
:type ['number', 'string']:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-textarea", children, **kwargs)
self._attr_names += [
"append_icon",
"append_outer_icon",
"auto_grow",
"autofocus",
"background_color",
"clear_icon",
"clearable",
"color",
"counter",
"counter_value", # JS functions unimplemented
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"filled",
"flat",
"full_width",
"height",
"hide_details",
"hint",
"id",
"label",
"light",
"loader_height",
"loading",
"messages",
"no_resize",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"readonly",
"reverse",
"rounded",
"row_height",
"rows",
"rules",
"shaped",
"single_line",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"type",
"validate_on_blur",
"value",
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
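# Usage sketch (illustrative): an auto-growing, clearable textarea bound to a
# state key ("comment" is a made-up example).
#
#     VTextarea(
#         v_model=("comment", ""),
#         label="Comment",
#         rows=3,
#         auto_grow=True,
#         clearable=True,
#         counter=True,
#     )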
class VTextField(AbstractElement):
"""
Vuetify's VTextField component. See more info and examples |VTextField_vuetify_link|.
.. |VTextField_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-text-field" target="_blank">here</a>
:param append_icon: Appends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param append_outer_icon: Appends an icon outside the component's input, using the same syntax as `v-icon`
:type string:
:param autofocus: Enables autofocus
:type boolean:
:param background_color: Changes the background-color of the input
:type string:
:param clear_icon: Applied when using **clearable** and the input is dirty
:type string:
:param clearable: Adds input clear functionality; the default icon is Material Design Icons **mdi-clear**
:type boolean:
:param color: See description |VTextField_vuetify_link|.
:type string:
:param counter: Creates counter for input length; if no number is specified, it defaults to 25. Does not apply any validation.
:type ['boolean', 'number', 'string']:
:param counter_value:
:type function:
:param dark: See description |VTextField_vuetify_link|.
:type boolean:
:param dense: Reduces the input height
:type boolean:
:param disabled: Disable the input
:type boolean:
:param error: Puts the input in a manual error state
:type boolean:
:param error_count: The total number of errors that should display at once
:type ['number', 'string']:
:param error_messages: Puts the input in an error state and passes through custom error messages. Will be combined with any validations that occur from the **rules** prop. This field will not trigger validation
:type ['string', 'array']:
:param filled: Applies the alternate filled input style
:type boolean:
:param flat: Removes elevation (shadow) added to element when using the **solo** or **solo-inverted** props
:type boolean:
:param full_width: Designates input type as full-width
:type boolean:
:param height: Sets the height of the input
:type ['number', 'string']:
:param hide_details: Hides hint and validation errors. When set to `auto` messages will be rendered only if there's a message (hint, error message, counter value etc) to display
:type ['boolean', 'string']:
:param hint: Hint text
:type string:
:param id: Sets the DOM id on the component
:type string:
:param label: Sets input label
:type string:
:param light: Applies the light theme variant to the component.
:type boolean:
:param loader_height: Specifies the height of the loader
:type ['number', 'string']:
:param loading: Displays a linear progress bar. Can either be a String which specifies which color is applied to the progress bar (any material color or theme color - **primary**, **secondary**, **success**, **info**, **warning**, **error**) or a Boolean which uses the component **color** (set by color prop - if it's supported by the component) or the primary color
:type ['boolean', 'string']:
:param messages: Displays a list of messages or message if using a string
:type ['string', 'array']:
:param outlined: Applies the outlined style to the input
:type boolean:
:param persistent_hint: Forces hint to always be visible
:type boolean:
:param persistent_placeholder: Forces placeholder to always be visible
:type boolean:
:param placeholder: Sets the input's placeholder text
:type string:
:param prefix: Displays prefix text
:type string:
:param prepend_icon: Prepends an icon to the component, uses the same syntax as `v-icon`
:type string:
:param prepend_inner_icon: Prepends an icon inside the component's input, uses the same syntax as `v-icon`
:type string:
:param readonly: Puts input in readonly state
:type boolean:
:param reverse: Reverses the input orientation
:type boolean:
:param rounded: Adds a border radius to the input
:type boolean:
:param rules: Accepts a mixed array of types `function`, `boolean` and `string`. Functions pass an input value as an argument and must return either `true` / `false` or a `string` containing an error message. The input field will enter an error state if a function returns (or any value in the array contains) `false` or is a `string`
:type array:
:param shaped: Round if `outlined` and increase `border-radius` if `filled`. Must be used with either `outlined` or `filled`
:type boolean:
:param single_line: Label does not move on focus/dirty
:type boolean:
:param solo: Changes the style of the input
:type boolean:
:param solo_inverted: Reduces element opacity until focused
:type boolean:
:param success: Puts the input in a manual success state
:type boolean:
:param success_messages: Puts the input in a success state and passes through custom success messages.
:type ['string', 'array']:
:param suffix: Displays suffix text
:type string:
:param type: Sets input type
:type string:
:param validate_on_blur: Delays validation until blur event
:type boolean:
:param value: The input's value
:type any:
Events
:param blur: Emitted when the input is blurred
:param change: Emitted when the input is changed by user interaction
:param click_append: Emitted when appended icon is clicked
:param click_append_outer: Emitted when appended outer icon is clicked
:param click_clear: Emitted when clearable icon clicked
:param click_prepend: Emitted when prepended icon is clicked
:param click_prepend_inner: Emitted when prepended inner icon is clicked
:param focus: Emitted when component is focused
:param input: The updated bound model
:param keydown: Emitted when **any** key is pressed
:param update_error: The `error.sync` event
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-text-field", children, **kwargs)
self._attr_names += [
"append_icon",
"append_outer_icon",
"autofocus",
"background_color",
"clear_icon",
"clearable",
"color",
"counter",
"counter_value", # JS functions unimplemented
"dark",
"dense",
"disabled",
"error",
"error_count",
"error_messages",
"filled",
"flat",
"full_width",
"height",
"hide_details",
"hint",
"id",
"label",
"light",
"loader_height",
"loading",
"messages",
"outlined",
"persistent_hint",
"persistent_placeholder",
"placeholder",
"prefix",
"prepend_icon",
"prepend_inner_icon",
"readonly",
"reverse",
"rounded",
"rules",
"shaped",
"single_line",
"solo",
"solo_inverted",
"success",
"success_messages",
"suffix",
"type",
"validate_on_blur",
"value",
]
self._event_names += [
"blur",
"change",
# click, #Implemented in AbstractElement parent class
("click_append", "click:append"),
("click_append_outer", "click:append-outer"),
("click_clear", "click:clear"),
("click_prepend", "click:prepend"),
("click_prepend_inner", "click:prepend-inner"),
"focus",
"input",
"keydown",
# mousedown, #Implemented in AbstractElement parent class
# mouseup, #Implemented in AbstractElement parent class
("update_error", "update:error"),
]
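# Usage sketch (illustrative): a bound text field. The rules prop expects
# client-side functions, so it is written here as a raw JS expression using
# trame's single-element-tuple convention (an assumption; Python callables
# are not serialized as Vuetify rule functions).
#
#     VTextField(
#         v_model=("email", ""),
#         label="E-mail",
#         rules=("[v => !!v || 'Required']",),
#     )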
class VThemeProvider(AbstractElement):
"""
Vuetify's VThemeProvider component. See more info and examples |VThemeProvider_vuetify_link|.
.. |VThemeProvider_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-theme-provider" target="_blank">here</a>
:param dark: See description |VThemeProvider_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param root: Use the current value of `$vuetify.theme.dark` as opposed to the provided one.
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-theme-provider", children, **kwargs)
self._attr_names += [
"dark",
"light",
"root",
]
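# Usage sketch (illustrative): render a subtree with the opposite theme
# variant from the rest of the application.
#
#     with VThemeProvider(dark=True):
#         ...  # children rendered with the dark theme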
class VTimeline(AbstractElement):
"""
Vuetify's VTimeline component. See more info and examples |VTimeline_vuetify_link|.
.. |VTimeline_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-timeline" target="_blank">here</a>
:param align_top: Align caret and dot of timeline items to the top
:type boolean:
:param dark: See description |VTimeline_vuetify_link|.
:type boolean:
:param dense: Hide opposite slot content, and position all items to one side of timeline
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param reverse: Reverse direction of timeline items
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-timeline", children, **kwargs)
self._attr_names += [
"align_top",
"dark",
"dense",
"light",
"reverse",
]
class VTimelineItem(AbstractElement):
"""
Vuetify's VTimelineItem component. See more info and examples |VTimelineItem_vuetify_link|.
.. |VTimelineItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-timeline-item" target="_blank">here</a>
:param color: See description |VTimelineItem_vuetify_link|.
:type string:
:param dark: See description |VTimelineItem_vuetify_link|.
:type boolean:
:param fill_dot: Remove padding from dot container
:type boolean:
:param hide_dot: Hide display of timeline dot
:type boolean:
:param icon: Specify icon for dot container
:type string:
:param icon_color: See description |VTimelineItem_vuetify_link|.
:type string:
:param large: Large size dot
:type boolean:
:param left: Explicitly set the item to a left orientation
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param right: Explicitly set the item to a right orientation
:type boolean:
:param small: Small size dot
:type boolean:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-timeline-item", children, **kwargs)
self._attr_names += [
"color",
"dark",
"fill_dot",
"hide_dot",
"icon",
"icon_color",
"large",
"left",
"light",
"right",
"small",
]
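# Usage sketch (illustrative): a dense timeline with two items; the item
# label is passed as the child content.
#
#     with VTimeline(dense=True):
#         VTimelineItem("Deployed v2.1", color="green", small=True)
#         VTimelineItem("Hotfix released", color="red", small=True)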
class VTimePicker(AbstractElement):
"""
Vuetify's VTimePicker component. See more info and examples |VTimePicker_vuetify_link|.
.. |VTimePicker_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-time-picker" target="_blank">here</a>
:param allowed_hours: Restricts which hours can be selected
:type ['function', 'array']:
:param allowed_minutes: Restricts which minutes can be selected
:type ['function', 'array']:
:param allowed_seconds: Restricts which seconds can be selected
:type ['function', 'array']:
:param ampm_in_title: Place AM/PM switch in title, not near the clock.
:type boolean:
:param color: See description |VTimePicker_vuetify_link|.
:type string:
:param dark: See description |VTimePicker_vuetify_link|.
:type boolean:
:param disabled: Disables the picker
:type boolean:
:param elevation: See description |VTimePicker_vuetify_link|.
:type ['number', 'string']:
:param flat: Removes elevation
:type boolean:
:param format: Defines the format of the time displayed in the picker. Available options are `ampm` and `24hr`.
:type string:
:param full_width: Forces 100% width
:type boolean:
:param header_color: Defines the header color. If not specified it will use the color defined by the **color** prop or the default picker color
:type string:
:param landscape: Orients picker horizontal
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max: Maximum allowed time
:type string:
:param min: Minimum allowed time
:type string:
:param no_title: Hide the picker title
:type boolean:
:param readonly: Puts picker in readonly state
:type boolean:
:param scrollable: Allows changing hour/minute with mouse scroll
:type boolean:
:param use_seconds: Toggles the use of seconds in picker
:type boolean:
:param value: Time picker model (ISO 8601 format, 24hr hh:mm)
:type any:
:param width: Width of the picker
:type ['number', 'string']:
Events
:param change: Emitted when the time selection is done (when the user changes the minute for an HH:MM picker and the second for an HH:MM:SS picker)
:param click_hour: Emitted when user selects the hour
:param click_minute: Emitted when user selects the minute
:param click_second: Emitted when user selects the second
:param input: The updated bound model
:param update_period: Emitted when user clicks the AM/PM button
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-time-picker", children, **kwargs)
self._attr_names += [
"allowed_hours", # JS functions unimplemented
"allowed_minutes", # JS functions unimplemented
"allowed_seconds", # JS functions unimplemented
"ampm_in_title",
"color",
"dark",
"disabled",
"elevation",
"flat",
"format",
"full_width",
"header_color",
"landscape",
"light",
"max",
"min",
"no_title",
"readonly",
"scrollable",
"use_seconds",
"value",
"width",
]
self._event_names += [
"change",
("click_hour", "click:hour"),
("click_minute", "click:minute"),
("click_second", "click:second"),
"input",
("update_period", "update:period"),
]
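# Usage sketch (illustrative): a 24-hour picker bound to state and clamped
# to working hours with the min/max props ("meeting_time" is a made-up
# state key).
#
#     VTimePicker(
#         v_model=("meeting_time", "09:00"),
#         format="24hr",
#         min="08:00",
#         max="18:00",
#     )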
class VToolbar(AbstractElement):
"""
Vuetify's VToolbar component. See more info and examples |VToolbar_vuetify_link|.
.. |VToolbar_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-toolbar" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param collapse: Puts the toolbar into a collapsed state reducing its maximum width.
:type boolean:
:param color: See description |VToolbar_vuetify_link|.
:type string:
:param dark: See description |VToolbar_vuetify_link|.
:type boolean:
:param dense: Reduces the height of the toolbar content to 48px (96px when using the **prominent** prop).
:type boolean:
:param elevation: See description |VToolbar_vuetify_link|.
:type ['number', 'string']:
:param extended: Use this prop to increase the height of the toolbar _without_ using the `extension` slot for adding content. May be used in conjunction with the **extension-height** prop, and any of the other props that affect the height of the toolbar, e.g. **prominent**, **dense**, etc., **WITH THE EXCEPTION** of **height**.
:type boolean:
:param extension_height: Specify an explicit height for the `extension` slot.
:type ['number', 'string']:
:param flat: Removes the toolbar's box-shadow.
:type boolean:
:param floating: Applies **display: inline-flex** to the component.
:type boolean:
:param height: Designates a specific height for the toolbar. Overrides the heights imposed by other props, e.g. **prominent**, **dense**, **extended**, etc.
:type ['number', 'string']:
:param light: Applies the light theme variant to the component.
:type boolean:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param outlined: Removes elevation (box-shadow) and adds a *thin* border.
:type boolean:
:param prominent: Increases the height of the toolbar content to 128px.
:type boolean:
:param rounded: See description |VToolbar_vuetify_link|.
:type ['boolean', 'string']:
:param shaped: Applies a large border radius on the top left and bottom right of the card.
:type boolean:
:param short: Reduce the height of the toolbar content to 56px (112px when using the **prominent** prop).
:type boolean:
:param src: See description |VToolbar_vuetify_link|.
:type ['string', 'object']:
:param tag: Specify a custom tag used on the root element.
:type string:
:param tile: Removes the component's **border-radius**.
:type boolean:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-toolbar", children, **kwargs)
self._attr_names += [
"absolute",
"bottom",
"collapse",
"color",
"dark",
"dense",
"elevation",
"extended",
"extension_height",
"flat",
"floating",
"height",
"light",
"max_height",
"max_width",
"min_height",
"min_width",
"outlined",
"prominent",
"rounded",
"shaped",
"short",
"src",
"tag",
"tile",
"width",
]
class VToolbarItems(AbstractElement):
"""
Vuetify's VToolbarItems component. See more info and examples |VToolbarItems_vuetify_link|.
.. |VToolbarItems_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-toolbar-items" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-toolbar-items", children, **kwargs)
class VToolbarTitle(AbstractElement):
"""
Vuetify's VToolbarTitle component. See more info and examples |VToolbarTitle_vuetify_link|.
.. |VToolbarTitle_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-toolbar-title" target="_blank">here</a>
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-toolbar-title", children, **kwargs)
class VTooltip(AbstractElement):
"""
Vuetify's VTooltip component. See more info and examples |VTooltip_vuetify_link|.
.. |VTooltip_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tooltip" target="_blank">here</a>
:param absolute: Applies **position: absolute** to the component.
:type boolean:
:param activator: Designate a custom activator when the `activator` slot is not used. String can be any valid querySelector and Object can be any valid Node.
:type any:
:param allow_overflow: Removes overflow re-positioning for the content
:type boolean:
:param attach: Specifies which DOM element that this component should detach to. String can be any valid querySelector and Object can be any valid Node. This will attach to the root `v-app` component by default.
:type any:
:param bottom: Aligns the component towards the bottom.
:type boolean:
:param close_delay: Delay (in ms) after which menu closes (when open-on-hover prop is set to true)
:type ['number', 'string']:
:param color: See description |VTooltip_vuetify_link|.
:type string:
:param content_class: Applies a custom class to the detached element. This is useful because the content is moved to the beginning of the `v-app` component (unless the **attach** prop is provided) and is not targetable by classes passed directly on the component.
:type string:
:param disabled: Disables the tooltip
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param fixed: Applies **position: fixed** to the component.
:type boolean:
:param internal_activator: Designates whether to use an internal activator
:type boolean:
:param left: Aligns the component towards the left.
:type boolean:
:param max_width: Sets the maximum width for the content
:type ['number', 'string']:
:param min_width: Sets the minimum width for the content
:type ['number', 'string']:
:param nudge_bottom: Nudge the content to the bottom
:type ['number', 'string']:
:param nudge_left: Nudge the content to the left
:type ['number', 'string']:
:param nudge_right: Nudge the content to the right
:type ['number', 'string']:
:param nudge_top: Nudge the content to the top
:type ['number', 'string']:
:param nudge_width: Nudge the content width
:type ['number', 'string']:
:param offset_overflow: Causes the component to flip to the opposite side when repositioned due to overflow
:type boolean:
:param open_delay: Delay (in ms) after which tooltip opens (when `open-on-hover` prop is set to **true**)
:type ['number', 'string']:
:param open_on_click: Designates whether the tooltip should open on activator click
:type boolean:
:param open_on_focus:
:type boolean:
:param open_on_hover: Designates whether the tooltip should open on activator hover
:type boolean:
:param position_x: Used to position the content when not using an activator slot
:type number:
:param position_y: Used to position the content when not using an activator slot
:type number:
:param right: Aligns the component towards the right.
:type boolean:
:param tag: Specifies a custom tag for the activator wrapper
:type string:
:param top: Aligns the content towards the top.
:type boolean:
:param transition: See description |VTooltip_vuetify_link|.
:type string:
:param value: Controls whether the component is visible or hidden.
:type any:
:param z_index: The z-index used for the component
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tooltip", children, **kwargs)
self._attr_names += [
"absolute",
"activator",
"allow_overflow",
"attach",
"bottom",
"close_delay",
"color",
"content_class",
"disabled",
"eager",
"fixed",
"internal_activator",
"left",
"max_width",
"min_width",
"nudge_bottom",
"nudge_left",
"nudge_right",
"nudge_top",
"nudge_width",
"offset_overflow",
"open_delay",
"open_on_click",
"open_on_focus",
"open_on_hover",
"position_x",
"position_y",
"right",
"tag",
"top",
"transition",
"value",
"z_index",
]
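# Usage sketch (illustrative, pattern adapted from trame examples and
# treated as an assumption): the tooltip text is the child, while the
# trigger element goes in the activator slot via a Template widget with a
# v-slot directive.
#
#     with VTooltip(bottom=True):
#         with Template(v_slot_activator="{ on, attrs }"):
#             VBtn("Hover me", v_bind="attrs", v_on="on")
#         html.Span("Tooltip text")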
class VTreeview(AbstractElement):
"""
Vuetify's VTreeview component. See more info and examples |VTreeview_vuetify_link|.
.. |VTreeview_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-treeview" target="_blank">here</a>
:param activatable: Allows user to mark a node as active by clicking on it
:type boolean:
:param active: Syncable prop that allows one to control which nodes are active. The array consists of the `item-key` of each active item.
:type array:
:param active_class: The class applied to the node when active
:type string:
:param color: Sets the color of the active node
:type string:
:param dark: See description |VTreeview_vuetify_link|.
:type boolean:
:param dense: Decreases the height of the items
:type boolean:
:param expand_icon: Icon used to indicate that a node can be expanded
:type string:
:param filter: Custom item filtering function. By default it will use a case-insensitive search in the item's label.
:type function:
:param hoverable: Applies a hover class when mousing over nodes
:type boolean:
:param indeterminate_icon: Icon used when node is in an indeterminate state. Only visible when `selectable` is `true`.
:type string:
:param item_children: Property on supplied `items` that contains its children
:type string:
:param item_disabled: Property on supplied `items` that contains the disabled state of the item
:type string:
:param item_key: Property on supplied `items` used to keep track of node state. The value of this property has to be unique among all items.
:type string:
:param item_text: Property on supplied `items` that contains its label text
:type string:
:param items: An array of items used to build the treeview
:type array:
:param light: Applies the light theme variant to the component.
:type boolean:
:param load_children: A function used when dynamically loading children. If this prop is set, then the supplied function will be run when expanding an item whose `item-children` property is an empty array. Supports returning a Promise.
:type function:
:param loading_icon: Icon used when node is in a loading state
:type string:
:param multiple_active: When `true`, allows user to have multiple active nodes at the same time
:type boolean:
:param off_icon: Icon used when node is not selected. Only visible when `selectable` is `true`.
:type string:
:param on_icon: Icon used when leaf node is selected or when a branch node is fully selected. Only visible when `selectable` is `true`.
:type string:
:param open: Syncable prop that allows one to control which nodes are open. The array consists of the `item-key` of each open item.
:type array:
:param open_all: When `true` will cause all branch nodes to be opened when the component is mounted
:type boolean:
:param open_on_click: When `true` will cause nodes to be opened by clicking anywhere on it, instead of only opening by clicking on expand icon. When using this prop with `activatable` you will be unable to mark nodes with children as active.
:type boolean:
:param return_object: When `true` will make `v-model`, `active.sync` and `open.sync` return the complete object instead of just the key
:type boolean:
:param rounded: Provides an alternative active style for `v-treeview` node. Only visible when `activatable` is `true` and should not be used in conjunction with the `shaped` prop.
:type boolean:
:param search: The search model for filtering results
:type string:
:param selectable: Will render a checkbox next to each node allowing them to be selected
:type boolean:
:param selected_color: The color of the selection checkbox
:type string:
:param selection_type: Controls how the treeview selects nodes. There are two modes available: 'leaf' and 'independent'
:type string:
:param shaped: Provides an alternative active style for `v-treeview` node. Only visible when `activatable` is `true` and should not be used in conjunction with the `rounded` prop.
:type boolean:
:param transition: Applies a transition when nodes are opened and closed
:type boolean:
:param value: Allows one to control which nodes are selected. The array consists of the `item-key` of each selected item. Is used with `@input` event to allow for `v-model` binding.
:type array:
Events
:param input: Emits the array of selected items when this value changes
:param update_active: Emits the array of active items when this value changes
:param update_open: Emits the array of open items when this value changes
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-treeview", children, **kwargs)
self._attr_names += [
"activatable",
"active",
"active_class",
"color",
"dark",
"dense",
"expand_icon",
"filter", # JS functions unimplemented
"hoverable",
"indeterminate_icon",
"item_children",
"item_disabled",
"item_key",
"item_text",
"items",
"light",
"load_children", # JS functions unimplemented
"loading_icon",
"multiple_active",
"off_icon",
"on_icon",
"open",
"open_all",
"open_on_click",
"return_object",
"rounded",
"search",
"selectable",
"selected_color",
"selection_type",
"shaped",
"transition",
"value",
]
self._event_names += [
"input",
("update_active", "update:active"),
("update_open", "update:open"),
]
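# Usage sketch (illustrative): a selectable tree fed from state. The item
# shape follows the Vuetify defaults for item_key/item_text/item_children
# ("id", "name", "children"); "tree_items" and "selected_ids" are made-up
# state keys.
#
#     VTreeview(
#         items=("tree_items", [
#             {"id": 1, "name": "src", "children": [
#                 {"id": 2, "name": "main.py"},
#             ]},
#         ]),
#         selectable=True,
#         v_model=("selected_ids", []),
#     )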
class VVirtualScroll(AbstractElement):
"""
Vuetify's VVirtualScroll component. See more info and examples |VVirtualScroll_vuetify_link|.
.. |VVirtualScroll_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-virtual-scroll" target="_blank">here</a>
:param bench: The number of items **outside** the user view that are rendered (even if they are **not** viewable); to help prevent empty white space when scrolling *fast*.
:type ['number', 'string']:
:param height: Height of the component as a css value
:type ['number', 'string']:
:param item_height: Height in pixels of the items to display
:type ['number', 'string']:
:param items: The array of items to display
:type array:
:param max_height: Sets the maximum height for the component.
:type ['number', 'string']:
:param max_width: Sets the maximum width for the component.
:type ['number', 'string']:
:param min_height: Sets the minimum height for the component.
:type ['number', 'string']:
:param min_width: Sets the minimum width for the component.
:type ['number', 'string']:
:param width: Sets the width for the component.
:type ['number', 'string']:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-virtual-scroll", children, **kwargs)
self._attr_names += [
"bench",
"height",
"item_height",
"items",
"max_height",
"max_width",
"min_height",
"min_width",
"width",
]
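# Usage sketch (illustrative): render only the visible slice of a long list;
# the per-row template lives in the item slot on the client side, so only
# the wrapper props are shown here.
#
#     VVirtualScroll(
#         items=("rows", [f"row {i}" for i in range(10_000)]),
#         item_height=24,
#         height=300,
#         bench=2,
#     )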
class VWindow(AbstractElement):
"""
Vuetify's VWindow component. See more info and examples |VWindow_vuetify_link|.
.. |VWindow_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-window" target="_blank">here</a>
:param active_class: The **active-class** applied to children when they are activated.
:type string:
:param continuous: If `true`, window will "wrap around" from the last item to the first, and from the first item to the last
:type boolean:
:param dark: See description |VWindow_vuetify_link|.
:type boolean:
:param light: Applies the light theme variant to the component.
:type boolean:
:param next_icon: Icon used for the "next" button if `show-arrows` is `true`
:type ['boolean', 'string']:
:param prev_icon: Icon used for the "prev" button if `show-arrows` is `true`
:type ['boolean', 'string']:
:param reverse: Reverse the normal transition direction.
:type boolean:
:param show_arrows: Display the "next" and "prev" buttons
:type boolean:
:param show_arrows_on_hover: Display the "next" and "prev" buttons on hover. `show-arrows` MUST ALSO be set.
:type boolean:
:param tag: Specify a custom tag used on the root element.
:type string:
:param touch: Provide a custom **left** and **right** function when swiped left or right.
:type object:
:param touchless: Disable touch support.
:type boolean:
:param value: The designated model value for the component.
:type any:
:param vertical: Uses a vertical transition when changing windows.
:type boolean:
Events
:param change: Emitted when the component value is changed by user interaction
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-window", children, **kwargs)
self._attr_names += [
"active_class",
"continuous",
"dark",
"light",
"next_icon",
"prev_icon",
"reverse",
"show_arrows",
"show_arrows_on_hover",
"tag",
"touch",
"touchless",
"value",
"vertical",
]
self._event_names += [
"change",
]
class VWindowItem(AbstractElement):
"""
Vuetify's VWindowItem component. See more info and examples |VWindowItem_vuetify_link|.
.. |VWindowItem_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-window-item" target="_blank">here</a>
:param active_class: See description |VWindowItem_vuetify_link|.
:type string:
:param disabled: Prevents the item from becoming active when using the "next" and "prev" buttons or the `toggle` method
:type boolean:
:param eager: Will force the components content to render on mounted. This is useful if you have content that will not be rendered in the DOM that you want crawled for SEO.
:type boolean:
:param reverse_transition: Sets the reverse transition
:type ['boolean', 'string']:
:param transition: See description |VWindowItem_vuetify_link|.
:type ['boolean', 'string']:
:param value: The value used when the component is selected in a group. If not provided, the index will be used.
:type any:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-window-item", children, **kwargs)
self._attr_names += [
"active_class",
"disabled",
"eager",
"reverse_transition",
"transition",
"value",
]
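# Usage sketch (illustrative): a two-pane window bound to a state index;
# continuous=True lets it wrap from the last pane back to the first.
#
#     with VWindow(v_model=("pane", 0), continuous=True, show_arrows=True):
#         with VWindowItem():
#             ...  # first pane
#         with VWindowItem():
#             ...  # second pane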
class VCarouselTransition(AbstractElement):
"""
Vuetify's VCarouselTransition component. See more info and examples |VCarouselTransition_vuetify_link|.
.. |VCarouselTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-carousel-transition" target="_blank">here</a>
:param group: See description |VCarouselTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VCarouselTransition_vuetify_link|.
:type boolean:
:param mode: See description |VCarouselTransition_vuetify_link|.
:type string:
:param origin: See description |VCarouselTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-carousel-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
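# Usage sketch (illustrative): the transition wrappers in this module all
# share the same five props and are used the same way, wrapping content
# whose enter/leave should animate. The v_if tuple binding follows trame
# conventions (an assumption).
#
#     with VCarouselTransition(hide_on_leave=True):
#         html.Div("Animated content", v_if=("visible", True))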
class VCarouselReverseTransition(AbstractElement):
"""
Vuetify's VCarouselReverseTransition component. See more info and examples |VCarouselReverseTransition_vuetify_link|.
.. |VCarouselReverseTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-carousel-reverse-transition" target="_blank">here</a>
:param group: See description |VCarouselReverseTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VCarouselReverseTransition_vuetify_link|.
:type boolean:
:param mode: See description |VCarouselReverseTransition_vuetify_link|.
:type string:
:param origin: See description |VCarouselReverseTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-carousel-reverse-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VTabTransition(AbstractElement):
"""
Vuetify's VTabTransition component. See more info and examples |VTabTransition_vuetify_link|.
.. |VTabTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tab-transition" target="_blank">here</a>
:param group: See description |VTabTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VTabTransition_vuetify_link|.
:type boolean:
:param mode: See description |VTabTransition_vuetify_link|.
:type string:
:param origin: See description |VTabTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tab-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VTabReverseTransition(AbstractElement):
"""
Vuetify's VTabReverseTransition component. See more info and examples |VTabReverseTransition_vuetify_link|.
.. |VTabReverseTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-tab-reverse-transition" target="_blank">here</a>
:param group: See description |VTabReverseTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VTabReverseTransition_vuetify_link|.
:type boolean:
:param mode: See description |VTabReverseTransition_vuetify_link|.
:type string:
:param origin: See description |VTabReverseTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-tab-reverse-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VMenuTransition(AbstractElement):
"""
Vuetify's VMenuTransition component. See more info and examples |VMenuTransition_vuetify_link|.
.. |VMenuTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-menu-transition" target="_blank">here</a>
:param group: See description |VMenuTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VMenuTransition_vuetify_link|.
:type boolean:
:param mode: See description |VMenuTransition_vuetify_link|.
:type string:
:param origin: See description |VMenuTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-menu-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VFabTransition(AbstractElement):
"""
Vuetify's VFabTransition component. See more info and examples |VFabTransition_vuetify_link|.
.. |VFabTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-fab-transition" target="_blank">here</a>
:param group: See description |VFabTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VFabTransition_vuetify_link|.
:type boolean:
:param mode: See description |VFabTransition_vuetify_link|.
:type string:
:param origin: See description |VFabTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-fab-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VDialogTransition(AbstractElement):
"""
Vuetify's VDialogTransition component. See more info and examples |VDialogTransition_vuetify_link|.
.. |VDialogTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-dialog-transition" target="_blank">here</a>
:param group: See description |VDialogTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VDialogTransition_vuetify_link|.
:type boolean:
:param mode: See description |VDialogTransition_vuetify_link|.
:type string:
:param origin: See description |VDialogTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-dialog-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VDialogBottomTransition(AbstractElement):
"""
Vuetify's VDialogBottomTransition component. See more info and examples |VDialogBottomTransition_vuetify_link|.
.. |VDialogBottomTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-dialog-bottom-transition" target="_blank">here</a>
:param group: See description |VDialogBottomTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VDialogBottomTransition_vuetify_link|.
:type boolean:
:param mode: See description |VDialogBottomTransition_vuetify_link|.
:type string:
:param origin: See description |VDialogBottomTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-dialog-bottom-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VDialogTopTransition(AbstractElement):
"""
Vuetify's VDialogTopTransition component. See more info and examples |VDialogTopTransition_vuetify_link|.
.. |VDialogTopTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-dialog-top-transition" target="_blank">here</a>
:param group: See description |VDialogTopTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VDialogTopTransition_vuetify_link|.
:type boolean:
:param mode: See description |VDialogTopTransition_vuetify_link|.
:type string:
:param origin: See description |VDialogTopTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-dialog-top-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VFadeTransition(AbstractElement):
"""
Vuetify's VFadeTransition component. See more info and examples |VFadeTransition_vuetify_link|.
.. |VFadeTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-fade-transition" target="_blank">here</a>
:param group: See description |VFadeTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VFadeTransition_vuetify_link|.
:type boolean:
:param mode: See description |VFadeTransition_vuetify_link|.
:type string:
:param origin: See description |VFadeTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-fade-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VScaleTransition(AbstractElement):
"""
Vuetify's VScaleTransition component. See more info and examples |VScaleTransition_vuetify_link|.
.. |VScaleTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-scale-transition" target="_blank">here</a>
:param group: See description |VScaleTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VScaleTransition_vuetify_link|.
:type boolean:
:param mode: See description |VScaleTransition_vuetify_link|.
:type string:
:param origin: See description |VScaleTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-scale-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VScrollXTransition(AbstractElement):
"""
Vuetify's VScrollXTransition component. See more info and examples |VScrollXTransition_vuetify_link|.
.. |VScrollXTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-scroll-x-transition" target="_blank">here</a>
:param group: See description |VScrollXTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VScrollXTransition_vuetify_link|.
:type boolean:
:param mode: See description |VScrollXTransition_vuetify_link|.
:type string:
:param origin: See description |VScrollXTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-scroll-x-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VScrollXReverseTransition(AbstractElement):
"""
Vuetify's VScrollXReverseTransition component. See more info and examples |VScrollXReverseTransition_vuetify_link|.
.. |VScrollXReverseTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-scroll-x-reverse-transition" target="_blank">here</a>
:param group: See description |VScrollXReverseTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VScrollXReverseTransition_vuetify_link|.
:type boolean:
:param mode: See description |VScrollXReverseTransition_vuetify_link|.
:type string:
:param origin: See description |VScrollXReverseTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-scroll-x-reverse-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VScrollYTransition(AbstractElement):
"""
Vuetify's VScrollYTransition component. See more info and examples |VScrollYTransition_vuetify_link|.
.. |VScrollYTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-scroll-y-transition" target="_blank">here</a>
:param group: See description |VScrollYTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VScrollYTransition_vuetify_link|.
:type boolean:
:param mode: See description |VScrollYTransition_vuetify_link|.
:type string:
:param origin: See description |VScrollYTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-scroll-y-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VScrollYReverseTransition(AbstractElement):
"""
Vuetify's VScrollYReverseTransition component. See more info and examples |VScrollYReverseTransition_vuetify_link|.
.. |VScrollYReverseTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-scroll-y-reverse-transition" target="_blank">here</a>
:param group: See description |VScrollYReverseTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VScrollYReverseTransition_vuetify_link|.
:type boolean:
:param mode: See description |VScrollYReverseTransition_vuetify_link|.
:type string:
:param origin: See description |VScrollYReverseTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-scroll-y-reverse-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VSlideXTransition(AbstractElement):
"""
Vuetify's VSlideXTransition component. See more info and examples |VSlideXTransition_vuetify_link|.
.. |VSlideXTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slide-x-transition" target="_blank">here</a>
:param group: See description |VSlideXTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VSlideXTransition_vuetify_link|.
:type boolean:
:param mode: See description |VSlideXTransition_vuetify_link|.
:type string:
:param origin: See description |VSlideXTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slide-x-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VSlideXReverseTransition(AbstractElement):
"""
Vuetify's VSlideXReverseTransition component. See more info and examples |VSlideXReverseTransition_vuetify_link|.
.. |VSlideXReverseTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slide-x-reverse-transition" target="_blank">here</a>
:param group: See description |VSlideXReverseTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VSlideXReverseTransition_vuetify_link|.
:type boolean:
:param mode: See description |VSlideXReverseTransition_vuetify_link|.
:type string:
:param origin: See description |VSlideXReverseTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slide-x-reverse-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VSlideYTransition(AbstractElement):
"""
Vuetify's VSlideYTransition component. See more info and examples |VSlideYTransition_vuetify_link|.
.. |VSlideYTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slide-y-transition" target="_blank">here</a>
:param group: See description |VSlideYTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VSlideYTransition_vuetify_link|.
:type boolean:
:param mode: See description |VSlideYTransition_vuetify_link|.
:type string:
:param origin: See description |VSlideYTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slide-y-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VSlideYReverseTransition(AbstractElement):
"""
Vuetify's VSlideYReverseTransition component. See more info and examples |VSlideYReverseTransition_vuetify_link|.
.. |VSlideYReverseTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-slide-y-reverse-transition" target="_blank">here</a>
:param group: See description |VSlideYReverseTransition_vuetify_link|.
:type boolean:
:param hide_on_leave: Hides the leaving element (no exit animation)
:type boolean:
:param leave_absolute: See description |VSlideYReverseTransition_vuetify_link|.
:type boolean:
:param mode: See description |VSlideYReverseTransition_vuetify_link|.
:type string:
:param origin: See description |VSlideYReverseTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-slide-y-reverse-transition", children, **kwargs)
self._attr_names += [
"group",
"hide_on_leave",
"leave_absolute",
"mode",
"origin",
]
class VExpandTransition(AbstractElement):
"""
Vuetify's VExpandTransition component. See more info and examples |VExpandTransition_vuetify_link|.
.. |VExpandTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-expand-transition" target="_blank">here</a>
:param mode: See description |VExpandTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-expand-transition", children, **kwargs)
self._attr_names += [
"mode",
]
class VExpandXTransition(AbstractElement):
"""
Vuetify's VExpandXTransition component. See more info and examples |VExpandXTransition_vuetify_link|.
.. |VExpandXTransition_vuetify_link| raw:: html
<a href="https://vuetifyjs.com/api/v-expand-x-transition" target="_blank">here</a>
:param mode: See description |VExpandXTransition_vuetify_link|.
:type string:
"""
def __init__(self, children=None, **kwargs):
super().__init__("v-expand-x-transition", children, **kwargs)
self._attr_names += [
"mode",
]
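if __name__ == "__main__":
    # Hedged usage sketch, not part of the generated bindings: compose two
    # of the wrappers defined above to show the children/kwargs pattern.
    # How an element renders depends on the AbstractElement implementation
    # in the installed trame version, so this only prints the object.
    fade = VFadeTransition(
        children=[VScaleTransition(hide_on_leave=True)],
        mode="out-in",
    )
    print(fade)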
|
[
"trame.get_app_instance",
"trame.html.Template.slot_names.update",
"numpy.isinf",
"numpy.isnan"
] |
[((241, 259), 'trame.get_app_instance', 'get_app_instance', ([], {}), '()\n', (257, 259), False, 'from trame import get_app_instance\n'), ((3125, 3163), 'trame.html.Template.slot_names.update', 'Template.slot_names.update', (['slot_names'], {}), '(slot_names)\n', (3151, 3163), False, 'from trame.html import AbstractElement, Template\n'), ((907, 922), 'numpy.isnan', 'np.isnan', (['value'], {}), '(value)\n', (915, 922), True, 'import numpy as np\n'), ((926, 941), 'numpy.isinf', 'np.isinf', (['value'], {}), '(value)\n', (934, 941), True, 'import numpy as np\n')]
|
# probability.py
import scipy
import scipy.integrate
import scipy.optimize
import scipy.special
import numpy as np
################################################################################
# Functions:
# Phi
# T
# SkewNorm
# SampleSkewNorm
################################################################################
def Phi(x, m, s, a):
return 0.5 * (1. + scipy.special.erf((x - m) / s / pow(2, 0.5)))
def T(h, a):
f = lambda x: np.exp(-0.5 * pow(h, 2) * (1 + pow(x, 2))) / (1 + pow(x,2))
temp = scipy.integrate.quad(f, 0, a)[0]
return 1. / (2. * np.pi) * temp
def SkewNorm(x, m, s, a):
return Phi(x, m, s, a) - 2 * T((x - m)/s, a)
def SampleSkewNorm(m, s, a):
""" A quick and dirty implementation of a skew-normal random variable.
Returns values from a skew-normal distribution with location m, scale s, and
    shape parameter a (see ). When a = 0, this is just a Gaussian with mean m
    and standard deviation s.
"""
# first, choose a random value in [0,1]:
p = np.random.rand()
    # next, find the value of x corresponding to that cumulative probability
    # for the skew-normal
func = lambda x: p - SkewNorm(x, m, s, a)
x = scipy.optimize.newton(func, 0)
return x
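if __name__ == '__main__':
    # Hedged sanity check: for a = 0 the skew-normal reduces to a plain
    # Gaussian, so the sample mean should land near the location m.
    np.random.seed(0)
    draws = [SampleSkewNorm(m=1.0, s=2.0, a=0.0) for _ in range(200)]
    print('sample mean (expect ~1.0):', np.mean(draws))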
|
[
"numpy.random.rand",
"scipy.optimize.newton",
"scipy.integrate.quad"
] |
[((962, 978), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (976, 978), True, 'import numpy as np\n'), ((1134, 1164), 'scipy.optimize.newton', 'scipy.optimize.newton', (['func', '(0)'], {}), '(func, 0)\n', (1155, 1164), False, 'import scipy\n'), ((459, 488), 'scipy.integrate.quad', 'scipy.integrate.quad', (['f', '(0)', 'a'], {}), '(f, 0, a)\n', (479, 488), False, 'import scipy\n')]
|
"""
desispec.fiberbitmasking
========================
Functions to properly take FIBERSTATUS into account in the variances for data reduction
"""
from __future__ import absolute_import, division
import numpy as np
from astropy.table import Table
from desiutil.log import get_logger
from desispec.maskbits import fibermask as fmsk
from desispec.maskbits import specmask
def get_fiberbitmasked_frame(frame,bitmask=None,ivar_framemask=True):
"""
    Wrapper around get_fiberbitmasked_frame_arrays that applies the
    result to the frame and returns the modified frame, rather than
    just the ivar and mask arrays.
NOTE: The input "frame" variable itself is modified and returned,
not a copy.
"""
ivar,mask = get_fiberbitmasked_frame_arrays(frame,bitmask,ivar_framemask,return_mask=True)
frame.mask = mask
frame.ivar = ivar
return frame
def get_fiberbitmasked_frame_arrays(frame,bitmask=None,ivar_framemask=True,return_mask=False):
"""
Function that takes a frame object and a bitmask and
returns ivar (and optionally mask) array(s) that have fibers with
offending bits in fibermap['FIBERSTATUS'] set to
0 in ivar and optionally flips a bit in mask.
input:
frame: frame object
bitmask: int32 or list/array of int32's derived from desispec.maskbits.fibermask
OR string indicating a keyword for get_fiberbitmask_comparison_value()
    ivar_framemask: bool (default=True), tells code whether to multiply the output
                    ivar by (frame.mask==0)
return_mask: bool, (default=False). Returns the frame.mask with the logic of
FIBERSTATUS applied.
output:
ivar: frame.ivar where the fibers with FIBERSTATUS & bitmask > 0
set to zero ivar
mask: (optional) frame.mask logically OR'ed with BADFIBER bit in cases with
a bad FIBERSTATUS
example bitmask list:
bitmask = [fmsk.BROKENFIBER,fmsk.UNASSIGNED,fmsk.BADFIBER,\
fmsk.BADTRACE,fmsk.MANYBADCOL, fmsk.MANYREJECTED]
bitmask = get_fiberbitmask_comparison_value(kind='fluxcalib')
bitmask = 'fluxcalib'
bitmask = 4128780
"""
ivar = frame.ivar.copy()
mask = frame.mask.copy()
if ivar_framemask and frame.mask is not None:
ivar *= (frame.mask==0)
fmap = Table(frame.fibermap)
if frame.fibermap is None:
log = get_logger()
log.warning("No fibermap was given, so no FIBERSTATUS check applied.")
if bitmask is None or frame.fibermap is None:
if return_mask:
return ivar, mask
else:
return ivar
if type(bitmask) in [int,np.int32]:
bad = bitmask
elif type(bitmask) == str:
if bitmask.isnumeric():
bad = np.int32(bitmask)
else:
bad = get_fiberbitmask_comparison_value(kind=bitmask)
else:
bad = bitmask[0]
for bit in bitmask[1:]:
bad |= bit
# find if any fibers have an intersection with the bad bits
badfibers = fmap['FIBER'][ (fmap['FIBERSTATUS'] & bad) > 0 ].data
    # map the global fiber number to its row within this frame
    # (500 fibers per spectrograph)
    badfibers = badfibers % 500
# For the bad fibers, loop through and nullify them
for fiber in badfibers:
mask[fiber] |= specmask.BADFIBER
if ivar_framemask :
ivar[fiber] = 0.
if return_mask:
return ivar,mask
else:
return ivar
def get_fiberbitmask_comparison_value(kind='fluxcalib'):
"""
Takes a string argument and returns a 32-bit integer representing the logical OR of all
relevant fibermask bits for that given reduction step
input:
kind: str : string designating which combination of bits to use based on the operation
possible values are:
"all", "sky" (or "skysub"), "flat", "flux" (or "fluxcalib"), "star" (or "stdstars")
"""
if kind.lower() == 'all':
return get_all_fiberbitmask_val()
elif kind.lower()[:3] == 'sky':
return get_skysub_fiberbitmask_val()
elif kind.lower() == 'flat':
return get_flat_fiberbitmask_val()
elif 'star' in kind.lower():
return get_stdstars_fiberbitmask_val()
elif 'flux' in kind.lower():
return get_fluxcalib_fiberbitmask_val()
else:
log = get_logger()
log.warning("Keyword {} given to get_fiberbitmask_comparison_value() is invalid.".format(kind)+\
" Using 'fluxcalib' fiberbitmask.")
return get_fluxcalib_fiberbitmask_val()
def get_skysub_fiberbitmask_val():
return get_all_fiberbitmask_val()
def get_flat_fiberbitmask_val():
return (fmsk.BROKENFIBER | fmsk.BADFIBER | fmsk.BADTRACE | fmsk.BADARC | \
fmsk.MANYBADCOL | fmsk.MANYREJECTED )
def get_fluxcalib_fiberbitmask_val():
return get_all_fiberbitmask_val()
def get_stdstars_fiberbitmask_val():
return get_all_fiberbitmask_val()
def get_all_nonamp_fiberbitmask_val():
"""Return a mask for all bad FIBERSTATUS bits except BADAMPB/R/Z
Note: does not include STUCKPOSITIONER or RESTRICTED, which could still
be on a valid sky location, or even a target for RESTRICTED.
"""
return (fmsk.UNASSIGNED | fmsk.BROKENFIBER | fmsk.MISSINGPOSITION | fmsk.BADPOSITION | \
fmsk.BADFIBER | fmsk.BADTRACE | fmsk.BADARC | fmsk.BADFLAT | \
fmsk.MANYBADCOL | fmsk.MANYREJECTED )
def get_justamps_fiberbitmask():
return ( fmsk.BADAMPB | fmsk.BADAMPR | fmsk.BADAMPZ )
def get_all_fiberbitmask_with_amp(band):
nonamp_mask = get_all_nonamp_fiberbitmask_val()
if band.lower()[0] == 'b':
amp_mask = fmsk.BADAMPB
elif band.lower()[0] == 'r':
amp_mask = fmsk.BADAMPR
elif band.lower()[0] == 'z':
amp_mask = fmsk.BADAMPZ
else:
log = get_logger()
log.error("Didn't recognize band={}".format(band))
amp_mask = np.int32(0)
return ( nonamp_mask | amp_mask )
def get_all_fiberbitmask_val():
return ( get_all_nonamp_fiberbitmask_val() | get_justamps_fiberbitmask() )
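if __name__ == '__main__':
    # Hedged illustration: the comparison values are plain integer
    # bitmasks, so they can be inspected without constructing a frame.
    for kind in ('all', 'sky', 'flat', 'fluxcalib', 'stdstars'):
        print('{:10s} -> {}'.format(kind, get_fiberbitmask_comparison_value(kind=kind)))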
|
[
"desiutil.log.get_logger",
"astropy.table.Table",
"numpy.int32"
] |
[((2333, 2354), 'astropy.table.Table', 'Table', (['frame.fibermap'], {}), '(frame.fibermap)\n', (2338, 2354), False, 'from astropy.table import Table\n'), ((2401, 2413), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (2411, 2413), False, 'from desiutil.log import get_logger\n'), ((2780, 2797), 'numpy.int32', 'np.int32', (['bitmask'], {}), '(bitmask)\n', (2788, 2797), True, 'import numpy as np\n'), ((5778, 5790), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (5788, 5790), False, 'from desiutil.log import get_logger\n'), ((5869, 5880), 'numpy.int32', 'np.int32', (['(0)'], {}), '(0)\n', (5877, 5880), True, 'import numpy as np\n'), ((4284, 4296), 'desiutil.log.get_logger', 'get_logger', ([], {}), '()\n', (4294, 4296), False, 'from desiutil.log import get_logger\n')]
|
import math
import torch
import gpytorch
import numpy as np
import random
from matplotlib import pyplot as plt
from pssgp.kernels import MyMaternKernel
from unittest import TestCase
# We will use the simplest form of GP model, exact inference
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_x, train_y, likelihood, use_gpy):
super(ExactGPModel, self).__init__(train_x, train_y, likelihood)
self.mean_module = gpytorch.means.ConstantMean()
if use_gpy:
self.covar_module = gpytorch.kernels.ScaleKernel(
gpytorch.kernels.MaternKernel(nu=1.5))
else:
self.covar_module = MyMaternKernel(nu=1.5)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
def run(model,likelihood, train_x, train_y):
# Find optimal model hyperparameters
model.train()
likelihood.train()
# Use the adam optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=0.1) # Includes GaussianLikelihood parameters
# "Loss" for GPs - the marginal log likelihood
mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
for i in range(50):
# Zero gradients from previous iteration
optimizer.zero_grad()
# Output from model
output = model(train_x)
# Calc loss and backprop gradients
loss = -mll(output, train_y)
loss.backward()
# print('Iter %d/%d - Loss: %.3f lengthscale: %.3f noise: %.3f' % (
# i + 1, 50, loss.item(),
# model.covar_module.base_kernel.lengthscale.item(),
# model.likelihood.noise.item()
# ))
optimizer.step()
# Get into evaluation (predictive posterior) mode
model.eval()
likelihood.eval()
# Test points are regularly spaced along [0,1]
# Make predictions by feeding model through likelihood
with torch.no_grad(), gpytorch.settings.fast_pred_var():
test_x = torch.linspace(0, 1, 51)
observed_pred = likelihood(model(test_x))
return observed_pred
class TestCompatibilityWithGPyTorch(TestCase):
def setUp(self) -> None:
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
torch.use_deterministic_algorithms(True)
        # cuDNN benchmark autotuning is nondeterministic, so keep it off
        # when deterministic algorithms are requested
        torch.backends.cudnn.benchmark = False
# Training data is 100 points in [0,1] inclusive regularly spaced
self.train_x = torch.linspace(0, 1, 100)
# True function is sin(2*pi*x) with Gaussian noise
self.train_y = torch.sin(self.train_x * (2 * math.pi)) + \
torch.randn(self.train_x.size()) * math.sqrt(0.04)
# self.likelihood = gpytorch.likelihoods.GaussianLikelihood()
def test_result(self):
likelihood1 = gpytorch.likelihoods.GaussianLikelihood()
gpymodel = ExactGPModel(self.train_x,
self.train_y,
likelihood1,
use_gpy=True)
likelihood2 = gpytorch.likelihoods.GaussianLikelihood()
mymodel = ExactGPModel(self.train_x,
self.train_y,
likelihood2,
use_gpy=False)
result1 = run(gpymodel, likelihood1, train_x=self.train_x, train_y=self.train_y)
result2 = run(mymodel, likelihood2, train_x=self.train_x, train_y=self.train_y)
assert torch.allclose(result1.loc, result2.loc)
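if __name__ == '__main__':
    # Convenience entry point so the comparison test can be run directly
    # with `python <this file>` instead of through a test runner.
    import unittest
    unittest.main()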
|
[
"numpy.random.seed",
"gpytorch.distributions.MultivariateNormal",
"gpytorch.mlls.ExactMarginalLogLikelihood",
"math.sqrt",
"torch.manual_seed",
"gpytorch.settings.fast_pred_var",
"pssgp.kernels.MyMaternKernel",
"gpytorch.kernels.MaternKernel",
"random.seed",
"gpytorch.likelihoods.GaussianLikelihood",
"gpytorch.means.ConstantMean",
"torch.linspace",
"torch.use_deterministic_algorithms",
"torch.no_grad",
"torch.allclose",
"torch.sin"
] |
[((1210, 1269), 'gpytorch.mlls.ExactMarginalLogLikelihood', 'gpytorch.mlls.ExactMarginalLogLikelihood', (['likelihood', 'model'], {}), '(likelihood, model)\n', (1250, 1269), False, 'import gpytorch\n'), ((453, 482), 'gpytorch.means.ConstantMean', 'gpytorch.means.ConstantMean', ([], {}), '()\n', (480, 482), False, 'import gpytorch\n'), ((828, 886), 'gpytorch.distributions.MultivariateNormal', 'gpytorch.distributions.MultivariateNormal', (['mean_x', 'covar_x'], {}), '(mean_x, covar_x)\n', (869, 886), False, 'import gpytorch\n'), ((2017, 2032), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2030, 2032), False, 'import torch\n'), ((2034, 2067), 'gpytorch.settings.fast_pred_var', 'gpytorch.settings.fast_pred_var', ([], {}), '()\n', (2065, 2067), False, 'import gpytorch\n'), ((2086, 2110), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(51)'], {}), '(0, 1, 51)\n', (2100, 2110), False, 'import torch\n'), ((2276, 2296), 'torch.manual_seed', 'torch.manual_seed', (['(0)'], {}), '(0)\n', (2293, 2296), False, 'import torch\n'), ((2305, 2322), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2319, 2322), True, 'import numpy as np\n'), ((2331, 2345), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (2342, 2345), False, 'import random\n'), ((2354, 2394), 'torch.use_deterministic_algorithms', 'torch.use_deterministic_algorithms', (['(True)'], {}), '(True)\n', (2388, 2394), False, 'import torch\n'), ((2539, 2564), 'torch.linspace', 'torch.linspace', (['(0)', '(1)', '(100)'], {}), '(0, 1, 100)\n', (2553, 2564), False, 'import torch\n'), ((2881, 2922), 'gpytorch.likelihoods.GaussianLikelihood', 'gpytorch.likelihoods.GaussianLikelihood', ([], {}), '()\n', (2920, 2922), False, 'import gpytorch\n'), ((3144, 3185), 'gpytorch.likelihoods.GaussianLikelihood', 'gpytorch.likelihoods.GaussianLikelihood', ([], {}), '()\n', (3183, 3185), False, 'import gpytorch\n'), ((3563, 3603), 'torch.allclose', 'torch.allclose', (['result1.loc', 'result2.loc'], {}), '(result1.loc, result2.loc)\n', (3577, 3603), False, 'import torch\n'), ((687, 709), 'pssgp.kernels.MyMaternKernel', 'MyMaternKernel', ([], {'nu': '(1.5)'}), '(nu=1.5)\n', (701, 709), False, 'from pssgp.kernels import MyMaternKernel\n'), ((2647, 2686), 'torch.sin', 'torch.sin', (['(self.train_x * (2 * math.pi))'], {}), '(self.train_x * (2 * math.pi))\n', (2656, 2686), False, 'import torch\n'), ((602, 639), 'gpytorch.kernels.MaternKernel', 'gpytorch.kernels.MaternKernel', ([], {'nu': '(1.5)'}), '(nu=1.5)\n', (631, 639), False, 'import gpytorch\n'), ((2744, 2759), 'math.sqrt', 'math.sqrt', (['(0.04)'], {}), '(0.04)\n', (2753, 2759), False, 'import math\n')]
|
import matplotlib as mpl
import numpy as np
import pandas
import sys
from matplotlib import pyplot as pp
from pprint import pprint
from prep_data import get_raw_xy
from prep_data import get_vpo
sizes = [[15, 8, 10], [20, 10, 20]]
sidx = 1
def setup_plot(sidx=sidx, yfrom=1973, yto=2020, step=4, xls=sizes[sidx][2]):
pp.rcParams['figure.figsize'] = sizes[sidx][:2]
mpl.rc('xtick', labelsize=xls)
mpl.rc('ytick', labelsize=sizes[sidx][2])
pp.style.use('dark_background')
ticks_range = list(range(yfrom, yto, step))
ax = pp.gca()
ax.set_xticks(ticks_range)
ax.tick_params(grid_alpha=0.5)
    pp.ylabel('Passengers in billions', fontsize=sizes[sidx][2])
pp.xlabel('Year', fontsize=sizes[sidx][2])
pp.grid()
return pp, ax
def plotme(values, years, prep_values=[], train_x=np.array([None]), test_x=np.array([None]), baseline=[], trscore='', tscore='', title='', bttscore=''):
pp, ax = setup_plot()
pp.plot(years, values, label='Raw values', color='red', linewidth=2)
if prep_values != []:
pp.plot(years, prep_values, label='Prepared values')
    if train_x.all() is not None:
pp.plot(years, train_x, label='Training prediction'.ljust(
25) + '%s' % trscore, color='green', linewidth=2)
    if test_x.all() is not None:
pp.plot(years, test_x, label='Test prediction '.ljust(
27) + '%s' % tscore, color='blue', linewidth=2)
if baseline != []:
pp.plot(years, baseline, label='Baseline Training/Test prediction'.ljust(35) +
'%s' % bttscore, color='yellow', linewidth=2)
ax.legend(loc='best', fontsize=sizes[sidx][2])
if title:
pp.title(title)
pp.show()
def plothist(history, sidx=sidx):
pp.rcParams['figure.figsize'] = sizes[sidx][:2]
mpl.rc('xtick', labelsize=sizes[sidx][2])
mpl.rc('ytick', labelsize=sizes[sidx][2])
pp.ylabel('Mean absolute percentage error', fontsize=sizes[sidx][2])
pp.xlabel('Epoch', fontsize=sizes[sidx][2])
pp.grid()
h = history.history
m = 'mean_absolute_percentage_error'
pp.plot(range(1, len(h[m]) + 1), h[m])
ax = pp.gca()
ax.set_yticks(range(1, 100, 9))
    ax.set_xticks(range(1, len(h[m]) + 1))
pp.title('Learning curve')
pp.show()
def plotpred(values, years, predicted, error, title=''):
pp, ax = setup_plot(yfrom=2016, yto=2040, step=2, xls=12)
years += [years[-1] + 1]
v = values + [np.nan]
p = [np.nan for i in values]
p += [predicted[0][0]]
p[-2] = v[-2]
pv = predicted[0][0]
pp.plot(years, v, label='Raw values', color='red', linewidth=2)
pp.plot(years, p, label='Prediction %s +/- %s' %
("{:,.0f}".format(pv), "{:,.0f}".format(error)), color='green', linewidth=4, linestyle=":")
ax.legend(loc='best', fontsize=sizes[sidx][2])
pp.title(title)
pp.show()
def prep_tt_for_plot(model, years, train_x, train_y, test_x, test_y):
trainPredict = model.predict(train_x)
testPredict = model.predict(test_x)
trainPredictPlot = np.empty((len(years), 1))
trainPredictPlot[:] = np.nan
trainPredictPlot[:len(trainPredict)] = trainPredict
# Remove the gap in graph by duplicating the last value
# at the end of our training data set
trainPredictPlot[len(trainPredict)] = trainPredict[-1]
testPredictPlot = np.empty((len(years), 1))
testPredictPlot[:] = np.nan
testPredictPlot[len(trainPredict):] = testPredict
return trainPredictPlot, testPredictPlot
if __name__ == '__main__':
d = pandas.read_csv(sys.argv[1], header=2)
values, years = get_raw_xy(d)
p_values = get_vpo(values)
print('Year\tCurrent\tFuture')
pprint(list(zip(zip(years, values), p_values)))
plotme(values, years, prep_values=p_values)
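    # Hedged extra demo: a dummy one-step forecast through plotpred; the
    # predicted value and the error bar below are illustrative only.
    vals, yrs = list(values), list(years)
    plotpred(vals, yrs, predicted=[[vals[-1] * 1.05]], error=1000.0,
             title='Dummy one-step forecast')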
|
[
"matplotlib.pyplot.title",
"matplotlib.rc",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"prep_data.get_raw_xy",
"pandas.read_csv",
"matplotlib.pyplot.style.use",
"prep_data.get_vpo",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.grid"
] |
[((376, 406), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {'labelsize': 'xls'}), "('xtick', labelsize=xls)\n", (382, 406), True, 'import matplotlib as mpl\n'), ((411, 452), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {'labelsize': 'sizes[sidx][2]'}), "('ytick', labelsize=sizes[sidx][2])\n", (417, 452), True, 'import matplotlib as mpl\n'), ((457, 488), 'matplotlib.pyplot.style.use', 'pp.style.use', (['"""dark_background"""'], {}), "('dark_background')\n", (469, 488), True, 'from matplotlib import pyplot as pp\n'), ((546, 554), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (552, 554), True, 'from matplotlib import pyplot as pp\n'), ((625, 685), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['"""Passengers in billions"""'], {'fontsize': 'sizes[sidx][2]'}), "('Passengers in billions', fontsize=sizes[sidx][2])\n", (634, 685), True, 'from matplotlib import pyplot as pp\n'), ((690, 732), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['"""Year"""'], {'fontsize': 'sizes[sidx][2]'}), "('Year', fontsize=sizes[sidx][2])\n", (699, 732), True, 'from matplotlib import pyplot as pp\n'), ((737, 746), 'matplotlib.pyplot.grid', 'pp.grid', ([], {}), '()\n', (744, 746), True, 'from matplotlib import pyplot as pp\n'), ((817, 833), 'numpy.array', 'np.array', (['[None]'], {}), '([None])\n', (825, 833), True, 'import numpy as np\n'), ((842, 858), 'numpy.array', 'np.array', (['[None]'], {}), '([None])\n', (850, 858), True, 'import numpy as np\n'), ((950, 1018), 'matplotlib.pyplot.plot', 'pp.plot', (['years', 'values'], {'label': '"""Raw values"""', 'color': '"""red"""', 'linewidth': '(2)'}), "(years, values, label='Raw values', color='red', linewidth=2)\n", (957, 1018), True, 'from matplotlib import pyplot as pp\n'), ((1683, 1692), 'matplotlib.pyplot.show', 'pp.show', ([], {}), '()\n', (1690, 1692), True, 'from matplotlib import pyplot as pp\n'), ((1785, 1826), 'matplotlib.rc', 'mpl.rc', (['"""xtick"""'], {'labelsize': 'sizes[sidx][2]'}), "('xtick', labelsize=sizes[sidx][2])\n", (1791, 1826), True, 'import matplotlib as mpl\n'), ((1831, 1872), 'matplotlib.rc', 'mpl.rc', (['"""ytick"""'], {'labelsize': 'sizes[sidx][2]'}), "('ytick', labelsize=sizes[sidx][2])\n", (1837, 1872), True, 'import matplotlib as mpl\n'), ((1877, 1945), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['"""Mean absolute percentage error"""'], {'fontsize': 'sizes[sidx][2]'}), "('Mean absolute percentage error', fontsize=sizes[sidx][2])\n", (1886, 1945), True, 'from matplotlib import pyplot as pp\n'), ((1950, 1993), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['"""Epoch"""'], {'fontsize': 'sizes[sidx][2]'}), "('Epoch', fontsize=sizes[sidx][2])\n", (1959, 1993), True, 'from matplotlib import pyplot as pp\n'), ((1998, 2007), 'matplotlib.pyplot.grid', 'pp.grid', ([], {}), '()\n', (2005, 2007), True, 'from matplotlib import pyplot as pp\n'), ((2125, 2133), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (2131, 2133), True, 'from matplotlib import pyplot as pp\n'), ((2220, 2246), 'matplotlib.pyplot.title', 'pp.title', (['"""Learning curve"""'], {}), "('Learning curve')\n", (2228, 2246), True, 'from matplotlib import pyplot as pp\n'), ((2251, 2260), 'matplotlib.pyplot.show', 'pp.show', ([], {}), '()\n', (2258, 2260), True, 'from matplotlib import pyplot as pp\n'), ((2544, 2607), 'matplotlib.pyplot.plot', 'pp.plot', (['years', 'v'], {'label': '"""Raw values"""', 'color': '"""red"""', 'linewidth': '(2)'}), "(years, v, label='Raw values', color='red', linewidth=2)\n", (2551, 2607), True, 'from matplotlib import pyplot as pp\n'), ((2820, 2835), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (2828, 2835), True, 'from matplotlib import pyplot as pp\n'), ((2840, 2849), 'matplotlib.pyplot.show', 'pp.show', ([], {}), '()\n', (2847, 2849), True, 'from matplotlib import pyplot as pp\n'), ((3522, 3560), 'pandas.read_csv', 'pandas.read_csv', (['sys.argv[1]'], {'header': '(2)'}), "(sys.argv[1], header=2)\n", (3537, 3560), False, 'import pandas\n'), ((3581, 3594), 'prep_data.get_raw_xy', 'get_raw_xy', (['d'], {}), '(d)\n', (3591, 3594), False, 'from prep_data import get_raw_xy\n'), ((3610, 3625), 'prep_data.get_vpo', 'get_vpo', (['values'], {}), '(values)\n', (3617, 3625), False, 'from prep_data import get_vpo\n'), ((1053, 1105), 'matplotlib.pyplot.plot', 'pp.plot', (['years', 'prep_values'], {'label': '"""Prepared values"""'}), "(years, prep_values, label='Prepared values')\n", (1060, 1105), True, 'from matplotlib import pyplot as pp\n'), ((1663, 1678), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (1671, 1678), True, 'from matplotlib import pyplot as pp\n')]
|
# this code performs dimensionality reduction on the dataset,
# using a pretrained DenseNet121 model.
import tensorflow as tf
from scipy.io import loadmat, savemat
import numpy as np
FV = loadmat('images.mat')
data = FV['data']
labels = FV['labels']
print(data.shape)
labels = labels.transpose()
labels = labels.ravel()
print(labels.shape)
inputs = tf.keras.Input(shape=(224, 224, 3))
# here different models were tested,
# TODO : add all the models in parallel with the best model.
model = tf.keras.applications.DenseNet121(include_top=False, weights='imagenet',
input_shape=(224,224,3))
# possibly try other models here.
model_outputs = model(inputs)
outputs = tf.keras.layers.GlobalAveragePooling2D(name='ga')(model_outputs)
feature_extractor = tf.keras.models.Model(inputs=inputs, outputs=outputs)
# get features
X = []
samples = data.shape[0]
for i in range(samples):
X.append(feature_extractor(np.array([data[i]])))
X = np.array(X)
# replace the old images with their features; derive the shape from the
# data instead of hard-coding the sample count
data = X.reshape(samples, -1)
del X
savemat('features.mat', {'data': data,
'labels': labels})
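# Hedged sanity check (assumes the savemat call above succeeded):
# reload the file and confirm the arrays round-trip with the expected shapes.
check = loadmat('features.mat')
print(check['data'].shape, check['labels'].shape)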
|
[
"scipy.io.loadmat",
"tensorflow.keras.Input",
"scipy.io.savemat",
"tensorflow.keras.models.Model",
"numpy.array",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.applications.DenseNet121"
] |
[((189, 210), 'scipy.io.loadmat', 'loadmat', (['"""images.mat"""'], {}), "('images.mat')\n", (196, 210), False, 'from scipy.io import loadmat, savemat\n'), ((350, 385), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(224, 224, 3)'}), '(shape=(224, 224, 3))\n', (364, 385), True, 'import tensorflow as tf\n'), ((493, 596), 'tensorflow.keras.applications.DenseNet121', 'tf.keras.applications.DenseNet121', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': '(224, 224, 3)'}), "(include_top=False, weights='imagenet',\n input_shape=(224, 224, 3))\n", (526, 596), True, 'import tensorflow as tf\n'), ((792, 845), 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), '(inputs=inputs, outputs=outputs)\n', (813, 845), True, 'import tensorflow as tf\n'), ((973, 984), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (981, 984), True, 'import numpy as np\n'), ((1056, 1113), 'scipy.io.savemat', 'savemat', (['"""features.mat"""', "{'data': data, 'labels': labels}"], {}), "('features.mat', {'data': data, 'labels': labels})\n", (1063, 1113), False, 'from scipy.io import loadmat, savemat\n'), ((707, 756), 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {'name': '"""ga"""'}), "(name='ga')\n", (745, 756), True, 'import tensorflow as tf\n'), ((947, 966), 'numpy.array', 'np.array', (['[data[i]]'], {}), '([data[i]])\n', (955, 966), True, 'import numpy as np\n')]
|
import numpy as np
from matplotlib import pyplot as plt
from ..Xfit.basic import fitline, fitline0, fitconstant
from ..Xfit.MCMC_straight_line import mcmc_sl
from ..Xfit.fit_basic import fit_basic
from ..Xplot.niceplot import niceplot
from matplotlib.offsetbox import AnchoredText
from matplotlib import ticker
def plot_parameters(pars, parameter, R=250e-9, T=22, fit=None, modes=1, ax=None, marker='o',
textbox=False, alpha=1, log='', label=None, ci=0, corner_axes=0, mfc=None,
format_ticks=True, cmap=None, init={}, fix=None, viscosity=False,
fit_report=False, emcee=False, exc=None, excfit=None, excbad=True,
weighted=True, xl=None, xlim=[None,None], ylim=[None,None], **kwargs):
def getD(eta, err=0):
dD = 0
D = kb*(T+273.15)/(6*np.pi*R*eta)
if err:
dD = D*err/eta
return D, dD
def geteta(D, err=0):
deta = 0
eta = kb*(T+273.15)/(6*np.pi*R*D*1e-18)
if err:
deta = eta*err/D
return eta, deta
def blc(q, L, k, lc):
def A(q):
return 4*np.pi/lc*q/k*np.sqrt(1-q**2/(4*k**2))
return 2*(A(q)*L-1+np.exp(-A(q)*L))/(A(q)*L)**2
def line(x, p):
return p[0]*x + p[1]
def power(x, p):
return p[0]*x**p[1] + p[2]
if type(modes) == int:
modes = np.arange(modes-1, modes)
else:
modes = np.array(modes)
modes -= 1
if parameter in [0, 'G', 'dispersion', 'tau']:
name = 't'
elif parameter in [1, 'kww']:
name = 'g'
elif parameter in [2, 'f0', 'ergodicity']:
name = 'b'
    if ax is None:
        # create a figure and axes when the caller does not supply one
        fig, ax = plt.subplots(1, 1, figsize=(9, 4))
kb = 1.381e-23
m_unit = {
'G': 'nm{} s-1'.format(alpha),
'kww': 'nm{}'.format(alpha),
'f0': 'nm{}'.format(alpha),
'tau': 'nm{} s'.format(alpha),
}
b_unit = {
'G': 's-1',
'kww': '',
'f0': '',
'tau': 's'
}
y_label = {
'G': r'$\Gamma (s^{-1})$',
'kww': 'kww',
'f0': 'ergodicity',
'tau': r'$\tau\,(s)$'
}
if fit == '' or fit is None:
dofit = False
else:
dofit = True
qv = pars['q']
qv = qv**alpha
# values to be excluded
iip = np.arange(qv.size)
iif = iip.copy()
if exc is not None:
iip = np.delete(iip, exc)
if excfit is not None:
iif = np.delete(iif, np.hstack((excfit)))
if xl is None:
x = np.linspace(np.min(qv[iif]), np.max(qv[iif]), 100)
else:
x = np.linspace(xl[0], xl[1], 100)
textstr = ""
markers = ['^', 'v'] if (len(modes)<3) else ['o']
for ii, i in enumerate(modes):
if label is None:
labstr = 'mode {}: {}'.format(i+1, parameter)
else:
labstr = label
textstr += labstr
# -------plot decay rates--------
try:
y = np.asarray(pars['{}{}'.format(name, i)], dtype=np.float32)
dy = np.asarray(pars['d{}{}'.format(name, i)], dtype=np.float32)
except KeyError:
return np.zeros(5)
y = np.ma.masked_where(~np.isfinite(y), y)
dy = np.ma.masked_array(dy, mask=y.mask)
if parameter == 'G':
y = 1/y
dy = y**2*dy
else:
pass
nf = np.where(dy.filled(0) <= 0)[0]
bad_points = nf.size
if bad_points:
print('Found {} points with zero error\n'.format(bad_points))
if excbad:
                iif = np.array([p for p in iif if p not in nf])
iip = np.array([p for p in iip if p not in nf])
print('Excluded bad points.')
            if len(iif) == 0 or len(iip) == 0:
return np.zeros(5)
color = cmap(ci)
        marker = markers[ii % len(markers)]  # cycle safely through the marker list
ax.errorbar(qv[iip], y[iip], dy[iip], fmt=marker,
label=labstr, color=color, mfc=mfc)
if dofit:
if fit == 'mcmc_line':
m, b, f_m, m_ls, b_ls = mcmc_sl(
qv[iif], y[iif], dy[iif], doplot=corner_axes)
# ax[0].plot(x2,m_ls*x2+b_ls)
m, b = [(x[0], np.mean(x[1:])) for x in (m, b)]
else:
res = fit_basic(qv[iif], y[iif], dy[iif],
fit, dict(init), fix, emcee)
fitpar = res[0].astype(np.float32)
yf = res[4].eval(res[2].params, x=x)
ax.plot(x, yf, color=color, label=None)
if parameter in ['G', 'tau']:
if viscosity:
                    # the exponent converts between the rate ('G') and time
                    # ('tau') conventions and must act on the viscosity value,
                    # not on the formatted string
                    sign = 1 if (parameter == 'G') else -1
                    textstr += '\neta = {0[0]:.4g} +/- {0[1]:.2g} [cP]'.format(
                        (np.array(geteta(*fitpar[0]))*1e3)**sign)
elif parameter == 'f0' and dofit and 't' in res[2].params.keys():
msd = 1/(2*res[2].params['t'].value)
dmsd = 2*msd**2*res[2].params['t'].stderr
r_loc = np.sqrt(6*(msd))
dr_loc = 6/2/r_loc*dmsd
textstr += 'localization length: {:.2f} +/- {:.2f} nm\n'.format(
r_loc, dr_loc)
if fit_report and dofit:
print('\n' + textstr)
print('-'*16)
print(res[3])
# if format_ticks:
# x_labels = ax.get_xticks()
# try:
# @ticker.FuncFormatter
# def major_formatter(x, pos):
# return "{:.2f}".format(x)
# ax.ticklabel_format(axis='x', useMathText=True,
# style='sci', scilimits=(0, 0))
# except:
# pass
# set style
if alpha == 1:
x_lab = r'$\mathrm{q} (nm^{-1})$'
else:
x_lab = r'$\mathrm{{q}}^{0} (nm^{{-{0}}})$'.format(alpha)
ax.set_xlabel(x_lab)
ax.set_ylabel(y_label[parameter])
if 'x' in log:
ax.set_xscale('log')
if 'y' in log:
ax.set_yscale('log')
if textbox:
at = AnchoredText(textstr, loc=2,)
ax.add_artist(at)
ax.legend(loc='best')
# ax.get_yaxis().get_major_formatter().set_useOffset(False)
# niceplot(ax,)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
if dofit:
return res
else:
return np.zeros(5)
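def _demo_plot_parameters():
    """Hedged smoke test with synthetic decay times (tau ~ 1/(D*q^2)).
    The module's relative imports mean this file cannot be run directly;
    call this helper from within the package instead.
    """
    from matplotlib import cm
    q = np.linspace(0.01, 0.1, 10)
    pars = {'q': q, 't0': 1.0 / (100 * q ** 2), 'dt0': 0.05 / (100 * q ** 2)}
    return plot_parameters(pars, 'G', fit=None, cmap=cm.viridis)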
|
[
"matplotlib.offsetbox.AnchoredText",
"numpy.zeros",
"numpy.isfinite",
"numpy.hstack",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.ma.masked_array",
"numpy.mean",
"matplotlib.pyplot.subplots",
"numpy.delete",
"numpy.sqrt"
] |
[((2329, 2347), 'numpy.arange', 'np.arange', (['qv.size'], {}), '(qv.size)\n', (2338, 2347), True, 'import numpy as np\n'), ((1386, 1413), 'numpy.arange', 'np.arange', (['(modes - 1)', 'modes'], {}), '(modes - 1, modes)\n', (1395, 1413), True, 'import numpy as np\n'), ((1438, 1453), 'numpy.array', 'np.array', (['modes'], {}), '(modes)\n', (1446, 1453), True, 'import numpy as np\n'), ((1703, 1737), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(9, 4)'}), '(1, 1, figsize=(9, 4))\n', (1715, 1737), True, 'from matplotlib import pyplot as plt\n'), ((2407, 2426), 'numpy.delete', 'np.delete', (['iip', 'exc'], {}), '(iip, exc)\n', (2416, 2426), True, 'import numpy as np\n'), ((2609, 2639), 'numpy.linspace', 'np.linspace', (['xl[0]', 'xl[1]', '(100)'], {}), '(xl[0], xl[1], 100)\n', (2620, 2639), True, 'import numpy as np\n'), ((3223, 3258), 'numpy.ma.masked_array', 'np.ma.masked_array', (['dy'], {'mask': 'y.mask'}), '(dy, mask=y.mask)\n', (3241, 3258), True, 'import numpy as np\n'), ((6001, 6029), 'matplotlib.offsetbox.AnchoredText', 'AnchoredText', (['textstr'], {'loc': '(2)'}), '(textstr, loc=2)\n', (6013, 6029), False, 'from matplotlib.offsetbox import AnchoredText\n'), ((6272, 6283), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (6280, 6283), True, 'import numpy as np\n'), ((2483, 2500), 'numpy.hstack', 'np.hstack', (['excfit'], {}), '(excfit)\n', (2492, 2500), True, 'import numpy as np\n'), ((2548, 2563), 'numpy.min', 'np.min', (['qv[iif]'], {}), '(qv[iif])\n', (2554, 2563), True, 'import numpy as np\n'), ((2565, 2580), 'numpy.max', 'np.max', (['qv[iif]'], {}), '(qv[iif])\n', (2571, 2580), True, 'import numpy as np\n'), ((1154, 1188), 'numpy.sqrt', 'np.sqrt', (['(1 - q ** 2 / (4 * k ** 2))'], {}), '(1 - q ** 2 / (4 * k ** 2))\n', (1161, 1188), True, 'import numpy as np\n'), ((3146, 3157), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3154, 3157), True, 'import numpy as np\n'), ((3191, 3205), 'numpy.isfinite', 'np.isfinite', (['y'], {}), '(y)\n', (3202, 3205), True, 'import numpy as np\n'), ((3581, 3622), 'numpy.array', 'np.array', (['[p for p in iif if p not in nf]'], {}), '([p for p in iif if p not in nf])\n', (3589, 3622), True, 'import numpy as np\n'), ((3645, 3686), 'numpy.array', 'np.array', (['[p for p in iip if p not in nf]'], {}), '([p for p in iip if p not in nf])\n', (3653, 3686), True, 'import numpy as np\n'), ((5013, 5029), 'numpy.sqrt', 'np.sqrt', (['(6 * msd)'], {}), '(6 * msd)\n', (5020, 5029), True, 'import numpy as np\n'), ((3807, 3818), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3815, 3818), True, 'import numpy as np\n'), ((4233, 4247), 'numpy.mean', 'np.mean', (['x[1:]'], {}), '(x[1:])\n', (4240, 4247), True, 'import numpy as np\n')]
|
import numpy as np
import os
import shutil
import tempfile
import unittest
import yt
from yt.utilities.exceptions import \
YTProfileDataShape
from yt.data_objects.particle_filters import add_particle_filter
from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D,\
create_profile
from yt.testing import \
assert_equal, \
assert_raises,\
assert_rel_equal, \
fake_random_ds, \
requires_module
from yt.utilities.exceptions import YTIllDefinedProfile
from yt.visualization.profile_plotter import ProfilePlot, PhasePlot
_fields = ("density", "temperature", "dinosaurs", "tribbles")
_units = ("g/cm**3", "K", "dyne", "erg")
def test_profiles():
ds = fake_random_ds(64, nprocs = 8, fields = _fields, units = _units)
nv = ds.domain_dimensions.prod()
dd = ds.all_data()
(rmi, rma), (tmi, tma), (dmi, dma) = dd.quantities["Extrema"](
["density", "temperature", "dinosaurs"])
rt, tt, dt = dd.quantities["TotalQuantity"](
["density", "temperature", "dinosaurs"])
e1, e2 = 0.9, 1.1
for nb in [8, 16, 32, 64]:
for input_units in ['mks', 'cgs']:
for ex in [rmi, rma, tmi, tma, dmi, dma]:
getattr(ex, 'convert_to_%s' % input_units)()
# We log all the fields or don't log 'em all. No need to do them
# individually.
for lf in [True, False]:
direct_profile = Profile1D(
dd, "density", nb, rmi*e1, rma*e2, lf, weight_field=None)
direct_profile.add_fields(["ones", "temperature"])
indirect_profile_s = create_profile(
dd, "density", ["ones", "temperature"], n_bins=nb,
extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf},
weight_field=None)
indirect_profile_t = create_profile(
dd, ("gas", "density"),
[("index", "ones"), ("gas", "temperature")], n_bins=nb,
extrema={'density': (rmi*e1, rma*e2)}, logs={'density': lf},
weight_field=None)
for p1d in [direct_profile, indirect_profile_s,
indirect_profile_t]:
assert_equal(p1d["index", "ones"].sum(), nv)
assert_rel_equal(tt, p1d["gas", "temperature"].sum(), 7)
p2d = Profile2D(
dd,
"density", nb, rmi*e1, rma*e2, lf,
"temperature", nb, tmi*e1, tma*e2, lf,
weight_field=None)
p2d.add_fields(["ones", "temperature"])
assert_equal(p2d["ones"].sum(), nv)
assert_rel_equal(tt, p2d["temperature"].sum(), 7)
p3d = Profile3D(
dd,
"density", nb, rmi*e1, rma*e2, lf,
"temperature", nb, tmi*e1, tma*e2, lf,
"dinosaurs", nb, dmi*e1, dma*e2, lf,
weight_field=None)
p3d.add_fields(["ones", "temperature"])
assert_equal(p3d["ones"].sum(), nv)
assert_rel_equal(tt, p3d["temperature"].sum(), 7)
p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
weight_field = None)
p1d.add_fields("ones")
av = nv / nb
assert_equal(p1d["ones"], np.ones(nb)*av)
# We re-bin ones with a weight now
p1d = Profile1D(dd, "x", nb, 0.0, 1.0, False,
weight_field = "temperature")
p1d.add_fields(["ones"])
assert_equal(p1d["ones"], np.ones(nb))
# Verify we can access "ones" after adding a new field
# See issue 988
p1d.add_fields(["density"])
assert_equal(p1d["ones"], np.ones(nb))
p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
"y", nb, 0.0, 1.0, False,
weight_field = None)
p2d.add_fields("ones")
av = nv / nb**2
assert_equal(p2d["ones"], np.ones((nb, nb))*av)
# We re-bin ones with a weight now
p2d = Profile2D(dd, "x", nb, 0.0, 1.0, False,
"y", nb, 0.0, 1.0, False,
weight_field = "temperature")
p2d.add_fields(["ones"])
assert_equal(p2d["ones"], np.ones((nb, nb)))
p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
"y", nb, 0.0, 1.0, False,
"z", nb, 0.0, 1.0, False,
weight_field = None)
p3d.add_fields("ones")
av = nv / nb**3
assert_equal(p3d["ones"], np.ones((nb, nb, nb))*av)
# We re-bin ones with a weight now
p3d = Profile3D(dd, "x", nb, 0.0, 1.0, False,
"y", nb, 0.0, 1.0, False,
"z", nb, 0.0, 1.0, False,
weight_field = "temperature")
p3d.add_fields(["ones"])
assert_equal(p3d["ones"], np.ones((nb,nb,nb)))
p2d = create_profile(dd, ('gas', 'density'), ('gas', 'temperature'),
weight_field=('gas', 'cell_mass'),
extrema={'density': (None, rma*e2)})
assert_equal(p2d.x_bins[0], rmi - np.spacing(rmi))
assert_equal(p2d.x_bins[-1], rma*e2)
assert str(ds.field_info['gas', 'cell_mass'].units) == str(p2d.weight.units)
p2d = create_profile(dd, ('gas', 'density'), ('gas', 'temperature'),
weight_field=('gas', 'cell_mass'),
extrema={'density': (rmi*e2, None)})
assert_equal(p2d.x_bins[0], rmi*e2)
assert_equal(p2d.x_bins[-1], rma + np.spacing(rma))
extrema_s = {'particle_position_x': (0, 1)}
logs_s = {'particle_position_x': False}
extrema_t = {('all', 'particle_position_x'): (0, 1)}
logs_t = {('all', 'particle_position_x'): False}
def test_particle_profiles():
for nproc in [1, 2, 4, 8]:
ds = fake_random_ds(32, nprocs=nproc, particles = 32**3)
dd = ds.all_data()
p1d = Profile1D(dd, "particle_position_x", 128,
0.0, 1.0, False, weight_field = None)
p1d.add_fields(["particle_ones"])
assert_equal(p1d["particle_ones"].sum(), 32**3)
p1d = create_profile(dd, ["particle_position_x"], ["particle_ones"],
weight_field=None, n_bins=128, extrema=extrema_s,
logs=logs_s)
assert_equal(p1d["particle_ones"].sum(), 32**3)
p1d = create_profile(dd,
[("all", "particle_position_x")],
[("all", "particle_ones")],
weight_field=None, n_bins=128, extrema=extrema_t,
logs=logs_t)
assert_equal(p1d["particle_ones"].sum(), 32**3)
p2d = Profile2D(dd, "particle_position_x", 128, 0.0, 1.0, False,
"particle_position_y", 128, 0.0, 1.0, False,
weight_field = None)
p2d.add_fields(["particle_ones"])
assert_equal(p2d["particle_ones"].sum(), 32**3)
p3d = Profile3D(dd, "particle_position_x", 128, 0.0, 1.0, False,
"particle_position_y", 128, 0.0, 1.0, False,
"particle_position_z", 128, 0.0, 1.0, False,
weight_field = None)
p3d.add_fields(["particle_ones"])
assert_equal(p3d["particle_ones"].sum(), 32**3)
def test_mixed_particle_mesh_profiles():
ds = fake_random_ds(32, particles=10)
ad = ds.all_data()
assert_raises(
YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'particle_mass')
assert_raises(
YTIllDefinedProfile, ProfilePlot, ad, 'radius',
['particle_mass', 'particle_ones'])
assert_raises(
YTIllDefinedProfile, ProfilePlot, ad, 'radius',
['particle_mass', 'ones'])
assert_raises(
YTIllDefinedProfile, ProfilePlot, ad, 'particle_radius', 'particle_mass',
'cell_mass')
assert_raises(
YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'cell_mass',
'particle_ones')
assert_raises(
YTIllDefinedProfile, PhasePlot, ad, 'radius', 'particle_mass',
'velocity_x')
assert_raises(
YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
'cell_mass')
assert_raises(
YTIllDefinedProfile, PhasePlot, ad, 'radius', 'cell_mass',
'particle_ones')
assert_raises(
YTIllDefinedProfile, PhasePlot, ad, 'particle_radius', 'particle_mass',
'particle_ones')
def test_particle_profile_negative_field():
# see Issue #1340
n_particles = int(1e4)
ppx, ppy, ppz = np.random.normal(size=[3, n_particles])
pvx, pvy, pvz = - np.ones((3, n_particles))
data = {'particle_position_x': ppx,
'particle_position_y': ppy,
'particle_position_z': ppz,
'particle_velocity_x': pvx,
'particle_velocity_y': pvy,
'particle_velocity_z': pvz}
bbox = 1.1*np.array([[min(ppx), max(ppx)], [min(ppy), max(ppy)], [min(ppz), max(ppz)]])
ds = yt.load_particles(data, bbox=bbox)
ad = ds.all_data()
profile = yt.create_profile(
ad,
["particle_position_x", "particle_position_y"],
"particle_velocity_x",
logs = {'particle_position_x': True,
'particle_position_y': True,
'particle_position_z': True},
weight_field=None)
assert profile['particle_velocity_x'].min() < 0
assert profile.x_bins.min() > 0
assert profile.y_bins.min() > 0
profile = yt.create_profile(
ad,
["particle_position_x", "particle_position_y"],
"particle_velocity_x",
weight_field=None)
assert profile['particle_velocity_x'].min() < 0
assert profile.x_bins.min() < 0
assert profile.y_bins.min() < 0
# can't use CIC deposition with log-scaled bin fields
with assert_raises(RuntimeError):
yt.create_profile(
ad,
["particle_position_x", "particle_position_y"],
"particle_velocity_x",
logs = {'particle_position_x': True,
'particle_position_y': False,
'particle_position_z': False},
weight_field=None, deposition='cic')
# can't use CIC deposition with accumulation or fractional
with assert_raises(RuntimeError):
yt.create_profile(
ad,
["particle_position_x", "particle_position_y"],
"particle_velocity_x",
logs = {'particle_position_x': False,
'particle_position_y': False,
'particle_position_z': False},
weight_field=None, deposition='cic',
accumulation=True, fractional=True)
def test_profile_zero_weight():
def DMparticles(pfilter, data):
filter = data[(pfilter.filtered_type, "particle_type")] == 1
return filter
def DM_in_cell_mass(field, data):
return data['deposit', 'DM_density']*data['index', 'cell_volume']
add_particle_filter("DM", function=DMparticles,
filtered_type='io', requires=["particle_type"])
_fields = ("particle_position_x", "particle_position_y",
"particle_position_z", "particle_mass", "particle_velocity_x",
"particle_velocity_y", "particle_velocity_z", "particle_type")
_units = ('cm', 'cm', 'cm', 'g', 'cm/s', 'cm/s', 'cm/s', 'dimensionless')
ds = fake_random_ds(32, particle_fields=_fields,
particle_field_units=_units, particles=16)
ds.add_particle_filter('DM')
ds.add_field(("gas", "DM_cell_mass"), units="g", function=DM_in_cell_mass,
sampling_type='cell')
sp = ds.sphere(ds.domain_center, (10, 'kpc'))
profile = yt.create_profile(sp,
[("gas", "density")],
[("gas", "radial_velocity")],
weight_field=("gas", "DM_cell_mass"))
assert not np.any(np.isnan(profile['gas', 'radial_velocity']))
def test_profile_override_limits():
ds = fake_random_ds(64, nprocs = 8, fields = _fields, units = _units)
sp = ds.sphere(ds.domain_center, (10, 'kpc'))
obins = np.linspace(-5,5,10)
profile = yt.create_profile(sp,
[ "density"],["temperature"],
override_bins={"density":(obins, "g/cm**3")})
assert_equal(ds.arr(obins, "g/cm**3"), profile.x_bins)
profile = yt.create_profile(sp,
[ "density", "dinosaurs"],["temperature"],
override_bins={"density":(obins, "g/cm**3"),
"dinosaurs":obins})
assert_equal(ds.arr(obins, "g/cm**3"), profile.x_bins)
assert_equal(ds.arr(obins, "dyne"), profile.y_bins)
profile = yt.create_profile(sp,
[ "density", "dinosaurs", "tribbles"],["temperature"],
override_bins={"density":(obins, "g/cm**3"),
"dinosaurs":obins,
"tribbles":(obins, "erg")})
assert_equal(ds.arr(obins, "g/cm**3"), profile.x_bins)
assert_equal(ds.arr(obins, "dyne"), profile.y_bins)
assert_equal(ds.arr(obins, "erg"), profile.z_bins)
class TestBadProfiles(unittest.TestCase):
tmpdir = None
curdir = None
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.curdir = os.getcwd()
os.chdir(self.tmpdir)
def tearDown(self):
os.chdir(self.curdir)
# clean up
shutil.rmtree(self.tmpdir)
@requires_module('h5py')
def test_unequal_data_shape_profile(self):
density = np.random.random(128)
temperature = np.random.random(128)
cell_mass = np.random.random((128, 128))
my_data = {
"density": density,
"temperature": temperature,
"cell_mass": cell_mass}
fake_ds_med = {"current_time": yt.YTQuantity(10, "Myr")}
yt.save_as_dataset(fake_ds_med, "mydata.h5", my_data)
ds = yt.load('mydata.h5')
assert_raises(
YTProfileDataShape,
yt.PhasePlot, ds.data, 'temperature', 'density', 'cell_mass')
@requires_module('h5py')
def test_unequal_bin_field_profile(self):
density = np.random.random(128)
temperature = np.random.random(127)
cell_mass = np.random.random((128, 128))
my_data = {
"density": density,
"temperature": temperature,
"cell_mass": cell_mass}
fake_ds_med = {"current_time": yt.YTQuantity(10, "Myr")}
yt.save_as_dataset(fake_ds_med, "mydata.h5", my_data)
ds = yt.load('mydata.h5')
assert_raises(
YTProfileDataShape,
yt.PhasePlot, ds.data, 'temperature', 'density', 'cell_mass')
def test_index_field_units():
# see #1849
ds = fake_random_ds(16, length_unit=2)
ad = ds.all_data()
icv_units = ad['index', 'cell_volume'].units
assert str(icv_units) == 'code_length**3'
gcv_units = ad['gas', 'cell_volume'].units
assert str(gcv_units) == 'cm**3'
prof = ad.profile(['density', 'velocity_x'],
[('gas', 'cell_volume'), ('index', 'cell_volume')],
weight_field=None)
assert str(prof['index', 'cell_volume'].units) == 'code_length**3'
assert str(prof['gas', 'cell_volume'].units) == 'cm**3'
@requires_module("astropy")
def test_export_astropy():
from yt.units.yt_array import YTArray
ds = fake_random_ds(64)
ad = ds.all_data()
prof = ad.profile('radius', [('gas', 'density'), ('gas', 'velocity_x')],
weight_field=('index','ones'), n_bins=32)
# export to AstroPy table
at1 = prof.to_astropy_table()
assert 'radius' in at1.colnames
assert 'density' in at1.colnames
assert 'velocity_x' in at1.colnames
assert_equal(prof.x.d, at1["radius"].value)
assert_equal(prof["density"].d, at1["density"].value)
assert_equal(prof["velocity_x"].d, at1["velocity_x"].value)
assert prof.x.units == YTArray.from_astropy(at1["radius"]).units
assert prof["density"].units == YTArray.from_astropy(at1["density"]).units
assert prof["velocity_x"].units == YTArray.from_astropy(at1["velocity_x"]).units
assert np.all(at1.mask['density'] == prof.used)
at2 = prof.to_astropy_table(fields="density", only_used=True)
assert 'radius' in at2.colnames
assert 'velocity_x' not in at2.colnames
assert_equal(prof.x.d[prof.used], at2["radius"].value)
assert_equal(prof["density"].d[prof.used], at2["density"].value)
@requires_module("pandas")
def test_export_pandas():
ds = fake_random_ds(64)
ad = ds.all_data()
prof = ad.profile('radius', [('gas', 'density'), ('gas', 'velocity_x')],
weight_field=('index','ones'), n_bins=32)
# export to pandas DataFrame
df1 = prof.to_dataframe()
assert 'radius' in df1.columns
assert 'density' in df1.columns
assert 'velocity_x' in df1.columns
assert_equal(prof.x.d, df1["radius"])
assert_equal(prof["density"].d, np.nan_to_num(df1["density"]))
assert_equal(prof["velocity_x"].d, np.nan_to_num(df1["velocity_x"]))
df2 = prof.to_dataframe(fields="density", only_used=True)
assert 'radius' in df2.columns
assert 'velocity_x' not in df2.columns
assert_equal(prof.x.d[prof.used], df2["radius"])
assert_equal(prof["density"].d[prof.used], df2["density"])
|
[
"numpy.nan_to_num",
"yt.YTQuantity",
"yt.data_objects.profiles.Profile2D",
"numpy.ones",
"numpy.isnan",
"numpy.random.normal",
"shutil.rmtree",
"os.chdir",
"yt.data_objects.profiles.create_profile",
"yt.testing.assert_equal",
"yt.load_particles",
"yt.testing.fake_random_ds",
"tempfile.mkdtemp",
"numpy.linspace",
"yt.data_objects.profiles.Profile3D",
"yt.testing.requires_module",
"yt.testing.assert_raises",
"yt.data_objects.profiles.Profile1D",
"yt.save_as_dataset",
"numpy.spacing",
"yt.load",
"yt.data_objects.particle_filters.add_particle_filter",
"yt.create_profile",
"numpy.all",
"os.getcwd",
"yt.units.yt_array.YTArray.from_astropy",
"numpy.random.random"
] |
[((15715, 15741), 'yt.testing.requires_module', 'requires_module', (['"""astropy"""'], {}), "('astropy')\n", (15730, 15741), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16912, 16937), 'yt.testing.requires_module', 'requires_module', (['"""pandas"""'], {}), "('pandas')\n", (16927, 16937), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((693, 751), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(64)'], {'nprocs': '(8)', 'fields': '_fields', 'units': '_units'}), '(64, nprocs=8, fields=_fields, units=_units)\n', (707, 751), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((7616, 7648), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(32)'], {'particles': '(10)'}), '(32, particles=10)\n', (7630, 7648), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((7676, 7754), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'ProfilePlot', 'ad', '"""radius"""', '"""particle_mass"""'], {}), "(YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'particle_mass')\n", (7689, 7754), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((7768, 7870), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'ProfilePlot', 'ad', '"""radius"""', "['particle_mass', 'particle_ones']"], {}), "(YTIllDefinedProfile, ProfilePlot, ad, 'radius', [\n 'particle_mass', 'particle_ones'])\n", (7781, 7870), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((7887, 7980), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'ProfilePlot', 'ad', '"""radius"""', "['particle_mass', 'ones']"], {}), "(YTIllDefinedProfile, ProfilePlot, ad, 'radius', [\n 'particle_mass', 'ones'])\n", (7900, 7980), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((7997, 8101), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'ProfilePlot', 'ad', '"""particle_radius"""', '"""particle_mass"""', '"""cell_mass"""'], {}), "(YTIllDefinedProfile, ProfilePlot, ad, 'particle_radius',\n 'particle_mass', 'cell_mass')\n", (8010, 8101), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((8119, 8214), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'ProfilePlot', 'ad', '"""radius"""', '"""cell_mass"""', '"""particle_ones"""'], {}), "(YTIllDefinedProfile, ProfilePlot, ad, 'radius', 'cell_mass',\n 'particle_ones')\n", (8132, 8214), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((8233, 8327), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'PhasePlot', 'ad', '"""radius"""', '"""particle_mass"""', '"""velocity_x"""'], {}), "(YTIllDefinedProfile, PhasePlot, ad, 'radius', 'particle_mass',\n 'velocity_x')\n", (8246, 8327), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((8345, 8447), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'PhasePlot', 'ad', '"""particle_radius"""', '"""particle_mass"""', '"""cell_mass"""'], {}), "(YTIllDefinedProfile, PhasePlot, ad, 'particle_radius',\n 'particle_mass', 'cell_mass')\n", (8358, 8447), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((8465, 8558), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'PhasePlot', 'ad', '"""radius"""', '"""cell_mass"""', '"""particle_ones"""'], {}), "(YTIllDefinedProfile, PhasePlot, ad, 'radius', 'cell_mass',\n 'particle_ones')\n", (8478, 8558), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((8576, 8682), 'yt.testing.assert_raises', 'assert_raises', (['YTIllDefinedProfile', 'PhasePlot', 'ad', '"""particle_radius"""', '"""particle_mass"""', '"""particle_ones"""'], {}), "(YTIllDefinedProfile, PhasePlot, ad, 'particle_radius',\n 'particle_mass', 'particle_ones')\n", (8589, 8682), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((8811, 8850), 'numpy.random.normal', 'np.random.normal', ([], {'size': '[3, n_particles]'}), '(size=[3, n_particles])\n', (8827, 8850), True, 'import numpy as np\n'), ((9242, 9276), 'yt.load_particles', 'yt.load_particles', (['data'], {'bbox': 'bbox'}), '(data, bbox=bbox)\n', (9259, 9276), False, 'import yt\n'), ((9315, 9533), 'yt.create_profile', 'yt.create_profile', (['ad', "['particle_position_x', 'particle_position_y']", '"""particle_velocity_x"""'], {'logs': "{'particle_position_x': True, 'particle_position_y': True,\n 'particle_position_z': True}", 'weight_field': 'None'}), "(ad, ['particle_position_x', 'particle_position_y'],\n 'particle_velocity_x', logs={'particle_position_x': True,\n 'particle_position_y': True, 'particle_position_z': True}, weight_field\n =None)\n", (9332, 9533), False, 'import yt\n'), ((9735, 9850), 'yt.create_profile', 'yt.create_profile', (['ad', "['particle_position_x', 'particle_position_y']", '"""particle_velocity_x"""'], {'weight_field': 'None'}), "(ad, ['particle_position_x', 'particle_position_y'],\n 'particle_velocity_x', weight_field=None)\n", (9752, 9850), False, 'import yt\n'), ((11204, 11303), 'yt.data_objects.particle_filters.add_particle_filter', 'add_particle_filter', (['"""DM"""'], {'function': 'DMparticles', 'filtered_type': '"""io"""', 'requires': "['particle_type']"}), "('DM', function=DMparticles, filtered_type='io',\n requires=['particle_type'])\n", (11223, 11303), False, 'from yt.data_objects.particle_filters import add_particle_filter\n'), ((11629, 11719), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(32)'], {'particle_fields': '_fields', 'particle_field_units': '_units', 'particles': '(16)'}), '(32, particle_fields=_fields, particle_field_units=_units,\n particles=16)\n', (11643, 11719), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((11958, 12073), 'yt.create_profile', 'yt.create_profile', (['sp', "[('gas', 'density')]", "[('gas', 'radial_velocity')]"], {'weight_field': "('gas', 'DM_cell_mass')"}), "(sp, [('gas', 'density')], [('gas', 'radial_velocity')],\n weight_field=('gas', 'DM_cell_mass'))\n", (11975, 12073), False, 'import yt\n'), ((12280, 12338), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(64)'], {'nprocs': '(8)', 'fields': '_fields', 'units': '_units'}), '(64, nprocs=8, fields=_fields, units=_units)\n', (12294, 12338), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((12408, 12430), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(10)'], {}), '(-5, 5, 10)\n', (12419, 12430), True, 'import numpy as np\n'), ((12443, 12546), 'yt.create_profile', 'yt.create_profile', (['sp', "['density']", "['temperature']"], {'override_bins': "{'density': (obins, 'g/cm**3')}"}), "(sp, ['density'], ['temperature'], override_bins={\n 'density': (obins, 'g/cm**3')})\n", (12460, 12546), False, 'import yt\n'), ((12679, 12814), 'yt.create_profile', 'yt.create_profile', (['sp', "['density', 'dinosaurs']", "['temperature']"], {'override_bins': "{'density': (obins, 'g/cm**3'), 'dinosaurs': obins}"}), "(sp, ['density', 'dinosaurs'], ['temperature'],\n override_bins={'density': (obins, 'g/cm**3'), 'dinosaurs': obins})\n", (12696, 12814), False, 'import yt\n'), ((13050, 13229), 'yt.create_profile', 'yt.create_profile', (['sp', "['density', 'dinosaurs', 'tribbles']", "['temperature']"], {'override_bins': "{'density': (obins, 'g/cm**3'), 'dinosaurs': obins, 'tribbles': (obins, 'erg')}"}), "(sp, ['density', 'dinosaurs', 'tribbles'], ['temperature'],\n override_bins={'density': (obins, 'g/cm**3'), 'dinosaurs': obins,\n 'tribbles': (obins, 'erg')})\n", (13067, 13229), False, 'import yt\n'), ((13869, 13892), 'yt.testing.requires_module', 'requires_module', (['"""h5py"""'], {}), "('h5py')\n", (13884, 13892), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((14500, 14523), 'yt.testing.requires_module', 'requires_module', (['"""h5py"""'], {}), "('h5py')\n", (14515, 14523), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((15181, 15214), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(16)'], {'length_unit': '(2)'}), '(16, length_unit=2)\n', (15195, 15214), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((15820, 15838), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(64)'], {}), '(64)\n', (15834, 15838), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16184, 16227), 'yt.testing.assert_equal', 'assert_equal', (['prof.x.d', "at1['radius'].value"], {}), "(prof.x.d, at1['radius'].value)\n", (16196, 16227), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16232, 16285), 'yt.testing.assert_equal', 'assert_equal', (["prof['density'].d", "at1['density'].value"], {}), "(prof['density'].d, at1['density'].value)\n", (16244, 16285), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16290, 16349), 'yt.testing.assert_equal', 'assert_equal', (["prof['velocity_x'].d", "at1['velocity_x'].value"], {}), "(prof['velocity_x'].d, at1['velocity_x'].value)\n", (16302, 16349), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16594, 16634), 'numpy.all', 'np.all', (["(at1.mask['density'] == prof.used)"], {}), "(at1.mask['density'] == prof.used)\n", (16600, 16634), True, 'import numpy as np\n'), ((16785, 16839), 'yt.testing.assert_equal', 'assert_equal', (['prof.x.d[prof.used]', "at2['radius'].value"], {}), "(prof.x.d[prof.used], at2['radius'].value)\n", (16797, 16839), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16844, 16908), 'yt.testing.assert_equal', 'assert_equal', (["prof['density'].d[prof.used]", "at2['density'].value"], {}), 
"(prof['density'].d[prof.used], at2['density'].value)\n", (16856, 16908), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((16973, 16991), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(64)'], {}), '(64)\n', (16987, 16991), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((17333, 17370), 'yt.testing.assert_equal', 'assert_equal', (['prof.x.d', "df1['radius']"], {}), "(prof.x.d, df1['radius'])\n", (17345, 17370), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((17655, 17703), 'yt.testing.assert_equal', 'assert_equal', (['prof.x.d[prof.used]', "df2['radius']"], {}), "(prof.x.d[prof.used], df2['radius'])\n", (17667, 17703), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((17708, 17766), 'yt.testing.assert_equal', 'assert_equal', (["prof['density'].d[prof.used]", "df2['density']"], {}), "(prof['density'].d[prof.used], df2['density'])\n", (17720, 17766), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((3223, 3281), 'yt.data_objects.profiles.Profile1D', 'Profile1D', (['dd', '"""x"""', 'nb', '(0.0)', '(1.0)', '(False)'], {'weight_field': 'None'}), "(dd, 'x', nb, 0.0, 1.0, False, weight_field=None)\n", (3232, 3281), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((3468, 3535), 'yt.data_objects.profiles.Profile1D', 'Profile1D', (['dd', '"""x"""', 'nb', '(0.0)', '(1.0)', '(False)'], {'weight_field': '"""temperature"""'}), "(dd, 'x', nb, 0.0, 1.0, False, weight_field='temperature')\n", (3477, 3535), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((3828, 3916), 'yt.data_objects.profiles.Profile2D', 'Profile2D', (['dd', '"""x"""', 'nb', '(0.0)', '(1.0)', '(False)', '"""y"""', 'nb', '(0.0)', '(1.0)', '(False)'], {'weight_field': 'None'}), "(dd, 'x', nb, 0.0, 1.0, False, 'y', nb, 0.0, 1.0, False,\n weight_field=None)\n", (3837, 3916), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((4140, 4237), 'yt.data_objects.profiles.Profile2D', 'Profile2D', (['dd', '"""x"""', 'nb', '(0.0)', '(1.0)', '(False)', '"""y"""', 'nb', '(0.0)', '(1.0)', '(False)'], {'weight_field': '"""temperature"""'}), "(dd, 'x', nb, 0.0, 1.0, False, 'y', nb, 0.0, 1.0, False,\n weight_field='temperature')\n", (4149, 4237), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((4393, 4508), 'yt.data_objects.profiles.Profile3D', 'Profile3D', (['dd', '"""x"""', 'nb', '(0.0)', '(1.0)', '(False)', '"""y"""', 'nb', '(0.0)', '(1.0)', '(False)', '"""z"""', 'nb', '(0.0)', '(1.0)', '(False)'], {'weight_field': 'None'}), "(dd, 'x', nb, 0.0, 1.0, False, 'y', nb, 0.0, 1.0, False, 'z', nb, \n 0.0, 1.0, False, weight_field=None)\n", (4402, 4508), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((4763, 4887), 'yt.data_objects.profiles.Profile3D', 'Profile3D', (['dd', '"""x"""', 'nb', '(0.0)', '(1.0)', '(False)', '"""y"""', 'nb', '(0.0)', '(1.0)', '(False)', '"""z"""', 'nb', '(0.0)', '(1.0)', '(False)'], {'weight_field': '"""temperature"""'}), "(dd, 'x', nb, 0.0, 1.0, False, 'y', nb, 0.0, 1.0, False, 'z', nb, \n 0.0, 1.0, False, weight_field='temperature')\n", (4772, 4887), 
False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((5072, 5213), 'yt.data_objects.profiles.create_profile', 'create_profile', (['dd', "('gas', 'density')", "('gas', 'temperature')"], {'weight_field': "('gas', 'cell_mass')", 'extrema': "{'density': (None, rma * e2)}"}), "(dd, ('gas', 'density'), ('gas', 'temperature'), weight_field\n =('gas', 'cell_mass'), extrema={'density': (None, rma * e2)})\n", (5086, 5213), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((5332, 5370), 'yt.testing.assert_equal', 'assert_equal', (['p2d.x_bins[-1]', '(rma * e2)'], {}), '(p2d.x_bins[-1], rma * e2)\n', (5344, 5370), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((5469, 5610), 'yt.data_objects.profiles.create_profile', 'create_profile', (['dd', "('gas', 'density')", "('gas', 'temperature')"], {'weight_field': "('gas', 'cell_mass')", 'extrema': "{'density': (rmi * e2, None)}"}), "(dd, ('gas', 'density'), ('gas', 'temperature'), weight_field\n =('gas', 'cell_mass'), extrema={'density': (rmi * e2, None)})\n", (5483, 5610), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((5670, 5707), 'yt.testing.assert_equal', 'assert_equal', (['p2d.x_bins[0]', '(rmi * e2)'], {}), '(p2d.x_bins[0], rmi * e2)\n', (5682, 5707), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((6030, 6081), 'yt.testing.fake_random_ds', 'fake_random_ds', (['(32)'], {'nprocs': 'nproc', 'particles': '(32 ** 3)'}), '(32, nprocs=nproc, particles=32 ** 3)\n', (6044, 6081), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((6124, 6201), 'yt.data_objects.profiles.Profile1D', 'Profile1D', (['dd', '"""particle_position_x"""', '(128)', '(0.0)', '(1.0)', '(False)'], {'weight_field': 'None'}), "(dd, 'particle_position_x', 128, 0.0, 1.0, False, weight_field=None)\n", (6133, 6201), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((6341, 6471), 'yt.data_objects.profiles.create_profile', 'create_profile', (['dd', "['particle_position_x']", "['particle_ones']"], {'weight_field': 'None', 'n_bins': '(128)', 'extrema': 'extrema_s', 'logs': 'logs_s'}), "(dd, ['particle_position_x'], ['particle_ones'], weight_field\n =None, n_bins=128, extrema=extrema_s, logs=logs_s)\n", (6355, 6471), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((6596, 6747), 'yt.data_objects.profiles.create_profile', 'create_profile', (['dd', "[('all', 'particle_position_x')]", "[('all', 'particle_ones')]"], {'weight_field': 'None', 'n_bins': '(128)', 'extrema': 'extrema_t', 'logs': 'logs_t'}), "(dd, [('all', 'particle_position_x')], [('all',\n 'particle_ones')], weight_field=None, n_bins=128, extrema=extrema_t,\n logs=logs_t)\n", (6610, 6747), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((6927, 7053), 'yt.data_objects.profiles.Profile2D', 'Profile2D', (['dd', '"""particle_position_x"""', '(128)', '(0.0)', '(1.0)', '(False)', '"""particle_position_y"""', '(128)', '(0.0)', '(1.0)', '(False)'], {'weight_field': 'None'}), "(dd, 'particle_position_x', 128, 0.0, 1.0, False,\n 'particle_position_y', 128, 0.0, 1.0, False, weight_field=None)\n", (6936, 7053), False, 'from yt.data_objects.profiles import 
Profile1D, Profile2D, Profile3D, create_profile\n'), ((7217, 7392), 'yt.data_objects.profiles.Profile3D', 'Profile3D', (['dd', '"""particle_position_x"""', '(128)', '(0.0)', '(1.0)', '(False)', '"""particle_position_y"""', '(128)', '(0.0)', '(1.0)', '(False)', '"""particle_position_z"""', '(128)', '(0.0)', '(1.0)', '(False)'], {'weight_field': 'None'}), "(dd, 'particle_position_x', 128, 0.0, 1.0, False,\n 'particle_position_y', 128, 0.0, 1.0, False, 'particle_position_z', 128,\n 0.0, 1.0, False, weight_field=None)\n", (7226, 7392), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((8873, 8898), 'numpy.ones', 'np.ones', (['(3, n_particles)'], {}), '((3, n_particles))\n', (8880, 8898), True, 'import numpy as np\n'), ((10072, 10099), 'yt.testing.assert_raises', 'assert_raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10085, 10099), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((10109, 10346), 'yt.create_profile', 'yt.create_profile', (['ad', "['particle_position_x', 'particle_position_y']", '"""particle_velocity_x"""'], {'logs': "{'particle_position_x': True, 'particle_position_y': False,\n 'particle_position_z': False}", 'weight_field': 'None', 'deposition': '"""cic"""'}), "(ad, ['particle_position_x', 'particle_position_y'],\n 'particle_velocity_x', logs={'particle_position_x': True,\n 'particle_position_y': False, 'particle_position_z': False},\n weight_field=None, deposition='cic')\n", (10126, 10346), False, 'import yt\n'), ((10511, 10538), 'yt.testing.assert_raises', 'assert_raises', (['RuntimeError'], {}), '(RuntimeError)\n', (10524, 10538), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((10548, 10822), 'yt.create_profile', 'yt.create_profile', (['ad', "['particle_position_x', 'particle_position_y']", '"""particle_velocity_x"""'], {'logs': "{'particle_position_x': False, 'particle_position_y': False,\n 'particle_position_z': False}", 'weight_field': 'None', 'deposition': '"""cic"""', 'accumulation': '(True)', 'fractional': '(True)'}), "(ad, ['particle_position_x', 'particle_position_y'],\n 'particle_velocity_x', logs={'particle_position_x': False,\n 'particle_position_y': False, 'particle_position_z': False},\n weight_field=None, deposition='cic', accumulation=True, fractional=True)\n", (10565, 10822), False, 'import yt\n'), ((13671, 13689), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (13687, 13689), False, 'import tempfile\n'), ((13712, 13723), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (13721, 13723), False, 'import os\n'), ((13732, 13753), 'os.chdir', 'os.chdir', (['self.tmpdir'], {}), '(self.tmpdir)\n', (13740, 13753), False, 'import os\n'), ((13787, 13808), 'os.chdir', 'os.chdir', (['self.curdir'], {}), '(self.curdir)\n', (13795, 13808), False, 'import os\n'), ((13836, 13862), 'shutil.rmtree', 'shutil.rmtree', (['self.tmpdir'], {}), '(self.tmpdir)\n', (13849, 13862), False, 'import shutil\n'), ((13958, 13979), 'numpy.random.random', 'np.random.random', (['(128)'], {}), '(128)\n', (13974, 13979), True, 'import numpy as np\n'), ((14002, 14023), 'numpy.random.random', 'np.random.random', (['(128)'], {}), '(128)\n', (14018, 14023), True, 'import numpy as np\n'), ((14044, 14072), 'numpy.random.random', 'np.random.random', (['(128, 128)'], {}), '((128, 128))\n', (14060, 14072), True, 'import numpy as np\n'), ((14275, 14328), 'yt.save_as_dataset', 'yt.save_as_dataset', 
(['fake_ds_med', '"""mydata.h5"""', 'my_data'], {}), "(fake_ds_med, 'mydata.h5', my_data)\n", (14293, 14328), False, 'import yt\n'), ((14343, 14363), 'yt.load', 'yt.load', (['"""mydata.h5"""'], {}), "('mydata.h5')\n", (14350, 14363), False, 'import yt\n'), ((14373, 14472), 'yt.testing.assert_raises', 'assert_raises', (['YTProfileDataShape', 'yt.PhasePlot', 'ds.data', '"""temperature"""', '"""density"""', '"""cell_mass"""'], {}), "(YTProfileDataShape, yt.PhasePlot, ds.data, 'temperature',\n 'density', 'cell_mass')\n", (14386, 14472), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((14588, 14609), 'numpy.random.random', 'np.random.random', (['(128)'], {}), '(128)\n', (14604, 14609), True, 'import numpy as np\n'), ((14632, 14653), 'numpy.random.random', 'np.random.random', (['(127)'], {}), '(127)\n', (14648, 14653), True, 'import numpy as np\n'), ((14674, 14702), 'numpy.random.random', 'np.random.random', (['(128, 128)'], {}), '((128, 128))\n', (14690, 14702), True, 'import numpy as np\n'), ((14905, 14958), 'yt.save_as_dataset', 'yt.save_as_dataset', (['fake_ds_med', '"""mydata.h5"""', 'my_data'], {}), "(fake_ds_med, 'mydata.h5', my_data)\n", (14923, 14958), False, 'import yt\n'), ((14973, 14993), 'yt.load', 'yt.load', (['"""mydata.h5"""'], {}), "('mydata.h5')\n", (14980, 14993), False, 'import yt\n'), ((15003, 15102), 'yt.testing.assert_raises', 'assert_raises', (['YTProfileDataShape', 'yt.PhasePlot', 'ds.data', '"""temperature"""', '"""density"""', '"""cell_mass"""'], {}), "(YTProfileDataShape, yt.PhasePlot, ds.data, 'temperature',\n 'density', 'cell_mass')\n", (15016, 15102), False, 'from yt.testing import assert_equal, assert_raises, assert_rel_equal, fake_random_ds, requires_module\n'), ((17407, 17436), 'numpy.nan_to_num', 'np.nan_to_num', (["df1['density']"], {}), "(df1['density'])\n", (17420, 17436), True, 'import numpy as np\n'), ((17477, 17509), 'numpy.nan_to_num', 'np.nan_to_num', (["df1['velocity_x']"], {}), "(df1['velocity_x'])\n", (17490, 17509), True, 'import numpy as np\n'), ((3629, 3640), 'numpy.ones', 'np.ones', (['nb'], {}), '(nb)\n', (3636, 3640), True, 'import numpy as np\n'), ((3800, 3811), 'numpy.ones', 'np.ones', (['nb'], {}), '(nb)\n', (3807, 3811), True, 'import numpy as np\n'), ((4359, 4376), 'numpy.ones', 'np.ones', (['(nb, nb)'], {}), '((nb, nb))\n', (4366, 4376), True, 'import numpy as np\n'), ((5036, 5057), 'numpy.ones', 'np.ones', (['(nb, nb, nb)'], {}), '((nb, nb, nb))\n', (5043, 5057), True, 'import numpy as np\n'), ((12189, 12232), 'numpy.isnan', 'np.isnan', (["profile['gas', 'radial_velocity']"], {}), "(profile['gas', 'radial_velocity'])\n", (12197, 12232), True, 'import numpy as np\n'), ((14241, 14265), 'yt.YTQuantity', 'yt.YTQuantity', (['(10)', '"""Myr"""'], {}), "(10, 'Myr')\n", (14254, 14265), False, 'import yt\n'), ((14871, 14895), 'yt.YTQuantity', 'yt.YTQuantity', (['(10)', '"""Myr"""'], {}), "(10, 'Myr')\n", (14884, 14895), False, 'import yt\n'), ((16377, 16412), 'yt.units.yt_array.YTArray.from_astropy', 'YTArray.from_astropy', (["at1['radius']"], {}), "(at1['radius'])\n", (16397, 16412), False, 'from yt.units.yt_array import YTArray\n'), ((16455, 16491), 'yt.units.yt_array.YTArray.from_astropy', 'YTArray.from_astropy', (["at1['density']"], {}), "(at1['density'])\n", (16475, 16491), False, 'from yt.units.yt_array import YTArray\n'), ((16537, 16576), 'yt.units.yt_array.YTArray.from_astropy', 'YTArray.from_astropy', (["at1['velocity_x']"], {}), "(at1['velocity_x'])\n", (16557, 16576), 
False, 'from yt.units.yt_array import YTArray\n'), ((1420, 1491), 'yt.data_objects.profiles.Profile1D', 'Profile1D', (['dd', '"""density"""', 'nb', '(rmi * e1)', '(rma * e2)', 'lf'], {'weight_field': 'None'}), "(dd, 'density', nb, rmi * e1, rma * e2, lf, weight_field=None)\n", (1429, 1491), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((1614, 1768), 'yt.data_objects.profiles.create_profile', 'create_profile', (['dd', '"""density"""', "['ones', 'temperature']"], {'n_bins': 'nb', 'extrema': "{'density': (rmi * e1, rma * e2)}", 'logs': "{'density': lf}", 'weight_field': 'None'}), "(dd, 'density', ['ones', 'temperature'], n_bins=nb, extrema={\n 'density': (rmi * e1, rma * e2)}, logs={'density': lf}, weight_field=None)\n", (1628, 1768), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((1859, 2045), 'yt.data_objects.profiles.create_profile', 'create_profile', (['dd', "('gas', 'density')", "[('index', 'ones'), ('gas', 'temperature')]"], {'n_bins': 'nb', 'extrema': "{'density': (rmi * e1, rma * e2)}", 'logs': "{'density': lf}", 'weight_field': 'None'}), "(dd, ('gas', 'density'), [('index', 'ones'), ('gas',\n 'temperature')], n_bins=nb, extrema={'density': (rmi * e1, rma * e2)},\n logs={'density': lf}, weight_field=None)\n", (1873, 2045), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((2394, 2512), 'yt.data_objects.profiles.Profile2D', 'Profile2D', (['dd', '"""density"""', 'nb', '(rmi * e1)', '(rma * e2)', 'lf', '"""temperature"""', 'nb', '(tmi * e1)', '(tma * e2)', 'lf'], {'weight_field': 'None'}), "(dd, 'density', nb, rmi * e1, rma * e2, lf, 'temperature', nb, tmi *\n e1, tma * e2, lf, weight_field=None)\n", (2403, 2512), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((2783, 2947), 'yt.data_objects.profiles.Profile3D', 'Profile3D', (['dd', '"""density"""', 'nb', '(rmi * e1)', '(rma * e2)', 'lf', '"""temperature"""', 'nb', '(tmi * e1)', '(tma * e2)', 'lf', '"""dinosaurs"""', 'nb', '(dmi * e1)', '(dma * e2)', 'lf'], {'weight_field': 'None'}), "(dd, 'density', nb, rmi * e1, rma * e2, lf, 'temperature', nb, tmi *\n e1, tma * e2, lf, 'dinosaurs', nb, dmi * e1, dma * e2, lf, weight_field\n =None)\n", (2792, 2947), False, 'from yt.data_objects.profiles import Profile1D, Profile2D, Profile3D, create_profile\n'), ((3394, 3405), 'numpy.ones', 'np.ones', (['nb'], {}), '(nb)\n', (3401, 3405), True, 'import numpy as np\n'), ((4060, 4077), 'numpy.ones', 'np.ones', (['(nb, nb)'], {}), '((nb, nb))\n', (4067, 4077), True, 'import numpy as np\n'), ((4679, 4700), 'numpy.ones', 'np.ones', (['(nb, nb, nb)'], {}), '((nb, nb, nb))\n', (4686, 4700), True, 'import numpy as np\n'), ((5307, 5322), 'numpy.spacing', 'np.spacing', (['rmi'], {}), '(rmi)\n', (5317, 5322), True, 'import numpy as np\n'), ((5749, 5764), 'numpy.spacing', 'np.spacing', (['rma'], {}), '(rma)\n', (5759, 5764), True, 'import numpy as np\n')]
|
import logging
import os
import cv2
import numpy as np
import inferencing_pb2
import media_pb2
import extension_pb2
import extension_pb2_grpc
# import timeit as t
from enum import Enum
from shared_memory import SharedMemoryManager
from exception_handler import PrintGetExceptionDetails
from model_wrapper import YoloV4Model
# Get debug flag from env variable (Returns None if not set)
# Set this environment variable in the IoT Edge deployment manifest to activate debugging.
DEBUG = os.getenv('DEBUG')
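# Folder for dumped debug images; assumed default here, since the file does
# not define it elsewhere (CreateDebugOutput below expects this name)
DEBUG_OUTPUT_FOLDER = os.getenv('DEBUG_OUTPUT_FOLDER', '/tmp')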
class TransferType(Enum):
BYTES = 1 # Embedded Content
REFERENCE = 2 # Shared Memory
HANDLE = 3 # Reserved
class State:
def __init__(self, mediaStreamDescriptor):
try:
# media descriptor holding input data format
self._mediaStreamDescriptor = mediaStreamDescriptor
# Get how data will be transferred
if self._mediaStreamDescriptor.WhichOneof("data_transfer_properties") is None:
self._contentTransferType = TransferType.BYTES
elif self._mediaStreamDescriptor.HasField("shared_memory_buffer_transfer_properties"):
self._contentTransferType = TransferType.REFERENCE
elif self._mediaStreamDescriptor.HasField("shared_memory_segments_transfer_properties"):
self._contentTransferType = TransferType.HANDLE
# Setup if shared mem used
if self._contentTransferType == TransferType.REFERENCE:
# Create shared memory accessor specific to the client
self._sharedMemoryManager = SharedMemoryManager(
name=self._mediaStreamDescriptor.shared_memory_buffer_transfer_properties.handle_name,
size=self._mediaStreamDescriptor.shared_memory_buffer_transfer_properties.length_bytes)
else:
self._sharedMemoryManager = None
except:
PrintGetExceptionDetails()
raise
class InferenceEngine(extension_pb2_grpc.MediaGraphExtensionServicer):
def __init__(self):
# create ONNX model wrapper
# Thread safe shared resource among all clients
self._YoloV4 = YoloV4Model()
# Debug method for dumping received images with analysis results
def CreateDebugOutput(self, requestSeqNum, cvImage, boxes, scores, indices, confidenceThreshold=0.1):
try:
marked = False
for idx in indices:
confidenceScore = scores[tuple(idx)].tolist()
if confidenceScore >= confidenceThreshold:
                    objectLabel = self._YoloV4._labelList[idx[1].tolist()]
idxTuple = (idx[0], idx[2])
ymin, xmin, ymax, xmax = boxes[idxTuple].tolist()
cv2.rectangle(cvImage, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 0, 0), 2)
cv2.putText(cvImage, objectLabel + " - " + str(confidenceScore), (int(xmin), int(ymin - 7)), cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 0, 0), 1)
marked = True
# Set output file name
if marked:
outputFileName = os.path.join(DEBUG_OUTPUT_FOLDER, str(requestSeqNum) + '_marked.jpg')
else:
outputFileName = os.path.join(DEBUG_OUTPUT_FOLDER, str(requestSeqNum) + '.jpg')
# output with bounding boxes
cv2.imwrite(outputFileName, cvImage)
except:
PrintGetExceptionDetails()
raise
def GetMediaStreamMessageResponse(self, bboxes, originalImageSize):
try:
msg = extension_pb2.MediaStreamMessage()
ih, iw = originalImageSize
for i, bbox in enumerate(bboxes):
confidenceScore = bbox[4].tolist()
objectLabel = self._YoloV4._labelList[int(bbox[5])]
xmin, ymin, xmax, ymax = np.array(bbox[:4], dtype=np.int32)
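                # The media graph contract uses normalized coordinates, so the
                # pixel-space box is divided by the original image width/height
                # when filling the Rectangle below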
inference = msg.media_sample.inferences.add()
inference.type = inferencing_pb2.Inference.InferenceType.ENTITY
inference.entity.CopyFrom( inferencing_pb2.Entity(
tag = inferencing_pb2.Tag(
value = objectLabel,
confidence = confidenceScore
),
box = inferencing_pb2.Rectangle(
l = xmin / iw,
t = ymin / ih,
w = (xmax - xmin) / iw,
h = (ymax - ymin) / ih,
)
)
)
return msg
except:
PrintGetExceptionDetails()
raise
def GetCvImageFromRawBytes(self, clientState, mediaSample):
try:
# Get reference to raw bytes
if clientState._contentTransferType == TransferType.BYTES:
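                # Content embedded inline in the gRPC message; expose it as a
                # zero-copy, read-only view over the message bytes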
rawBytes = memoryview(mediaSample.content_bytes.bytes).toreadonly()
elif clientState._contentTransferType == TransferType.REFERENCE:
# Data sent over shared memory buffer
addressOffset = mediaSample.content_reference.address_offset
lengthBytes = mediaSample.content_reference.length_bytes
# Get memory reference to (in readonly mode) data sent over shared memory
rawBytes = clientState._sharedMemoryManager.ReadBytes(addressOffset, lengthBytes)
# Get encoding details of the media sent by client
encoding = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.encoding
# Handle JPG, PNG, BMP content
cvImage = None
if encoding == clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.Encoding.JPG or \
encoding == clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.Encoding.PNG or \
encoding == clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.Encoding.BMP:
# np.frombuffer is zero copy command
cvImage = cv2.imdecode(np.frombuffer(rawBytes, dtype=np.uint8), -1)
# Handle RAW content (Just place holder for the user to handle each variation...)
elif encoding == clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.Encoding.RAW:
pixelFormat = clientState._mediaStreamDescriptor.media_descriptor.video_frame_sample_format.pixel_format
if pixelFormat == media_pb2.VideoFrameSampleFormat.PixelFormat.RGBA:
cvImage = cv2.cvtColor(np.frombuffer(rawBytes, dtype=np.uint8), cv2.COLOR_RGBA2RGB)
elif pixelFormat == media_pb2.VideoFrameSampleFormat.PixelFormat.YUV420P:
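                    # Placeholder: decoding YUV420P would need a planar
                    # YUV-to-BGR conversion (e.g. cv2.COLOR_YUV2BGR_I420)
                    # plus the frame dimensions from the media descriptor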
cvImage = None
return cvImage
except:
PrintGetExceptionDetails()
raise
def ProcessMediaStream(self, requestIterator, context):
        # The logic below could be extended to multiple processes (e.g. one
        # per CPU core when doing CPU inferencing).
        # For simplicity, a single process handles all gRPC clients here.
        # Auto-incrementing counter; increases with each client request
responseSeqNum = 1
# First message from the client is (must be) MediaStreamDescriptor
mediaStreamMessageRequest = next(requestIterator)
# Extract message IDs
requestSeqNum = mediaStreamMessageRequest.sequence_number
requestAckSeqNum = mediaStreamMessageRequest.ack_sequence_number
# State object per client
clientState = State(mediaStreamMessageRequest.media_stream_descriptor)
if DEBUG is not None:
logging.info('[Received] SeqNum: {0:07d} | AckNum: {1}\nMediaStreamDescriptor:\n{2}'.format(requestSeqNum, requestAckSeqNum, clientState._mediaStreamDescriptor))
# First message response ...
mediaStreamMessage = extension_pb2.MediaStreamMessage(
sequence_number = responseSeqNum,
ack_sequence_number = requestSeqNum,
media_stream_descriptor = extension_pb2.MediaStreamDescriptor(
media_descriptor = media_pb2.MediaDescriptor(
timescale = clientState._mediaStreamDescriptor.media_descriptor.timescale
)
)
)
yield mediaStreamMessage
        # Process the rest of the MediaStream message sequence
for mediaStreamMessageRequest in requestIterator:
try:
# Increment response counter, will be sent to client
responseSeqNum += 1
# Read request id, sent by client
requestSeqNum = mediaStreamMessageRequest.sequence_number
if DEBUG is not None:
logging.info('[Received] SeqNum: {0:07d}'.format(requestSeqNum))
# Get media content bytes. (bytes sent over shared memory buffer, segment or inline to message)
cvImage = self.GetCvImageFromRawBytes(clientState, mediaStreamMessageRequest.media_sample)
if cvImage is None:
                    logging.info("Can't decode received bytes.")
continue
# start = t.default_timer()
# run inference
boxes, originalImageSize = self._YoloV4.Score(cvImage)
# end = t.default_timer()
# infTime = round((end - start) * 1000, 5)
# logging.info('inf time: {0}'.format(infTime))
# if DEBUG is not None:
# self.CreateDebugOutput(requestSeqNum, cvImage, boxes, scores, indices)
# Check client connection state
if context.is_active():
# return inference result as MediaStreamMessage
mediaStreamMessage = self.GetMediaStreamMessageResponse(boxes, originalImageSize)
mediaStreamMessage.sequence_number = responseSeqNum
mediaStreamMessage.ack_sequence_number = requestSeqNum
mediaStreamMessage.media_sample.timestamp = mediaStreamMessageRequest.media_sample.timestamp
# yield response
yield mediaStreamMessage
else:
break
except:
PrintGetExceptionDetails()
|
[
"cv2.imwrite",
"numpy.frombuffer",
"shared_memory.SharedMemoryManager",
"extension_pb2.MediaStreamMessage",
"media_pb2.MediaDescriptor",
"model_wrapper.YoloV4Model",
"logging.info",
"inferencing_pb2.Tag",
"numpy.array",
"inferencing_pb2.Rectangle",
"os.getenv",
"exception_handler.PrintGetExceptionDetails"
] |
[((489, 507), 'os.getenv', 'os.getenv', (['"""DEBUG"""'], {}), "('DEBUG')\n", (498, 507), False, 'import os\n'), ((2192, 2205), 'model_wrapper.YoloV4Model', 'YoloV4Model', ([], {}), '()\n', (2203, 2205), False, 'from model_wrapper import YoloV4Model\n'), ((3403, 3439), 'cv2.imwrite', 'cv2.imwrite', (['outputFileName', 'cvImage'], {}), '(outputFileName, cvImage)\n', (3414, 3439), False, 'import cv2\n'), ((3617, 3651), 'extension_pb2.MediaStreamMessage', 'extension_pb2.MediaStreamMessage', ([], {}), '()\n', (3649, 3651), False, 'import extension_pb2\n'), ((1604, 1813), 'shared_memory.SharedMemoryManager', 'SharedMemoryManager', ([], {'name': 'self._mediaStreamDescriptor.shared_memory_buffer_transfer_properties.handle_name', 'size': 'self._mediaStreamDescriptor.shared_memory_buffer_transfer_properties.length_bytes'}), '(name=self._mediaStreamDescriptor.\n shared_memory_buffer_transfer_properties.handle_name, size=self.\n _mediaStreamDescriptor.shared_memory_buffer_transfer_properties.\n length_bytes)\n', (1623, 1813), False, 'from shared_memory import SharedMemoryManager\n'), ((1936, 1962), 'exception_handler.PrintGetExceptionDetails', 'PrintGetExceptionDetails', ([], {}), '()\n', (1960, 1962), False, 'from exception_handler import PrintGetExceptionDetails\n'), ((3468, 3494), 'exception_handler.PrintGetExceptionDetails', 'PrintGetExceptionDetails', ([], {}), '()\n', (3492, 3494), False, 'from exception_handler import PrintGetExceptionDetails\n'), ((3898, 3932), 'numpy.array', 'np.array', (['bbox[:4]'], {'dtype': 'np.int32'}), '(bbox[:4], dtype=np.int32)\n', (3906, 3932), True, 'import numpy as np\n'), ((4980, 5006), 'exception_handler.PrintGetExceptionDetails', 'PrintGetExceptionDetails', ([], {}), '()\n', (5004, 5006), False, 'from exception_handler import PrintGetExceptionDetails\n'), ((7228, 7254), 'exception_handler.PrintGetExceptionDetails', 'PrintGetExceptionDetails', ([], {}), '()\n', (7252, 7254), False, 'from exception_handler import PrintGetExceptionDetails\n'), ((6475, 6514), 'numpy.frombuffer', 'np.frombuffer', (['rawBytes'], {'dtype': 'np.uint8'}), '(rawBytes, dtype=np.uint8)\n', (6488, 6514), True, 'import numpy as np\n'), ((9722, 9765), 'logging.info', 'logging.info', (['"""Cant decode received bytes."""'], {}), "('Cant decode received bytes.')\n", (9734, 9765), False, 'import logging\n'), ((10929, 10955), 'exception_handler.PrintGetExceptionDetails', 'PrintGetExceptionDetails', ([], {}), '()\n', (10953, 10955), False, 'from exception_handler import PrintGetExceptionDetails\n'), ((8647, 8750), 'media_pb2.MediaDescriptor', 'media_pb2.MediaDescriptor', ([], {'timescale': 'clientState._mediaStreamDescriptor.media_descriptor.timescale'}), '(timescale=clientState._mediaStreamDescriptor.\n media_descriptor.timescale)\n', (8672, 8750), False, 'import media_pb2\n'), ((4198, 4264), 'inferencing_pb2.Tag', 'inferencing_pb2.Tag', ([], {'value': 'objectLabel', 'confidence': 'confidenceScore'}), '(value=objectLabel, confidence=confidenceScore)\n', (4217, 4264), False, 'import inferencing_pb2\n'), ((4478, 4578), 'inferencing_pb2.Rectangle', 'inferencing_pb2.Rectangle', ([], {'l': '(xmin / iw)', 't': '(ymin / ih)', 'w': '((xmax - xmin) / iw)', 'h': '((ymax - ymin) / ih)'}), '(l=xmin / iw, t=ymin / ih, w=(xmax - xmin) / iw, h\n =(ymax - ymin) / ih)\n', (4503, 4578), False, 'import inferencing_pb2\n'), ((6985, 7024), 'numpy.frombuffer', 'np.frombuffer', (['rawBytes'], {'dtype': 'np.uint8'}), '(rawBytes, dtype=np.uint8)\n', (6998, 7024), True, 'import numpy as np\n')]
|
import numpy as np
import os
# lib from Qiskit Aqua
# from qiskit.aqua import Operator, QuantumInstance
# from qiskit.aqua.algorithms import VQE, ExactEigensolver
# from qiskit.aqua.components.optimizers import COBYLA
from qiskit.aqua.operators import Z2Symmetries
from qiskit.circuit.instruction import Instruction
# lib from Qiskit Aqua Chemistry
from qiskit.chemistry import FermionicOperator
from qiskit.chemistry.drivers import PySCFDriver, UnitsType
from qiskit.chemistry.components.variational_forms import UCCSD
from qiskit.chemistry.components.initial_states import HartreeFock
from torchquantum.plugins.qiskit_processor import QiskitProcessor
from torchquantum.plugins import qiskit2tq
processor = QiskitProcessor(
use_real_qc=False,
backend_name=None,
noise_model_name=None,
coupling_map_name=None,
basis_gates_name=None,
n_shots=8192,
initial_layout=None,
seed_transpiler=42,
seed_simulator=42,
optimization_level=None,
max_jobs=5,
remove_ops=False,
remove_ops_thres=1e-4,
)
# import pdb
# pdb.set_trace()
def load_qubitop_for_molecule(molecule_data):
atom_list = [a[0] + ' ' + " ".join([str(elem) for elem in a[1]]) for a in molecule_data['geometry']]
atom = "; ".join(atom_list)
#atom = 'Li .0 .0 .0; H .0 .0 3.9'
basis = molecule_data['basis']
transform = molecule_data['transform']
electrons = molecule_data['electrons']
active = molecule_data['active_orbitals']
driver = PySCFDriver(atom=atom, unit=UnitsType.ANGSTROM, basis=basis, charge=0, spin=0)
molecule = driver.run()
num_particles = molecule.num_alpha + molecule.num_beta
num_spin_orbitals = molecule.num_orbitals * 2
#print("# of electrons: {}".format(num_particles))
#print("# of spin orbitals: {}".format(num_spin_orbitals))
freeze_list = [x for x in range(int(active/2), int(num_particles/2))]
remove_list = [-x for x in range(active,molecule.num_orbitals-int(num_particles/2)+int(active/2))]
#print(freeze_list)
#print(remove_list)
if transform == 'BK':
map_type = 'bravyi_kitaev'
elif transform == 'JW':
map_type = 'jordan_wigner'
else:
map_type = 'parity'
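    # Convert spatial-orbital indices to spin-orbital indices: normalize the
    # lists modulo num_orbitals, then duplicate each entry with an offset of
    # num_orbitals so both alpha- and beta-spin orbitals are covered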
remove_list = [x % molecule.num_orbitals for x in remove_list]
freeze_list = [x % molecule.num_orbitals for x in freeze_list]
remove_list = [x - len(freeze_list) for x in remove_list]
remove_list += [x + molecule.num_orbitals - len(freeze_list) for x in remove_list]
freeze_list += [x + molecule.num_orbitals for x in freeze_list]
fermiOp = FermionicOperator(h1=molecule.one_body_integrals, h2=molecule.two_body_integrals)
energy_shift = 0
if len(freeze_list) > 0:
fermiOp, energy_shift = fermiOp.fermion_mode_freezing(freeze_list)
num_spin_orbitals -= len(freeze_list)
num_particles -= len(freeze_list)
if len(remove_list) > 0:
fermiOp = fermiOp.fermion_mode_elimination(remove_list)
num_spin_orbitals -= len(remove_list)
qubitOp = fermiOp.mapping(map_type=map_type, threshold=0.00000001)
    if len(freeze_list) > 0 or len(remove_list) > 0:
        qubitOp = Z2Symmetries.two_qubit_reduction(qubitOp, num_particles)
    #print(qubitOp.print_operators())
    num_spin_orbitals = qubitOp.num_qubits
return molecule, qubitOp, map_type, num_particles, num_spin_orbitals
def generate_uccsd(molecule_data):
molecule, qubitOp, map_type, num_particles, num_spin_orbitals = load_qubitop_for_molecule(molecule_data)
nuclear_repulsion_energy = molecule.nuclear_repulsion_energy
print("# of electrons: {}".format(num_particles))
print("# of spin orbitals: {}".format(num_spin_orbitals))
qubit_reduction = False
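    # Hartree-Fock reference state; the UCCSD excitations below act on top
    # of it as the variational ansatz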
HF_state = HartreeFock(num_spin_orbitals, num_particles, map_type, qubit_reduction)
uccsd_ansatz = UCCSD(reps=1,
num_orbitals=num_spin_orbitals, num_particles=num_particles,
initial_state=HF_state, qubit_mapping=map_type,
two_qubit_reduction=qubit_reduction)
circ = uccsd_ansatz.construct_circuit([0.4242] *
uccsd_ansatz.num_parameters)
circ.measure_all()
circ_transpiled = processor.transpile(circ)
q_layer = qiskit2tq(circ_transpiled)
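    # Freeze parameters that are exact multiples of pi/2: these come from
    # fixed gates introduced by transpilation rather than from the
    # variational UCCSD angles, so they should not be trained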
for name, param in q_layer.named_parameters():
if not (param % (np.pi / 2)).detach().cpu().numpy().any():
param.requires_grad = False
#randlist = np.random.rand(uccsd_ansatz.num_parameters) # ansatz parameters
#uccsd_ansatz_circuit = uccsd_ansatz.construct_circuit(randlist)
return q_layer
def molecule_data2str(md):
return md['name'] + ' ' + md['basis'] + ' ' + md['transform']+ ' ' + str(md['active_orbitals'])
def write_ansatz(molecule_data):
    #filename = ...
    # generate_uccsd returns a torchquantum layer built from the transpiled
    # UCCSD circuit (the raw UCCSD ansatz object is local to that function)
    ansatz = generate_uccsd(molecule_data)
    print(ansatz)
def write_observable(molecule_data, root):
#filename = ...
_, qubitOp, _, _ , _ = load_qubitop_for_molecule(molecule_data)
molecule_str = molecule_data2str(molecule_data)
numq = qubitOp.num_qubits
molecule_str += ' q' + str(numq) + '\n'
op_str = qubitOp.print_details()
filename = f"{molecule_data['name'].lower()}_" \
f"{molecule_data['transform'].lower()}"
with open(os.path.join(root, filename, f"{filename}.txt"), 'w') as wfid:
wfid.write(f"{molecule_data['name'].lower()} "
f"{molecule_data['transform'].lower()} {numq}\n")
for line in op_str.splitlines():
molecule_str = ''
#print(ord(line[6])) #ZXXIII (6.505213034913027e-19+0j)
linedata = line.split(chr(9))
if not complex(linedata[1]).imag == 0:
print(f"WARNING: imaginary is not zero!!")
molecule_str += str(complex(linedata[1]).real) + ' '
for (i, c) in enumerate(linedata[0]):
molecule_str += c+str(i)+' '
wfid.write(f"{molecule_str}\n")
# molecule_str
# print(molecule_str)
# Molecule parameters for H2
h2_molecule = {
'name' : 'H2',
'basis' : 'sto-3g',
'transform' : 'BK',
'electrons' : 2,
'geometry' : [('H', (0., 0., 0.)), ('H', (0., 0., 0.72))],
'active_orbitals' : 2
}
# Molecule parameters for H2O
h2o_molecule = {
'name' : 'H2O',
'basis' : 'sto-3g',
'transform' : 'BK',
'electrons' : 8,
'geometry' : [('O', (0.,0.,0.)), ('H', (0.757,0.586,0.)), ('H', (-0.757,0.586,0.))],
'active_orbitals' : 4
}
# Molecule parameters for LiH
lih_molecule = {
'name' : 'LiH',
'basis' : 'sto-3g',
'transform' : 'BK',
'electrons' : 4,
'geometry' : [('Li', (0., 0., 0.)), ('H', (0., 0., 1.45))],
'active_orbitals' : 4
}
# Molecule parameters for CH4
ch4_molecule = {
'name' : 'CH4',
'basis' : 'sto-3g',
'transform' : 'BK',
'electrons' : 10,
'geometry' : [('C', (0, 0, 0)), ('H', (0.5541, 0.7996, 0.4965)),
('H', (0.6833, -0.8134, -0.2536)), ('H', (-0.7782, -0.3735, 0.6692)),
('H', (-0.4593, 0.3874, -0.9121))],
'active_orbitals' : 4
}
# generate_uccsd(h2_molecule)
#generate_uccsd(h2o_molecule)
#generate_uccsd(lih_molecule)
#generate_uccsd(ch4_molecule)
molecule_name_dict = {
'h2': h2_molecule,
'h2o': h2o_molecule,
'lih': lih_molecule,
'ch4': ch4_molecule
}
if __name__ == '__main__':
import pdb
pdb.set_trace()
generate_uccsd(molecule_name_dict['ch4'])
# for transform in ['BK', 'JW']:
# for name, info in molecule_name_dict.items():
# root = './examples/data/vqe/'
# info['transform'] = transform
# os.makedirs(os.path.join(root, f"{name}_{transform.lower()}"),
# exist_ok=True)
#
# write_observable(info, root)
|
[
"qiskit.chemistry.components.variational_forms.UCCSD",
"qiskit.chemistry.FermionicOperator",
"qiskit.chemistry.components.initial_states.HartreeFock",
"torchquantum.plugins.qiskit2tq",
"qiskit.chemistry.drivers.PySCFDriver",
"pdb.set_trace",
"numpy.random.rand",
"qiskit.aqua.operators.Z2Symmetries.two_qubit_reduction",
"os.path.join",
"torchquantum.plugins.qiskit_processor.QiskitProcessor"
] |
[((710, 1004), 'torchquantum.plugins.qiskit_processor.QiskitProcessor', 'QiskitProcessor', ([], {'use_real_qc': '(False)', 'backend_name': 'None', 'noise_model_name': 'None', 'coupling_map_name': 'None', 'basis_gates_name': 'None', 'n_shots': '(8192)', 'initial_layout': 'None', 'seed_transpiler': '(42)', 'seed_simulator': '(42)', 'optimization_level': 'None', 'max_jobs': '(5)', 'remove_ops': '(False)', 'remove_ops_thres': '(0.0001)'}), '(use_real_qc=False, backend_name=None, noise_model_name=None,\n coupling_map_name=None, basis_gates_name=None, n_shots=8192,\n initial_layout=None, seed_transpiler=42, seed_simulator=42,\n optimization_level=None, max_jobs=5, remove_ops=False, remove_ops_thres\n =0.0001)\n', (725, 1004), False, 'from torchquantum.plugins.qiskit_processor import QiskitProcessor\n'), ((1477, 1555), 'qiskit.chemistry.drivers.PySCFDriver', 'PySCFDriver', ([], {'atom': 'atom', 'unit': 'UnitsType.ANGSTROM', 'basis': 'basis', 'charge': '(0)', 'spin': '(0)'}), '(atom=atom, unit=UnitsType.ANGSTROM, basis=basis, charge=0, spin=0)\n', (1488, 1555), False, 'from qiskit.chemistry.drivers import PySCFDriver, UnitsType\n'), ((2565, 2651), 'qiskit.chemistry.FermionicOperator', 'FermionicOperator', ([], {'h1': 'molecule.one_body_integrals', 'h2': 'molecule.two_body_integrals'}), '(h1=molecule.one_body_integrals, h2=molecule.\n two_body_integrals)\n', (2582, 2651), False, 'from qiskit.chemistry import FermionicOperator\n'), ((3709, 3781), 'qiskit.chemistry.components.initial_states.HartreeFock', 'HartreeFock', (['num_spin_orbitals', 'num_particles', 'map_type', 'qubit_reduction'], {}), '(num_spin_orbitals, num_particles, map_type, qubit_reduction)\n', (3720, 3781), False, 'from qiskit.chemistry.components.initial_states import HartreeFock\n'), ((3801, 3969), 'qiskit.chemistry.components.variational_forms.UCCSD', 'UCCSD', ([], {'reps': '(1)', 'num_orbitals': 'num_spin_orbitals', 'num_particles': 'num_particles', 'initial_state': 'HF_state', 'qubit_mapping': 'map_type', 'two_qubit_reduction': 'qubit_reduction'}), '(reps=1, num_orbitals=num_spin_orbitals, num_particles=num_particles,\n initial_state=HF_state, qubit_mapping=map_type, two_qubit_reduction=\n qubit_reduction)\n', (3806, 3969), False, 'from qiskit.chemistry.components.variational_forms import UCCSD\n'), ((4229, 4255), 'torchquantum.plugins.qiskit2tq', 'qiskit2tq', (['circ_transpiled'], {}), '(circ_transpiled)\n', (4238, 4255), False, 'from torchquantum.plugins import qiskit2tq\n'), ((4823, 4866), 'numpy.random.rand', 'np.random.rand', (['uccsd_ansatz.num_parameters'], {}), '(uccsd_ansatz.num_parameters)\n', (4837, 4866), True, 'import numpy as np\n'), ((7424, 7439), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (7437, 7439), False, 'import pdb\n'), ((3128, 3184), 'qiskit.aqua.operators.Z2Symmetries.two_qubit_reduction', 'Z2Symmetries.two_qubit_reduction', (['qubitOp', 'num_particles'], {}), '(qubitOp, num_particles)\n', (3160, 3184), False, 'from qiskit.aqua.operators import Z2Symmetries\n'), ((5404, 5451), 'os.path.join', 'os.path.join', (['root', 'filename', 'f"""{filename}.txt"""'], {}), "(root, filename, f'{filename}.txt')\n", (5416, 5451), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, <NAME>. All rights reserved.
# Distributed under the terms of the new BSD License.
# -----------------------------------------------------------------------------
"""
An ArrayList is a strongly typed list whose type can be anything that can be
interpreted as a numpy data type.
Example
-------
>>> L = ArrayList( [[0], [1,2], [3,4,5], [6,7,8,9]] )
>>> print(L)
[ [0] [1 2] [3 4 5] [6 7 8 9] ]
>>> print(L.data)
[0 1 2 3 4 5 6 7 8 9]
You can add several items at once by specifying common or individual size: a
single scalar means all items are the same size while a list of sizes is used
to specify individual item sizes.
Example
-------
>>> L = ArrayList( np.arange(10), [3,3,4])
>>> print(L)
[ [0 1 2] [3 4 5] [6 7 8 9] ]
>>> print(L.data)
[0 1 2 3 4 5 6 7 8 9]
"""
import numpy as np
class ArrayList(object):
"""
An ArrayList is a strongly typed list whose type can be anything that can
be interpreted as a numpy data type.
"""
def __init__(self, data=None, itemsize=None, dtype=float,
sizeable=True, writeable=True):
""" Create a new buffer using given data and sizes or dtype
Parameters
----------
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
            If `itemsize` is an integer, N, the array will be divided
            into elements of size N. If such partition is not possible,
            an error is raised.
            If `itemsize` is a 1-D array, the array will be divided into
            elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
dtype: np.dtype
Any object that can be interpreted as a numpy data type.
sizeable : boolean
Indicate whether item can be appended/inserted/deleted
writeable : boolean
Indicate whether content can be changed
"""
self._sizeable = sizeable
self._writeable = writeable
if data is not None:
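            # A nested sequence is flattened so the data is stored
            # contiguously, with the per-item sizes recorded in itemsize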
if isinstance(data, (list, tuple)):
if isinstance(data[0], (list, tuple)):
itemsize = [len(l) for l in data]
data = [item for sublist in data for item in sublist]
self._data = np.array(data, copy=False)
self._size = self._data.size
# Default is one group with all data inside
_itemsize = np.ones(1) * self._data.size
# Check item sizes and get items count
if itemsize is not None:
if isinstance(itemsize, int):
if (self._size % itemsize) != 0:
raise ValueError("Cannot partition data as requested")
self._count = self._size // itemsize
_itemsize = np.ones(
self._count, dtype=int) * (self._size // self._count)
else:
_itemsize = np.array(itemsize, copy=False)
self._count = len(itemsize)
if _itemsize.sum() != self._size:
raise ValueError("Cannot partition data as requested")
else:
self._count = 1
# Store items
self._items = np.zeros((self._count, 2), int)
C = _itemsize.cumsum()
self._items[1:, 0] += C[:-1]
self._items[0:, 1] += C
else:
self._data = np.zeros(1, dtype=dtype)
self._items = np.zeros((1, 2), dtype=int)
self._size = 0
self._count = 0
@property
def data(self):
""" The array's elements, in memory. """
return self._data[:self._size]
@property
def size(self):
""" Number of base elements, in memory. """
return self._size
@property
def itemsize(self):
""" Individual item sizes """
return self._items[:self._count, 1] - self._items[:self._count, 0]
@property
def dtype(self):
""" Describes the format of the elements in the buffer. """
return self._data.dtype
def reserve(self, capacity):
""" Set current capacity of the underlying array"""
if capacity >= self._data.size:
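            # Round the requested capacity up to the next power of two so
            # repeated growth triggers only O(log n) reallocations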
capacity = int(2 ** np.ceil(np.log2(capacity)))
self._data = np.resize(self._data, capacity)
def __len__(self):
""" x.__len__() <==> len(x) """
return self._count
def __str__(self):
s = '[ '
for item in self:
s += str(item) + ' '
s += ']'
return s
def __getitem__(self, key):
""" x.__getitem__(y) <==> x[y] """
if isinstance(key, int):
if key < 0:
key += len(self)
if key < 0 or key >= len(self):
raise IndexError("Tuple index out of range")
dstart = self._items[key][0]
dstop = self._items[key][1]
return self._data[dstart:dstop]
elif isinstance(key, slice):
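            # A slice returns one flat view spanning the data of all
            # selected items (the step is normalized but otherwise ignored)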
istart, istop, step = key.indices(len(self))
if istart > istop:
istart, istop = istop, istart
dstart = self._items[istart][0]
if istart == istop:
dstop = dstart
else:
dstop = self._items[istop - 1][1]
return self._data[dstart:dstop]
elif isinstance(key, str):
return self._data[key][:self._size]
elif key is Ellipsis:
return self.data
else:
raise TypeError("List indices must be integers")
def __setitem__(self, key, data):
""" x.__setitem__(i, y) <==> x[i]=y """
if not self._writeable:
raise AttributeError("List is not writeable")
if isinstance(key, (int, slice)):
if isinstance(key, int):
if key < 0:
key += len(self)
                if key < 0 or key >= len(self):
raise IndexError("List assignment index out of range")
dstart = self._items[key][0]
dstop = self._items[key][1]
istart = key
elif isinstance(key, slice):
istart, istop, step = key.indices(len(self))
if istart == istop:
return
if istart > istop:
istart, istop = istop, istart
if istart > len(self) or istop > len(self):
raise IndexError("Can only assign iterable")
dstart = self._items[istart][0]
if istart == istop:
dstop = dstart
else:
dstop = self._items[istop - 1][1]
if hasattr(data, "__len__"):
if len(data) == dstop - dstart: # or len(data) == 1:
self._data[dstart:dstop] = data
else:
self.__delitem__(key)
self.insert(istart, data)
else: # we assume len(data) = 1
if dstop - dstart == 1:
self._data[dstart:dstop] = data
else:
self.__delitem__(key)
self.insert(istart, data)
elif key is Ellipsis:
self.data[...] = data
elif isinstance(key, str):
self._data[key][:self._size] = data
else:
raise TypeError("List assignment indices must be integers")
def __delitem__(self, key):
""" x.__delitem__(y) <==> del x[y] """
if not self._sizeable:
raise AttributeError("List is not sizeable")
# Deleting a single item
if isinstance(key, int):
if key < 0:
key += len(self)
            if key < 0 or key >= len(self):
raise IndexError("List deletion index out of range")
istart, istop = key, key + 1
dstart, dstop = self._items[key]
# Deleting several items
elif isinstance(key, slice):
istart, istop, step = key.indices(len(self))
if istart > istop:
istart, istop = istop, istart
if istart == istop:
return
dstart = self._items[istart][0]
dstop = self._items[istop - 1][1]
elif key is Ellipsis:
istart = 0
istop = len(self)
dstart = 0
dstop = self.size
# Error
else:
raise TypeError("List deletion indices must be integers")
# Remove data
size = self._size - (dstop - dstart)
self._data[
dstart:dstart + self._size - dstop] = self._data[dstop:self._size]
self._size -= dstop - dstart
# Remove corresponding items
size = self._count - istop
self._items[istart:istart + size] = self._items[istop:istop + size]
# Update other items
size = dstop - dstart
self._items[istart:istop + size + 1] -= size, size
self._count -= istop - istart
def insert(self, index, data, itemsize=None):
""" Insert data before index
Parameters
----------
index : int
Index before which data will be inserted.
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
            If `itemsize` is an integer, N, the array will be divided
            into elements of size N. If such partition is not possible,
            an error is raised.
            If `itemsize` is a 1-D array, the array will be divided into
            elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
"""
if not self._sizeable:
raise AttributeError("List is not sizeable")
if isinstance(data, (list, tuple)) and isinstance(data[0], (list, tuple)): # noqa
itemsize = [len(l) for l in data]
data = [item for sublist in data for item in sublist]
data = np.array(data, copy=False).ravel()
size = data.size
# Check item size and get item number
if itemsize is not None:
if isinstance(itemsize, int):
if (size % itemsize) != 0:
raise ValueError("Cannot partition data as requested")
_count = size // itemsize
_itemsize = np.ones(_count, dtype=int) * (size // _count)
else:
_itemsize = np.array(itemsize, copy=False)
_count = len(itemsize)
if _itemsize.sum() != size:
raise ValueError("Cannot partition data as requested")
else:
_count = 1
# Check if data array is big enough and resize it if necessary
if self._size + size >= self._data.size:
capacity = int(2 ** np.ceil(np.log2(self._size + size)))
self._data = np.resize(self._data, capacity)
# Check if item array is big enough and resize it if necessary
if self._count + _count >= len(self._items):
capacity = int(2 ** np.ceil(np.log2(self._count + _count)))
self._items = np.resize(self._items, (capacity, 2))
# Check index
if index < 0:
index += len(self)
if index < 0 or index > len(self):
raise IndexError("List insertion index out of range")
# Inserting
if index < self._count:
istart = index
dstart = self._items[istart][0]
dstop = self._items[istart][1]
# Move data
Z = self._data[dstart:self._size]
self._data[dstart + size:self._size + size] = Z
# Update moved items
items = self._items[istart:self._count] + size
self._items[istart + _count:self._count + _count] = items
# Appending
else:
dstart = self._size
istart = self._count
# Only one item (faster)
if _count == 1:
# Store data
self._data[dstart:dstart + size] = data
self._size += size
# Store data location (= item)
self._items[istart][0] = dstart
self._items[istart][1] = dstart + size
self._count += 1
# Several items
else:
# Store data
dstop = dstart + size
self._data[dstart:dstop] = data
self._size += size
# Store items
items = np.ones((_count, 2), int) * dstart
C = _itemsize.cumsum()
items[1:, 0] += C[:-1]
items[0:, 1] += C
istop = istart + _count
self._items[istart:istop] = items
self._count += _count
def append(self, data, itemsize=None):
"""
Append data to the end.
Parameters
----------
data : array_like
An array, any object exposing the array interface, an object
whose __array__ method returns an array, or any (nested) sequence.
itemsize: int or 1-D array
If `itemsize is an integer, N, the array will be divided
into elements of size N. If such partition is not possible,
an error is raised.
If `itemsize` is 1-D array, the array will be divided into
            elements whose successive sizes will be picked from itemsize.
If the sum of itemsize values is different from array size,
an error is raised.
"""
self.insert(len(self), data, itemsize)
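# Illustrative use of the `itemsize` partitioning described above (a sketch; the
# container object `lst` is hypothetical, since the class definition lies outside
# this excerpt):
#     lst.append(np.arange(6), itemsize=2)          # three items of size 2
#     lst.append(np.arange(6), itemsize=[1, 2, 3])  # successive sizes 1, 2, 3
#     lst.append(np.arange(5), itemsize=2)          # ValueError: cannot partition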
|
[
"numpy.resize",
"numpy.log2",
"numpy.zeros",
"numpy.ones",
"numpy.array"
] |
[((2573, 2599), 'numpy.array', 'np.array', (['data'], {'copy': '(False)'}), '(data, copy=False)\n', (2581, 2599), True, 'import numpy as np\n'), ((3563, 3594), 'numpy.zeros', 'np.zeros', (['(self._count, 2)', 'int'], {}), '((self._count, 2), int)\n', (3571, 3594), True, 'import numpy as np\n'), ((3747, 3771), 'numpy.zeros', 'np.zeros', (['(1)'], {'dtype': 'dtype'}), '(1, dtype=dtype)\n', (3755, 3771), True, 'import numpy as np\n'), ((3798, 3825), 'numpy.zeros', 'np.zeros', (['(1, 2)'], {'dtype': 'int'}), '((1, 2), dtype=int)\n', (3806, 3825), True, 'import numpy as np\n'), ((4625, 4656), 'numpy.resize', 'np.resize', (['self._data', 'capacity'], {}), '(self._data, capacity)\n', (4634, 4656), True, 'import numpy as np\n'), ((11432, 11463), 'numpy.resize', 'np.resize', (['self._data', 'capacity'], {}), '(self._data, capacity)\n', (11441, 11463), True, 'import numpy as np\n'), ((11687, 11724), 'numpy.resize', 'np.resize', (['self._items', '(capacity, 2)'], {}), '(self._items, (capacity, 2))\n', (11696, 11724), True, 'import numpy as np\n'), ((2722, 2732), 'numpy.ones', 'np.ones', (['(1)'], {}), '(1)\n', (2729, 2732), True, 'import numpy as np\n'), ((10529, 10555), 'numpy.array', 'np.array', (['data'], {'copy': '(False)'}), '(data, copy=False)\n', (10537, 10555), True, 'import numpy as np\n'), ((10991, 11021), 'numpy.array', 'np.array', (['itemsize'], {'copy': '(False)'}), '(itemsize, copy=False)\n', (10999, 11021), True, 'import numpy as np\n'), ((13022, 13047), 'numpy.ones', 'np.ones', (['(_count, 2)', 'int'], {}), '((_count, 2), int)\n', (13029, 13047), True, 'import numpy as np\n'), ((3248, 3278), 'numpy.array', 'np.array', (['itemsize'], {'copy': '(False)'}), '(itemsize, copy=False)\n', (3256, 3278), True, 'import numpy as np\n'), ((10899, 10925), 'numpy.ones', 'np.ones', (['_count'], {'dtype': 'int'}), '(_count, dtype=int)\n', (10906, 10925), True, 'import numpy as np\n'), ((3107, 3138), 'numpy.ones', 'np.ones', (['self._count'], {'dtype': 'int'}), '(self._count, dtype=int)\n', (3114, 3138), True, 'import numpy as np\n'), ((4580, 4597), 'numpy.log2', 'np.log2', (['capacity'], {}), '(capacity)\n', (4587, 4597), True, 'import numpy as np\n'), ((11378, 11404), 'numpy.log2', 'np.log2', (['(self._size + size)'], {}), '(self._size + size)\n', (11385, 11404), True, 'import numpy as np\n'), ((11629, 11658), 'numpy.log2', 'np.log2', (['(self._count + _count)'], {}), '(self._count + _count)\n', (11636, 11658), True, 'import numpy as np\n')]
|
import lightgbm as lgb
import numpy as np
import pandas as pd
from attrdict import AttrDict
from sklearn.externals import joblib
from steppy.base import BaseTransformer
from .utils import NeptuneContext, get_logger
neptune_ctx = NeptuneContext()
logger = get_logger()
class LightGBM(BaseTransformer):
def __init__(self, name=None, **params):
super().__init__()
self.msg_prefix = 'LightGBM transformer'
logger.info('initializing {}.'.format(self.msg_prefix))
self.params = params
self.training_params = ['number_boosting_rounds', 'early_stopping_rounds']
self.evaluation_function = None
self.callbacks = callbacks(channel_prefix=name)
@property
def model_config(self):
return AttrDict({param: value for param, value in self.params.items()
if param not in self.training_params})
@property
def training_config(self):
return AttrDict({param: value for param, value in self.params.items()
if param in self.training_params})
def fit(self,
X, y,
X_valid, y_valid,
feature_names='auto',
categorical_features='auto',
**kwargs):
evaluation_results = {}
self._check_target_shape_and_type(y, 'y')
self._check_target_shape_and_type(y_valid, 'y_valid')
y = self._format_target(y, 'y')
y_valid = self._format_target(y_valid, 'y_valid')
logger.info('{}, train data shape {}'.format(self.msg_prefix, X.shape))
logger.info('{}, validation data shape {}'.format(self.msg_prefix, X_valid.shape))
logger.info('{}, train labels shape {}'.format(self.msg_prefix, y.shape))
logger.info('{}, validation labels shape {}'.format(self.msg_prefix, y_valid.shape))
data_train = lgb.Dataset(data=X,
label=y,
feature_name=feature_names,
categorical_feature=categorical_features,
**kwargs)
data_valid = lgb.Dataset(X_valid,
label=y_valid,
feature_name=feature_names,
categorical_feature=categorical_features,
**kwargs)
self.estimator = lgb.train(self.model_config,
data_train,
feature_name=feature_names,
categorical_feature=categorical_features,
valid_sets=[data_train, data_valid],
valid_names=['data_train', 'data_valid'],
evals_result=evaluation_results,
num_boost_round=self.training_config.number_boosting_rounds,
early_stopping_rounds=self.training_config.early_stopping_rounds,
verbose_eval=self.model_config.verbose,
feval=self.evaluation_function,
callbacks=self.callbacks,
**kwargs)
return self
def transform(self, X, **kwargs):
prediction = self.estimator.predict(X)
return {'prediction': prediction}
def load(self, filepath):
self.estimator = joblib.load(filepath)
return self
def persist(self, filepath):
joblib.dump(self.estimator, filepath)
def _check_target_shape_and_type(self, target, name):
if not any([isinstance(target, obj_type) for obj_type in [pd.Series, np.ndarray, list]]):
raise TypeError(
'{}: "{}" must be "numpy.ndarray" or "Pandas.Series" or "list", got {} instead.'.format(
self.msg_prefix,
name,
type(target)))
try:
assert len(target.shape) == 1, '{}: "{}" must be 1-D. It is {}-D instead.'.format(self.msg_prefix,
name,
len(target.shape))
except AttributeError:
            print('{}: cannot determine shape of the {}. '
'Type must be "numpy.ndarray" or "Pandas.Series" or "list", got {} instead'.format(self.msg_prefix,
name,
type(target)))
def _format_target(self, target, name):
if isinstance(target, pd.Series):
return target.values
elif isinstance(target, np.ndarray):
return target
elif isinstance(target, list):
return np.array(target)
else:
raise TypeError('{}: "{}" must be "numpy.ndarray" or "Pandas.Series" or "list", got {} instead.'.format(
self.msg_prefix,
name,
type(target)))
def callbacks(channel_prefix):
neptune_monitor = neptune_monitor_lgbm(channel_prefix)
return [neptune_monitor]
def neptune_monitor_lgbm(channel_prefix=''):
def callback(env):
for name, loss_name, loss_value, _ in env.evaluation_result_list:
if channel_prefix != '':
channel_name = '{}_{}_{}'.format(channel_prefix, name, loss_name)
else:
channel_name = '{}_{}'.format(name, loss_name)
neptune_ctx.ctx.channel_send(channel_name, x=env.iteration, y=loss_value)
return callback
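# Minimal usage sketch (the LightGBM parameter values here are hypothetical; in
# practice they come from the experiment configuration):
#     clf = LightGBM(name='main', objective='binary', metric='auc', verbose=10,
#                    number_boosting_rounds=500, early_stopping_rounds=50)
#     clf.fit(X_train, y_train, X_valid, y_valid)
#     preds = clf.transform(X_valid)['prediction']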
|
[
"sklearn.externals.joblib.dump",
"lightgbm.train",
"lightgbm.Dataset",
"numpy.array",
"sklearn.externals.joblib.load"
] |
[((1858, 1970), 'lightgbm.Dataset', 'lgb.Dataset', ([], {'data': 'X', 'label': 'y', 'feature_name': 'feature_names', 'categorical_feature': 'categorical_features'}), '(data=X, label=y, feature_name=feature_names,\n categorical_feature=categorical_features, **kwargs)\n', (1869, 1970), True, 'import lightgbm as lgb\n'), ((2120, 2239), 'lightgbm.Dataset', 'lgb.Dataset', (['X_valid'], {'label': 'y_valid', 'feature_name': 'feature_names', 'categorical_feature': 'categorical_features'}), '(X_valid, label=y_valid, feature_name=feature_names,\n categorical_feature=categorical_features, **kwargs)\n', (2131, 2239), True, 'import lightgbm as lgb\n'), ((2394, 2879), 'lightgbm.train', 'lgb.train', (['self.model_config', 'data_train'], {'feature_name': 'feature_names', 'categorical_feature': 'categorical_features', 'valid_sets': '[data_train, data_valid]', 'valid_names': "['data_train', 'data_valid']", 'evals_result': 'evaluation_results', 'num_boost_round': 'self.training_config.number_boosting_rounds', 'early_stopping_rounds': 'self.training_config.early_stopping_rounds', 'verbose_eval': 'self.model_config.verbose', 'feval': 'self.evaluation_function', 'callbacks': 'self.callbacks'}), "(self.model_config, data_train, feature_name=feature_names,\n categorical_feature=categorical_features, valid_sets=[data_train,\n data_valid], valid_names=['data_train', 'data_valid'], evals_result=\n evaluation_results, num_boost_round=self.training_config.\n number_boosting_rounds, early_stopping_rounds=self.training_config.\n early_stopping_rounds, verbose_eval=self.model_config.verbose, feval=\n self.evaluation_function, callbacks=self.callbacks, **kwargs)\n", (2403, 2879), True, 'import lightgbm as lgb\n'), ((3476, 3497), 'sklearn.externals.joblib.load', 'joblib.load', (['filepath'], {}), '(filepath)\n', (3487, 3497), False, 'from sklearn.externals import joblib\n'), ((3560, 3597), 'sklearn.externals.joblib.dump', 'joblib.dump', (['self.estimator', 'filepath'], {}), '(self.estimator, filepath)\n', (3571, 3597), False, 'from sklearn.externals import joblib\n'), ((5003, 5019), 'numpy.array', 'np.array', (['target'], {}), '(target)\n', (5011, 5019), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME> & <NAME>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
import numpy as np
# ReLU activation function
def relu(x):
x = np.array(x)
return np.maximum(0, x)
# tanh activation function
def tanh(x):
x = np.array(x)
return np.tanh(x)
# sigmoid activation function
def sigmoid(x):
x = np.array(x)
return 1 / (1 + np.exp(-x))
# softmax activation function
def softmax(x):
x = np.array(x)
assert len(x.shape) == 1 or len(x.shape) == 2
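    # subtract the max before exponentiating to avoid overflow; softmax is shift-invariant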
if len(x.shape) == 1:
x = x - x.max()
x = np.exp(x)
return x / x.sum()
else:
x = x - x.max(1, keepdims=True)
x = np.exp(x)
return x / x.sum(1, keepdims=True)
# linear activation function
def linear(x):
x = np.array(x)
return x
# threshold (step) activation function
def threshold(x, threshold=0):
x = np.array(x)
out = np.zeros_like(x, dtype=np.float)
out[x >= threshold] = 1
return out
# arctan activation function
def arctan(x):
x = np.array(x)
return np.arctan(x)
# leaky relu
def leaky_relu(x, alpha=0.1):
x = np.array(x, dtype=np.float)
x[x < 0] = (x * alpha)[x < 0]
return x
# PReLU activation function
def prelu(x, p):
x = np.array(x, dtype=np.float)
x[x < 0] = (x * p)[x < 0]
return x
# ELU activation function
def elu(x, alpha=0.1):
x = np.array(x, dtype=np.float)
x[x < 0] = (alpha * (np.exp(x) - 1))[x < 0]
return x
# softplus activation function
def softplus(x):
x = np.array(x)
return np.log(1 + np.exp(x))
# bent identity
def bent_identity(x):
x = np.array(x)
return (np.sqrt(np.square(x) + 1) - 1) * 0.5 + x
# Soft Exponential
def soft_exponential(x, p):
x = np.array(x, dtype=np.float)
x[p < 0] = (-np.log(np.maximum(1 - p[p < 0] * (x[p < 0] + p[p < 0]), 1e-7)) / p[p < 0])
x[p == 0] = 0
x[p > 0] = ((np.exp(p * x) - 1) / p + p)[p > 0]
return x
# Sinusoid
def sin(x):
x = np.array(x)
return np.sin(x)
# Sinc
def sinc(x):
x = np.array(x, dtype=np.float)
out = np.ones_like(x, dtype=np.float)
out[x != 0] = np.sin(x[x != 0]) / x[x != 0]
return out
# Gaussian
def gaussian(x):
x = np.array(x)
return np.exp(-np.square(x))
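# Quick sanity checks (illustrative expected values):
#     relu([-1.0, 2.0])       # -> array([0., 2.])
#     softmax([0.0, 0.0])     # -> array([0.5, 0.5])
#     threshold([-1.0, 3.0])  # -> array([0., 1.])
#     sinc([0.0])             # -> array([1.]), by the out[x != 0] convention above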
|
[
"numpy.zeros_like",
"numpy.maximum",
"numpy.tanh",
"numpy.ones_like",
"numpy.square",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.arctan"
] |
[((725, 736), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (733, 736), True, 'import numpy as np\n'), ((749, 765), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (759, 765), True, 'import numpy as np\n'), ((805, 816), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (813, 816), True, 'import numpy as np\n'), ((829, 839), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (836, 839), True, 'import numpy as np\n'), ((885, 896), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (893, 896), True, 'import numpy as np\n'), ((975, 986), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (983, 986), True, 'import numpy as np\n'), ((1303, 1314), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1311, 1314), True, 'import numpy as np\n'), ((1384, 1395), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1392, 1395), True, 'import numpy as np\n'), ((1407, 1439), 'numpy.zeros_like', 'np.zeros_like', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (1420, 1439), True, 'import numpy as np\n'), ((1528, 1539), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1536, 1539), True, 'import numpy as np\n'), ((1552, 1564), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (1561, 1564), True, 'import numpy as np\n'), ((1623, 1650), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (1631, 1650), True, 'import numpy as np\n'), ((1744, 1771), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (1752, 1771), True, 'import numpy as np\n'), ((1865, 1892), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (1873, 1892), True, 'import numpy as np\n'), ((2003, 2014), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2011, 2014), True, 'import numpy as np\n'), ((2102, 2113), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2110, 2113), True, 'import numpy as np\n'), ((2230, 2257), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (2238, 2257), True, 'import numpy as np\n'), ((2475, 2486), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2483, 2486), True, 'import numpy as np\n'), ((2499, 2508), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (2505, 2508), True, 'import numpy as np\n'), ((2544, 2571), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (2552, 2571), True, 'import numpy as np\n'), ((2583, 2614), 'numpy.ones_like', 'np.ones_like', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (2595, 2614), True, 'import numpy as np\n'), ((2723, 2734), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2731, 2734), True, 'import numpy as np\n'), ((1103, 1112), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1109, 1112), True, 'import numpy as np\n'), ((1206, 1215), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1212, 1215), True, 'import numpy as np\n'), ((2634, 2651), 'numpy.sin', 'np.sin', (['x[x != 0]'], {}), '(x[x != 0])\n', (2640, 2651), True, 'import numpy as np\n'), ((918, 928), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (924, 928), True, 'import numpy as np\n'), ((2038, 2047), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (2044, 2047), True, 'import numpy as np\n'), ((2755, 2767), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (2764, 2767), True, 'import numpy as np\n'), ((1919, 1928), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (1925, 1928), True, 'import numpy as np\n'), ((2283, 2338), 'numpy.maximum', 'np.maximum', (['(1 - p[p < 0] * (x[p < 0] + p[p < 0]))', '(1e-07)'], {}), '(1 - p[p < 0] * (x[p < 0] + p[p < 0]), 
1e-07)\n', (2293, 2338), True, 'import numpy as np\n'), ((2388, 2401), 'numpy.exp', 'np.exp', (['(p * x)'], {}), '(p * x)\n', (2394, 2401), True, 'import numpy as np\n'), ((2135, 2147), 'numpy.square', 'np.square', (['x'], {}), '(x)\n', (2144, 2147), True, 'import numpy as np\n')]
|
import numpy as np
import pytest
from numpy import linalg
import numpy.testing as npt
import itertools
from utils import get_rstate, get_printing
import dynesty # noqa
from dynesty import utils as dyfunc # noqa
"""
Run a series of basic tests to check whether anything huge is broken.
"""
nlive = 500
printing = get_printing()
def bootstrap_tol(results, rstate):
""" Compute the uncertainty of means/covs by doing bootstrapping """
n = len(results.logz)
niter = 50
pos = results.samples
wts = np.exp(results.logwt - results.logz[-1])
means = []
covs = []
for i in range(niter):
# curpos = dyfunc.resample_equal(pos, wts)
# xid = np.random.randint(len(curpos), size=len(curpos))
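        # rejection-sample posterior points with probability proportional to their weights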
sub = rstate.uniform(size=n) < wts / wts.max()
ind0 = np.nonzero(sub)[0]
ind1 = rstate.choice(ind0, size=len(ind0), replace=True)
mean = pos[ind1].mean(axis=0)
cov = np.cov(pos[ind1].T)
means.append(mean)
covs.append(cov)
return np.std(means, axis=0), np.std(covs, axis=0)
def check_results(results,
mean_truth,
cov_truth,
logz_truth,
mean_tol,
cov_tol,
logz_tol,
sig=5):
""" Check if means and covariances match match expectations
within the tolerances
"""
results.summary()
pos = results.samples
wts = np.exp(results.logwt - results.logz[-1])
mean, cov = dyfunc.mean_and_cov(pos, wts)
logz = results.logz[-1]
logzerr = results.logzerr[-1]
assert logzerr < 10 # check that it is not too large
npt.assert_array_less(np.abs(mean - mean_truth), sig * mean_tol)
npt.assert_array_less(np.abs(cov - cov_truth), sig * cov_tol)
npt.assert_array_less(np.abs((logz_truth - logz)), sig * logz_tol)
# GAUSSIAN TEST
class Gaussian:
def __init__(self, corr=.95, prior_win=10):
self.ndim = 3
self.mean = np.linspace(-1, 1, self.ndim)
self.cov = np.identity(self.ndim) # set covariance to identity matrix
        self.cov[self.cov == 0] = corr  # set off-diagonal terms (strongly correlated)
self.cov_inv = linalg.inv(self.cov) # precision matrix
self.lnorm = -0.5 * (np.log(2 * np.pi) * self.ndim +
np.log(linalg.det(self.cov)))
self.prior_win = prior_win # +/- on both sides
self.logz_truth = self.ndim * (-np.log(2 * self.prior_win))
# 3-D correlated multivariate normal log-likelihood
def loglikelihood(self, x):
"""Multivariate normal log-likelihood."""
return -0.5 * np.dot(
(x - self.mean), np.dot(self.cov_inv,
(x - self.mean))) + self.lnorm
# prior transform
def prior_transform(self, u):
"""Flat prior between -10. and 10."""
return self.prior_win * (2. * u - 1.)
# gradient (no jacobian)
def grad_x(self, x):
"""Multivariate normal log-likelihood gradient."""
return -np.dot(self.cov_inv, (x - self.mean))
# gradient (with jacobian)
def grad_u(self, x):
"""Multivariate normal log-likelihood gradient."""
return -np.dot(self.cov_inv, x - self.mean) * 2 * self.prior_win
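# Note on logz_truth above: the likelihood is normalized, so the evidence under a
# flat prior of width 2 * prior_win per dimension is Z = (2 * prior_win)**(-ndim),
# i.e. log Z = -ndim * log(2 * prior_win).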
def check_results_gau(results, g, rstate, sig=5, logz_tol=None):
if logz_tol is None:
logz_tol = sig * results.logzerr[-1]
mean_tol, cov_tol = bootstrap_tol(results, rstate)
# just check that resample_equal works
dyfunc.resample_equal(results.samples,
np.exp(results.logwt - results.logz[-1]))
check_results(results,
g.mean,
g.cov,
g.logz_truth,
mean_tol,
cov_tol,
logz_tol,
sig=sig)
def test_gaussian():
sig = 5
rstate = get_rstate()
g = Gaussian()
sampler = dynesty.NestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
nlive=nlive,
rstate=rstate)
sampler.run_nested(print_progress=printing)
# check that jitter/resample/simulate_run work
# for not dynamic sampler
dyfunc.jitter_run(sampler.results, rstate=rstate)
dyfunc.resample_run(sampler.results, rstate=rstate)
dyfunc.simulate_run(sampler.results, rstate=rstate)
# add samples
# check continuation behavior
sampler.run_nested(dlogz=0.1, print_progress=printing)
# get errors
nerr = 3
result_list = []
for i in range(nerr):
sampler.reset()
sampler.run_nested(print_progress=False)
results = sampler.results
result_list.append(results)
pos = results.samples
wts = np.exp(results.logwt - results.logz[-1])
mean, cov = dyfunc.mean_and_cov(pos, wts)
logz = results.logz[-1]
assert (np.abs(logz - g.logz_truth) < sig * results.logzerr[-1])
res_comb = dyfunc.merge_runs(result_list)
assert (np.abs(res_comb.logz[-1] - g.logz_truth) <
sig * results.logzerr[-1])
# check summary
res = sampler.results
res.summary()
# try all combinations except none/unif
@pytest.mark.parametrize(
"bound,sample",
list(
itertools.product(['single', 'multi', 'balls', 'cubes', 'none'],
['unif', 'rwalk', 'slice', 'rslice'])))
def test_bounding_sample(bound, sample):
# check various bounding methods
rstate = get_rstate()
if bound == 'none':
if sample != 'unif':
g = Gaussian(0.1)
else:
g = Gaussian(corr=0., prior_win=10)
        # make life easy if bound is none
# but also not too easy so propose_point() is exercised
else:
g = Gaussian()
sampler = dynesty.NestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
nlive=nlive,
bound=bound,
sample=sample,
rstate=rstate)
sampler.run_nested(print_progress=printing)
check_results_gau(sampler.results, g, rstate)
@pytest.mark.parametrize("bound,sample",
itertools.product(
['single', 'multi', 'balls', 'cubes'], ['unif']))
def test_bounding_bootstrap(bound, sample):
# check various bounding methods
rstate = get_rstate()
g = Gaussian()
sampler = dynesty.NestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
nlive=nlive,
bound=bound,
sample=sample,
bootstrap=5,
rstate=rstate)
sampler.run_nested(print_progress=printing)
check_results_gau(sampler.results, g, rstate)
# extra checks for gradients
def test_slice_nograd():
rstate = get_rstate()
g = Gaussian()
sampler = dynesty.NestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
nlive=nlive,
sample='hslice',
rstate=rstate)
sampler.run_nested(print_progress=printing)
check_results_gau(sampler.results, g, rstate)
def test_slice_grad():
rstate = get_rstate()
g = Gaussian()
sampler = dynesty.NestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
nlive=nlive,
sample='hslice',
gradient=g.grad_x,
compute_jac=True,
rstate=rstate)
sampler.run_nested(print_progress=printing)
check_results_gau(sampler.results, g, rstate)
def test_slice_grad1():
rstate = get_rstate()
g = Gaussian()
sampler = dynesty.NestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
nlive=nlive,
sample='hslice',
gradient=g.grad_u,
rstate=rstate)
sampler.run_nested(print_progress=printing)
check_results_gau(sampler.results, g, rstate)
def test_dynamic():
# check dynamic nested sampling behavior
rstate = get_rstate()
g = Gaussian()
dsampler = dynesty.DynamicNestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
rstate=rstate)
dsampler.run_nested(print_progress=printing)
    # check explicitly adding batches
dsampler.add_batch(mode='auto')
dsampler.add_batch(mode='weight')
dsampler.add_batch(mode='full')
dsampler.add_batch(logl_bounds=(-10, 0), mode='manual')
dsampler.add_batch(logl_bounds=(-10000000, -1000), mode='manual')
check_results_gau(dsampler.results, g, rstate)
# check error analysis functions
dres = dyfunc.jitter_run(dsampler.results, rstate=rstate)
check_results_gau(dres, g, rstate)
dres = dyfunc.resample_run(dsampler.results, rstate=rstate)
check_results_gau(dres, g, rstate)
dres = dyfunc.simulate_run(dsampler.results, rstate=rstate)
check_results_gau(dres, g, rstate)
dyfunc.kld_error(dsampler.results, rstate=rstate)
def test_ravel_unravel():
""" Here I test that ravel/unravel preserves things correctly """
rstate = get_rstate()
g = Gaussian()
dsampler = dynesty.DynamicNestedSampler(g.loglikelihood,
g.prior_transform,
g.ndim,
bound='single',
sample='unif',
rstate=rstate,
nlive=nlive)
maxiter = 1800
dsampler.run_nested(maxiter=maxiter,
use_stop=False,
nlive_batch=100,
print_progress=printing)
dres = dsampler.results
dres_list = dyfunc.unravel_run(dres)
dres_merge = dyfunc.merge_runs(dres_list)
assert np.abs(dres.logz[-1] - dres_merge.logz[-1]) < 0.01
|
[
"numpy.abs",
"dynesty.utils.mean_and_cov",
"dynesty.DynamicNestedSampler",
"dynesty.utils.jitter_run",
"numpy.exp",
"utils.get_printing",
"dynesty.utils.unravel_run",
"numpy.std",
"numpy.identity",
"numpy.linspace",
"dynesty.utils.simulate_run",
"itertools.product",
"numpy.linalg.det",
"numpy.cov",
"numpy.linalg.inv",
"numpy.dot",
"dynesty.NestedSampler",
"dynesty.utils.merge_runs",
"dynesty.utils.kld_error",
"utils.get_rstate",
"numpy.log",
"dynesty.utils.resample_run",
"numpy.nonzero"
] |
[((317, 331), 'utils.get_printing', 'get_printing', ([], {}), '()\n', (329, 331), False, 'from utils import get_rstate, get_printing\n'), ((520, 560), 'numpy.exp', 'np.exp', (['(results.logwt - results.logz[-1])'], {}), '(results.logwt - results.logz[-1])\n', (526, 560), True, 'import numpy as np\n'), ((1451, 1491), 'numpy.exp', 'np.exp', (['(results.logwt - results.logz[-1])'], {}), '(results.logwt - results.logz[-1])\n', (1457, 1491), True, 'import numpy as np\n'), ((1508, 1537), 'dynesty.utils.mean_and_cov', 'dyfunc.mean_and_cov', (['pos', 'wts'], {}), '(pos, wts)\n', (1527, 1537), True, 'from dynesty import utils as dyfunc\n'), ((3918, 3930), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (3928, 3930), False, 'from utils import get_rstate, get_printing\n'), ((3964, 4062), 'dynesty.NestedSampler', 'dynesty.NestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'nlive': 'nlive', 'rstate': 'rstate'}), '(g.loglikelihood, g.prior_transform, g.ndim, nlive=\n nlive, rstate=rstate)\n', (3985, 4062), False, 'import dynesty\n'), ((4335, 4384), 'dynesty.utils.jitter_run', 'dyfunc.jitter_run', (['sampler.results'], {'rstate': 'rstate'}), '(sampler.results, rstate=rstate)\n', (4352, 4384), True, 'from dynesty import utils as dyfunc\n'), ((4389, 4440), 'dynesty.utils.resample_run', 'dyfunc.resample_run', (['sampler.results'], {'rstate': 'rstate'}), '(sampler.results, rstate=rstate)\n', (4408, 4440), True, 'from dynesty import utils as dyfunc\n'), ((4445, 4496), 'dynesty.utils.simulate_run', 'dyfunc.simulate_run', (['sampler.results'], {'rstate': 'rstate'}), '(sampler.results, rstate=rstate)\n', (4464, 4496), True, 'from dynesty import utils as dyfunc\n'), ((5085, 5115), 'dynesty.utils.merge_runs', 'dyfunc.merge_runs', (['result_list'], {}), '(result_list)\n', (5102, 5115), True, 'from dynesty import utils as dyfunc\n'), ((5604, 5616), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (5614, 5616), False, 'from utils import get_rstate, get_printing\n'), ((5923, 6049), 'dynesty.NestedSampler', 'dynesty.NestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'nlive': 'nlive', 'bound': 'bound', 'sample': 'sample', 'rstate': 'rstate'}), '(g.loglikelihood, g.prior_transform, g.ndim, nlive=\n nlive, bound=bound, sample=sample, rstate=rstate)\n', (5944, 6049), False, 'import dynesty\n'), ((6620, 6632), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (6630, 6632), False, 'from utils import get_rstate, get_printing\n'), ((6666, 6805), 'dynesty.NestedSampler', 'dynesty.NestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'nlive': 'nlive', 'bound': 'bound', 'sample': 'sample', 'bootstrap': '(5)', 'rstate': 'rstate'}), '(g.loglikelihood, g.prior_transform, g.ndim, nlive=\n nlive, bound=bound, sample=sample, bootstrap=5, rstate=rstate)\n', (6687, 6805), False, 'import dynesty\n'), ((6427, 6493), 'itertools.product', 'itertools.product', (["['single', 'multi', 'balls', 'cubes']", "['unif']"], {}), "(['single', 'multi', 'balls', 'cubes'], ['unif'])\n", (6444, 6493), False, 'import itertools\n'), ((7220, 7232), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (7230, 7232), False, 'from utils import get_rstate, get_printing\n'), ((7266, 7381), 'dynesty.NestedSampler', 'dynesty.NestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'nlive': 'nlive', 'sample': '"""hslice"""', 'rstate': 'rstate'}), "(g.loglikelihood, g.prior_transform, g.ndim, nlive=\n nlive, sample='hslice', rstate=rstate)\n", (7287, 7381), False, 'import dynesty\n'), ((7693, 
7705), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (7703, 7705), False, 'from utils import get_rstate, get_printing\n'), ((7739, 7891), 'dynesty.NestedSampler', 'dynesty.NestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'nlive': 'nlive', 'sample': '"""hslice"""', 'gradient': 'g.grad_x', 'compute_jac': '(True)', 'rstate': 'rstate'}), "(g.loglikelihood, g.prior_transform, g.ndim, nlive=\n nlive, sample='hslice', gradient=g.grad_x, compute_jac=True, rstate=rstate)\n", (7760, 7891), False, 'import dynesty\n'), ((8276, 8288), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (8286, 8288), False, 'from utils import get_rstate, get_printing\n'), ((8322, 8456), 'dynesty.NestedSampler', 'dynesty.NestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'nlive': 'nlive', 'sample': '"""hslice"""', 'gradient': 'g.grad_u', 'rstate': 'rstate'}), "(g.loglikelihood, g.prior_transform, g.ndim, nlive=\n nlive, sample='hslice', gradient=g.grad_u, rstate=rstate)\n", (8343, 8456), False, 'import dynesty\n'), ((8846, 8858), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (8856, 8858), False, 'from utils import get_rstate, get_printing\n'), ((8893, 8984), 'dynesty.DynamicNestedSampler', 'dynesty.DynamicNestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'rstate': 'rstate'}), '(g.loglikelihood, g.prior_transform, g.ndim,\n rstate=rstate)\n', (8921, 8984), False, 'import dynesty\n'), ((9539, 9589), 'dynesty.utils.jitter_run', 'dyfunc.jitter_run', (['dsampler.results'], {'rstate': 'rstate'}), '(dsampler.results, rstate=rstate)\n', (9556, 9589), True, 'from dynesty import utils as dyfunc\n'), ((9640, 9692), 'dynesty.utils.resample_run', 'dyfunc.resample_run', (['dsampler.results'], {'rstate': 'rstate'}), '(dsampler.results, rstate=rstate)\n', (9659, 9692), True, 'from dynesty import utils as dyfunc\n'), ((9743, 9795), 'dynesty.utils.simulate_run', 'dyfunc.simulate_run', (['dsampler.results'], {'rstate': 'rstate'}), '(dsampler.results, rstate=rstate)\n', (9762, 9795), True, 'from dynesty import utils as dyfunc\n'), ((9840, 9889), 'dynesty.utils.kld_error', 'dyfunc.kld_error', (['dsampler.results'], {'rstate': 'rstate'}), '(dsampler.results, rstate=rstate)\n', (9856, 9889), True, 'from dynesty import utils as dyfunc\n'), ((10001, 10013), 'utils.get_rstate', 'get_rstate', ([], {}), '()\n', (10011, 10013), False, 'from utils import get_rstate, get_printing\n'), ((10049, 10184), 'dynesty.DynamicNestedSampler', 'dynesty.DynamicNestedSampler', (['g.loglikelihood', 'g.prior_transform', 'g.ndim'], {'bound': '"""single"""', 'sample': '"""unif"""', 'rstate': 'rstate', 'nlive': 'nlive'}), "(g.loglikelihood, g.prior_transform, g.ndim,\n bound='single', sample='unif', rstate=rstate, nlive=nlive)\n", (10077, 10184), False, 'import dynesty\n'), ((10680, 10704), 'dynesty.utils.unravel_run', 'dyfunc.unravel_run', (['dres'], {}), '(dres)\n', (10698, 10704), True, 'from dynesty import utils as dyfunc\n'), ((10722, 10750), 'dynesty.utils.merge_runs', 'dyfunc.merge_runs', (['dres_list'], {}), '(dres_list)\n', (10739, 10750), True, 'from dynesty import utils as dyfunc\n'), ((940, 959), 'numpy.cov', 'np.cov', (['pos[ind1].T'], {}), '(pos[ind1].T)\n', (946, 959), True, 'import numpy as np\n'), ((1023, 1044), 'numpy.std', 'np.std', (['means'], {'axis': '(0)'}), '(means, axis=0)\n', (1029, 1044), True, 'import numpy as np\n'), ((1046, 1066), 'numpy.std', 'np.std', (['covs'], {'axis': '(0)'}), '(covs, axis=0)\n', (1052, 1066), True, 'import numpy as np\n'), ((1684, 1709), 
'numpy.abs', 'np.abs', (['(mean - mean_truth)'], {}), '(mean - mean_truth)\n', (1690, 1709), True, 'import numpy as np\n'), ((1753, 1776), 'numpy.abs', 'np.abs', (['(cov - cov_truth)'], {}), '(cov - cov_truth)\n', (1759, 1776), True, 'import numpy as np\n'), ((1819, 1844), 'numpy.abs', 'np.abs', (['(logz_truth - logz)'], {}), '(logz_truth - logz)\n', (1825, 1844), True, 'import numpy as np\n'), ((1991, 2020), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'self.ndim'], {}), '(-1, 1, self.ndim)\n', (2002, 2020), True, 'import numpy as np\n'), ((2040, 2062), 'numpy.identity', 'np.identity', (['self.ndim'], {}), '(self.ndim)\n', (2051, 2062), True, 'import numpy as np\n'), ((2227, 2247), 'numpy.linalg.inv', 'linalg.inv', (['self.cov'], {}), '(self.cov)\n', (2237, 2247), False, 'from numpy import linalg\n'), ((3608, 3648), 'numpy.exp', 'np.exp', (['(results.logwt - results.logz[-1])'], {}), '(results.logwt - results.logz[-1])\n', (3614, 3648), True, 'import numpy as np\n'), ((4874, 4914), 'numpy.exp', 'np.exp', (['(results.logwt - results.logz[-1])'], {}), '(results.logwt - results.logz[-1])\n', (4880, 4914), True, 'import numpy as np\n'), ((4935, 4964), 'dynesty.utils.mean_and_cov', 'dyfunc.mean_and_cov', (['pos', 'wts'], {}), '(pos, wts)\n', (4954, 4964), True, 'from dynesty import utils as dyfunc\n'), ((5128, 5168), 'numpy.abs', 'np.abs', (['(res_comb.logz[-1] - g.logz_truth)'], {}), '(res_comb.logz[-1] - g.logz_truth)\n', (5134, 5168), True, 'import numpy as np\n'), ((5381, 5487), 'itertools.product', 'itertools.product', (["['single', 'multi', 'balls', 'cubes', 'none']", "['unif', 'rwalk', 'slice', 'rslice']"], {}), "(['single', 'multi', 'balls', 'cubes', 'none'], ['unif',\n 'rwalk', 'slice', 'rslice'])\n", (5398, 5487), False, 'import itertools\n'), ((10762, 10805), 'numpy.abs', 'np.abs', (['(dres.logz[-1] - dres_merge.logz[-1])'], {}), '(dres.logz[-1] - dres_merge.logz[-1])\n', (10768, 10805), True, 'import numpy as np\n'), ((804, 819), 'numpy.nonzero', 'np.nonzero', (['sub'], {}), '(sub)\n', (814, 819), True, 'import numpy as np\n'), ((3077, 3112), 'numpy.dot', 'np.dot', (['self.cov_inv', '(x - self.mean)'], {}), '(self.cov_inv, x - self.mean)\n', (3083, 3112), True, 'import numpy as np\n'), ((5013, 5040), 'numpy.abs', 'np.abs', (['(logz - g.logz_truth)'], {}), '(logz - g.logz_truth)\n', (5019, 5040), True, 'import numpy as np\n'), ((2484, 2510), 'numpy.log', 'np.log', (['(2 * self.prior_win)'], {}), '(2 * self.prior_win)\n', (2490, 2510), True, 'import numpy as np\n'), ((2297, 2314), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (2303, 2314), True, 'import numpy as np\n'), ((2365, 2385), 'numpy.linalg.det', 'linalg.det', (['self.cov'], {}), '(self.cov)\n', (2375, 2385), False, 'from numpy import linalg\n'), ((2710, 2745), 'numpy.dot', 'np.dot', (['self.cov_inv', '(x - self.mean)'], {}), '(self.cov_inv, x - self.mean)\n', (2716, 2745), True, 'import numpy as np\n'), ((3247, 3282), 'numpy.dot', 'np.dot', (['self.cov_inv', '(x - self.mean)'], {}), '(self.cov_inv, x - self.mean)\n', (3253, 3282), True, 'import numpy as np\n')]
|
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import numpy
from vector.compute.planar import x, y
from vector.compute.spatial import z
from vector.methods import (
AzimuthalRhoPhi,
AzimuthalXY,
LongitudinalEta,
LongitudinalTheta,
LongitudinalZ,
_aztype,
_from_signature,
_ltype,
)
# Rotation is only computed in Cartesian coordinates; the rest are conversions.
# Follows ROOT's conventions.
#
# https://github.com/root-project/root/blob/f8efb11a51cbe5b5152ebef19a4f7b78744ca2fa/math/genvector/src/3DConversions.cxx#L478-L502
#
# I don't know how this relates to Wikipedia's representation:
#
# https://en.wikipedia.org/wiki/Quaternions_and_spatial_rotation#Quaternion-derived_rotation_matrix
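# Worked check of cartesian() below (illustrative): a 90-degree rotation about the
# z axis is the unit quaternion (u, i, j, k) = (sqrt(2)/2, 0, 0, sqrt(2)/2), so
# q00 = u*u = 0.5, q03 = u*k = 0.5, q33 = k*k = 0.5 and the remaining products
# vanish. Then (x, y, z) = (1, 0, 0) maps to
#     xp = (q00 - q33) * 1 = 0,  yp = 2 * q03 * 1 = 1,  zp = 0,
# i.e. the x axis rotates onto the y axis as expected.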
def cartesian(lib, u, i, j, k, x, y, z):
q00 = u * u
q01 = u * i
q02 = u * j
q03 = u * k
q11 = i * i
q12 = i * j
q13 = i * k
q22 = j * j
q23 = j * k
q33 = k * k
xp = (q00 + q11 - q22 - q33) * x + (2 * (q12 - q03)) * y + (2 * (q02 + q13)) * z
yp = (2 * (q12 + q03)) * x + (q00 - q11 + q22 - q33) * y + (2 * (q23 - q01)) * z
zp = (2 * (q13 - q02)) * x + (2 * (q23 + q01)) * y + (q00 - q11 - q22 + q33) * z
return (xp, yp, zp)
dispatch_map = {
(AzimuthalXY, LongitudinalZ): (cartesian, AzimuthalXY, LongitudinalZ),
}
def make_conversion(azimuthal, longitudinal):
if (azimuthal, longitudinal) != (AzimuthalXY, LongitudinalZ):
if azimuthal is AzimuthalXY:
to_x = x.xy
to_y = y.xy
if longitudinal is LongitudinalZ:
to_z = z.xy_z
elif longitudinal is LongitudinalTheta:
to_z = z.xy_theta
elif longitudinal is LongitudinalEta:
to_z = z.xy_eta
elif azimuthal is AzimuthalRhoPhi:
to_x = x.rhophi
to_y = y.rhophi
if longitudinal is LongitudinalZ:
to_z = z.rhophi_z
elif longitudinal is LongitudinalTheta:
to_z = z.rhophi_theta
elif longitudinal is LongitudinalEta:
to_z = z.rhophi_eta
cartesian, azout, lout = dispatch_map[AzimuthalXY, LongitudinalZ]
def f(lib, u, i, j, k, coord1, coord2, coord3):
return cartesian(
lib,
u,
i,
j,
k,
to_x(lib, coord1, coord2),
to_y(lib, coord1, coord2),
to_z(lib, coord1, coord2, coord3),
)
dispatch_map[azimuthal, longitudinal] = (f, azout, lout)
for azimuthal in (AzimuthalXY, AzimuthalRhoPhi):
for longitudinal in (LongitudinalZ, LongitudinalTheta, LongitudinalEta):
make_conversion(azimuthal, longitudinal)
def dispatch(u, i, j, k, vec):
function, *returns = _from_signature(
__name__,
dispatch_map,
(
_aztype(vec),
_ltype(vec),
),
)
with numpy.errstate(all="ignore"):
return vec._wrap_result(
function(
vec.lib, u, i, j, k, *vec.azimuthal.elements, *vec.longitudinal.elements
),
returns,
)
|
[
"vector.methods._ltype",
"vector.methods._aztype",
"numpy.errstate"
] |
[((3109, 3137), 'numpy.errstate', 'numpy.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (3123, 3137), False, 'import numpy\n'), ((3044, 3056), 'vector.methods._aztype', '_aztype', (['vec'], {}), '(vec)\n', (3051, 3056), False, 'from vector.methods import AzimuthalRhoPhi, AzimuthalXY, LongitudinalEta, LongitudinalTheta, LongitudinalZ, _aztype, _from_signature, _ltype\n'), ((3070, 3081), 'vector.methods._ltype', '_ltype', (['vec'], {}), '(vec)\n', (3076, 3081), False, 'from vector.methods import AzimuthalRhoPhi, AzimuthalXY, LongitudinalEta, LongitudinalTheta, LongitudinalZ, _aztype, _from_signature, _ltype\n')]
|
import os
import pytest
import tempfile
import pickle
import numpy as np
from ogindia.utils import comp_array, comp_scalar, dict_compare
from ogindia.get_micro_data import get_calculator
from ogindia import SS, TPI, utils
from ogindia.parameters import Specifications
from taxcalc import GrowFactors
TOL = 1e-5
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
@pytest.yield_fixture
def picklefile1():
x = {'a': 1}
pfile = tempfile.NamedTemporaryFile(mode='a', delete=False)
pickle.dump(x, open(pfile.name, 'wb'))
pfile.close()
# must close and then yield for Windows platform
yield pfile
os.remove(pfile.name)
@pytest.yield_fixture
def picklefile2():
y = {'a': 1, 'b': 2}
pfile = tempfile.NamedTemporaryFile(mode='a', delete=False)
pickle.dump(y, open(pfile.name, 'wb'))
pfile.close()
# must close and then yield for Windows platform
yield pfile
os.remove(pfile.name)
@pytest.yield_fixture
def picklefile3():
x = {'a': np.array([100., 200., 300.]), 'b': 2}
pfile = tempfile.NamedTemporaryFile(mode='a', delete=False)
pickle.dump(x, open(pfile.name, 'wb'))
pfile.close()
# must close and then yield for Windows platform
yield pfile
os.remove(pfile.name)
@pytest.yield_fixture
def picklefile4():
x = {'a': np.array([100., 200., 300.1]), 'b': 2}
pfile = tempfile.NamedTemporaryFile(mode='a', delete=False)
pickle.dump(x, open(pfile.name, 'wb'))
pfile.close()
# must close and then yield for Windows platform
yield pfile
os.remove(pfile.name)
def test_import_ok():
import ogindia
@pytest.mark.full_run
@pytest.mark.parametrize('time_path', [False, True], ids=['SS', 'TPI'])
def test_run_small(time_path):
from ogindia.execute import runner
    # Monkey patch enforcement flags and solver tolerances since small data won't pass checks
SS.ENFORCE_SOLUTION_CHECKS = False
TPI.ENFORCE_SOLUTION_CHECKS = False
SS.MINIMIZER_TOL = 1e-6
TPI.MINIMIZER_TOL = 1e-6
output_base = './OUTPUT'
input_dir = './OUTPUT'
user_params = {'frisch': 0.41, 'debt_ratio_ss': 0.4}
runner(output_base=output_base, baseline_dir=input_dir, test=True,
time_path=time_path, baseline=True, user_params=user_params,
run_micro=False)
@pytest.mark.full_run
def test_constant_demographics_TPI():
'''
    This test solves the model under the assumption of constant
demographics, a balanced budget, and tax functions that do not vary
over time.
    In this case, given how initial guesses for the time
path are made, the time path should be solved for on the first
iteration and the values all along the time path should equal their
steady-state values.
'''
output_base = './OUTPUT'
baseline_dir = './OUTPUT'
# Create output directory structure
ss_dir = os.path.join(output_base, 'SS')
tpi_dir = os.path.join(output_base, 'TPI')
dirs = [ss_dir, tpi_dir]
for _dir in dirs:
try:
print('making dir: ', _dir)
os.makedirs(_dir)
except OSError:
pass
spec = Specifications(run_micro=False, output_base=output_base,
baseline_dir=baseline_dir, test=False,
time_path=True, baseline=True, reform={},
guid='')
user_params = {'constant_demographics': True,
'budget_balance': True,
'zero_taxes': True,
'maxiter': 2,
'eta': (spec.omega_SS.reshape(spec.S, 1) *
spec.lambdas.reshape(1, spec.J))}
spec.update_specifications(user_params)
spec.get_tax_function_parameters(None, False)
# Run SS
ss_outputs = SS.run_SS(spec, None)
# save SS results
utils.mkdirs(os.path.join(baseline_dir, 'SS'))
ss_dir = os.path.join(baseline_dir, 'SS', 'SS_vars.pkl')
pickle.dump(ss_outputs, open(ss_dir, 'wb'))
# Run TPI
tpi_output = TPI.run_TPI(spec, None)
assert(np.allclose(tpi_output['bmat_splus1'][:spec.T, :, :],
ss_outputs['bssmat_splus1']))
def test_compare_pickle_file_bad(picklefile1, picklefile2):
from ogindia.utils import pickle_file_compare
assert not pickle_file_compare(picklefile1.name, picklefile2.name)
def test_compare_pickle_file_bad2(picklefile3, picklefile4):
from ogindia.utils import pickle_file_compare
assert not pickle_file_compare(picklefile3.name, picklefile4.name)
def test_compare_pickle_file_relative(picklefile3, picklefile4):
from ogindia.utils import pickle_file_compare
assert pickle_file_compare(
picklefile3.name, picklefile4.name, relative=True)
def test_compare_pickle_file_basic(picklefile1):
from ogindia.utils import pickle_file_compare
assert pickle_file_compare(picklefile1.name, picklefile1.name)
def test_compare_dict_basic():
from ogindia.utils import dict_compare
lhs = {'a': 1, 'b': 2}
rhs = {'c': 4, 'b': 2}
assert not dict_compare('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)
def test_compare_dict_more_lhs():
from ogindia.utils import dict_compare
lhs = {'a': 1, 'b': 2, 'c': 3}
rhs = {'c': 4, 'b': 2}
assert not dict_compare('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)
def test_compare_dict_diff_ndarrays():
from ogindia.utils import dict_compare
lhs = {'a': np.array([1, 2, 3]), 'b': 2}
rhs = {'a': np.array([1, 3]), 'b': 2}
assert not dict_compare('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)
def test_compare_dict_diff_ndarrays2():
from ogindia.utils import dict_compare
lhs = {'a': np.array([1., 2., 3.]), 'b': 2}
rhs = {'a': np.array([1., 2., 3.1]), 'b': 2}
assert not dict_compare('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)
def test_comp_array_relative():
x = np.array([100., 200., 300.])
y = np.array([100.01, 200.02, 300.03])
unequal = []
assert not comp_array('test', y, x, 1e-3, unequal)
assert comp_array('test', y, x, 1e-3, unequal, relative=True)
def test_comp_array_relative_exception():
x = np.array([100., 200., 300.])
y = np.array([100.01, 200.02, 300.03])
unequal = []
exc = {'var': 1e-3}
assert comp_array('var', y, x, 1e-5, unequal,
exceptions=exc, relative=True)
def test_comp_scalar_relative():
x = 100
y = 100.01
unequal = []
assert not comp_scalar('test', y, x, 1e-3, unequal)
assert comp_scalar('test', y, x, 1e-3, unequal, relative=True)
def test_comp_scalar_relative_exception():
x = 100
y = 100.01
unequal = []
exc = {'var': 1e-3}
assert comp_scalar('var', y, x, 1e-5, unequal,
exceptions=exc, relative=True)
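# The pattern in the relative-comparison tests above: with x = 100 and y = 100.01,
# the relative error (presumably |y - x| / |x|) is 1e-4 and passes tol=1e-3, while
# the absolute difference 0.01 fails the same tolerance, hence the paired
# negative/positive assertions.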
def test_compare_dict_diff_ndarrays_relative():
lhs = {'a': np.array([100., 200., 300.]), 'b': 2}
rhs = {'a': np.array([100., 200., 300.1]), 'b': 2}
assert dict_compare('lhs.pkle', lhs, 'rhs.pkle',
rhs, tol=1e-3, relative=True)
def test_get_micro_data_get_calculator():
reform = {2017: {
'_rate1': [0.09],
'_rate2': [0.135],
'_rate3': [0.225],
'_rate4': [0.252]
}}
calc = get_calculator(baseline=False, calculator_start_year=2017,
reform=reform, data='pitSmallData.csv',
gfactors=GrowFactors(),
records_start_year=2017)
assert calc.current_year == 2017
|
[
"tempfile.NamedTemporaryFile",
"os.remove",
"ogindia.TPI.run_TPI",
"ogindia.execute.runner",
"os.makedirs",
"taxcalc.GrowFactors",
"os.path.dirname",
"numpy.allclose",
"ogindia.utils.pickle_file_compare",
"ogindia.utils.dict_compare",
"ogindia.parameters.Specifications",
"ogindia.SS.run_SS",
"numpy.array",
"ogindia.utils.comp_scalar",
"pytest.mark.parametrize",
"os.path.join",
"ogindia.utils.comp_array"
] |
[((1635, 1705), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""time_path"""', '[False, True]'], {'ids': "['SS', 'TPI']"}), "('time_path', [False, True], ids=['SS', 'TPI'])\n", (1658, 1705), False, 'import pytest\n'), ((340, 365), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (355, 365), False, 'import os\n'), ((439, 490), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""a"""', 'delete': '(False)'}), "(mode='a', delete=False)\n", (466, 490), False, 'import tempfile\n'), ((625, 646), 'os.remove', 'os.remove', (['pfile.name'], {}), '(pfile.name)\n', (634, 646), False, 'import os\n'), ((728, 779), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""a"""', 'delete': '(False)'}), "(mode='a', delete=False)\n", (755, 779), False, 'import tempfile\n'), ((914, 935), 'os.remove', 'os.remove', (['pfile.name'], {}), '(pfile.name)\n', (923, 935), False, 'import os\n'), ((1043, 1094), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""a"""', 'delete': '(False)'}), "(mode='a', delete=False)\n", (1070, 1094), False, 'import tempfile\n'), ((1229, 1250), 'os.remove', 'os.remove', (['pfile.name'], {}), '(pfile.name)\n', (1238, 1250), False, 'import os\n'), ((1359, 1410), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'mode': '"""a"""', 'delete': '(False)'}), "(mode='a', delete=False)\n", (1386, 1410), False, 'import tempfile\n'), ((1545, 1566), 'os.remove', 'os.remove', (['pfile.name'], {}), '(pfile.name)\n', (1554, 1566), False, 'import os\n'), ((2100, 2253), 'ogindia.execute.runner', 'runner', ([], {'output_base': 'output_base', 'baseline_dir': 'input_dir', 'test': '(True)', 'time_path': 'time_path', 'baseline': '(True)', 'user_params': 'user_params', 'run_micro': '(False)'}), '(output_base=output_base, baseline_dir=input_dir, test=True,\n time_path=time_path, baseline=True, user_params=user_params, run_micro=\n False)\n', (2106, 2253), False, 'from ogindia.execute import runner\n'), ((2829, 2860), 'os.path.join', 'os.path.join', (['output_base', '"""SS"""'], {}), "(output_base, 'SS')\n", (2841, 2860), False, 'import os\n'), ((2875, 2907), 'os.path.join', 'os.path.join', (['output_base', '"""TPI"""'], {}), "(output_base, 'TPI')\n", (2887, 2907), False, 'import os\n'), ((3094, 3250), 'ogindia.parameters.Specifications', 'Specifications', ([], {'run_micro': '(False)', 'output_base': 'output_base', 'baseline_dir': 'baseline_dir', 'test': '(False)', 'time_path': '(True)', 'baseline': '(True)', 'reform': '{}', 'guid': '""""""'}), "(run_micro=False, output_base=output_base, baseline_dir=\n baseline_dir, test=False, time_path=True, baseline=True, reform={}, guid=''\n )\n", (3108, 3250), False, 'from ogindia.parameters import Specifications\n'), ((3731, 3752), 'ogindia.SS.run_SS', 'SS.run_SS', (['spec', 'None'], {}), '(spec, None)\n', (3740, 3752), False, 'from ogindia import SS, TPI, utils\n'), ((3839, 3886), 'os.path.join', 'os.path.join', (['baseline_dir', '"""SS"""', '"""SS_vars.pkl"""'], {}), "(baseline_dir, 'SS', 'SS_vars.pkl')\n", (3851, 3886), False, 'import os\n'), ((3966, 3989), 'ogindia.TPI.run_TPI', 'TPI.run_TPI', (['spec', 'None'], {}), '(spec, None)\n', (3977, 3989), False, 'from ogindia import SS, TPI, utils\n'), ((4001, 4088), 'numpy.allclose', 'np.allclose', (["tpi_output['bmat_splus1'][:spec.T, :, :]", "ss_outputs['bssmat_splus1']"], {}), "(tpi_output['bmat_splus1'][:spec.T, :, :], ss_outputs[\n 'bssmat_splus1'])\n", (4012, 4088), True, 'import numpy 
as np\n'), ((4603, 4673), 'ogindia.utils.pickle_file_compare', 'pickle_file_compare', (['picklefile3.name', 'picklefile4.name'], {'relative': '(True)'}), '(picklefile3.name, picklefile4.name, relative=True)\n', (4622, 4673), False, 'from ogindia.utils import pickle_file_compare\n'), ((4795, 4850), 'ogindia.utils.pickle_file_compare', 'pickle_file_compare', (['picklefile1.name', 'picklefile1.name'], {}), '(picklefile1.name, picklefile1.name)\n', (4814, 4850), False, 'from ogindia.utils import pickle_file_compare\n'), ((5801, 5832), 'numpy.array', 'np.array', (['[100.0, 200.0, 300.0]'], {}), '([100.0, 200.0, 300.0])\n', (5809, 5832), True, 'import numpy as np\n'), ((5838, 5872), 'numpy.array', 'np.array', (['[100.01, 200.02, 300.03]'], {}), '([100.01, 200.02, 300.03])\n', (5846, 5872), True, 'import numpy as np\n'), ((5956, 6011), 'ogindia.utils.comp_array', 'comp_array', (['"""test"""', 'y', 'x', '(0.001)', 'unequal'], {'relative': '(True)'}), "('test', y, x, 0.001, unequal, relative=True)\n", (5966, 6011), False, 'from ogindia.utils import comp_array, comp_scalar, dict_compare\n'), ((6063, 6094), 'numpy.array', 'np.array', (['[100.0, 200.0, 300.0]'], {}), '([100.0, 200.0, 300.0])\n', (6071, 6094), True, 'import numpy as np\n'), ((6100, 6134), 'numpy.array', 'np.array', (['[100.01, 200.02, 300.03]'], {}), '([100.01, 200.02, 300.03])\n', (6108, 6134), True, 'import numpy as np\n'), ((6187, 6257), 'ogindia.utils.comp_array', 'comp_array', (['"""var"""', 'y', 'x', '(1e-05)', 'unequal'], {'exceptions': 'exc', 'relative': '(True)'}), "('var', y, x, 1e-05, unequal, exceptions=exc, relative=True)\n", (6197, 6257), False, 'from ogindia.utils import comp_array, comp_scalar, dict_compare\n'), ((6425, 6481), 'ogindia.utils.comp_scalar', 'comp_scalar', (['"""test"""', 'y', 'x', '(0.001)', 'unequal'], {'relative': '(True)'}), "('test', y, x, 0.001, unequal, relative=True)\n", (6436, 6481), False, 'from ogindia.utils import comp_array, comp_scalar, dict_compare\n'), ((6605, 6676), 'ogindia.utils.comp_scalar', 'comp_scalar', (['"""var"""', 'y', 'x', '(1e-05)', 'unequal'], {'exceptions': 'exc', 'relative': '(True)'}), "('var', y, x, 1e-05, unequal, exceptions=exc, relative=True)\n", (6616, 6676), False, 'from ogindia.utils import comp_array, comp_scalar, dict_compare\n'), ((6869, 6941), 'ogindia.utils.dict_compare', 'dict_compare', (['"""lhs.pkle"""', 'lhs', '"""rhs.pkle"""', 'rhs'], {'tol': '(0.001)', 'relative': '(True)'}), "('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=0.001, relative=True)\n", (6881, 6941), False, 'from ogindia.utils import dict_compare\n'), ((993, 1024), 'numpy.array', 'np.array', (['[100.0, 200.0, 300.0]'], {}), '([100.0, 200.0, 300.0])\n', (1001, 1024), True, 'import numpy as np\n'), ((1308, 1339), 'numpy.array', 'np.array', (['[100.0, 200.0, 300.1]'], {}), '([100.0, 200.0, 300.1])\n', (1316, 1339), True, 'import numpy as np\n'), ((3792, 3824), 'os.path.join', 'os.path.join', (['baseline_dir', '"""SS"""'], {}), "(baseline_dir, 'SS')\n", (3804, 3824), False, 'import os\n'), ((4235, 4290), 'ogindia.utils.pickle_file_compare', 'pickle_file_compare', (['picklefile1.name', 'picklefile2.name'], {}), '(picklefile1.name, picklefile2.name)\n', (4254, 4290), False, 'from ogindia.utils import pickle_file_compare\n'), ((4419, 4474), 'ogindia.utils.pickle_file_compare', 'pickle_file_compare', (['picklefile3.name', 'picklefile4.name'], {}), '(picklefile3.name, picklefile4.name)\n', (4438, 4474), False, 'from ogindia.utils import pickle_file_compare\n'), ((4996, 5051), 'ogindia.utils.dict_compare', 
'dict_compare', (['"""lhs.pkle"""', 'lhs', '"""rhs.pkle"""', 'rhs'], {'tol': 'TOL'}), "('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)\n", (5008, 5051), False, 'from ogindia.utils import dict_compare\n'), ((5208, 5263), 'ogindia.utils.dict_compare', 'dict_compare', (['"""lhs.pkle"""', 'lhs', '"""rhs.pkle"""', 'rhs'], {'tol': 'TOL'}), "('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)\n", (5220, 5263), False, 'from ogindia.utils import dict_compare\n'), ((5364, 5383), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5372, 5383), True, 'import numpy as np\n'), ((5409, 5425), 'numpy.array', 'np.array', (['[1, 3]'], {}), '([1, 3])\n', (5417, 5425), True, 'import numpy as np\n'), ((5450, 5505), 'ogindia.utils.dict_compare', 'dict_compare', (['"""lhs.pkle"""', 'lhs', '"""rhs.pkle"""', 'rhs'], {'tol': 'TOL'}), "('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)\n", (5462, 5505), False, 'from ogindia.utils import dict_compare\n'), ((5607, 5632), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0]'], {}), '([1.0, 2.0, 3.0])\n', (5615, 5632), True, 'import numpy as np\n'), ((5655, 5680), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.1]'], {}), '([1.0, 2.0, 3.1])\n', (5663, 5680), True, 'import numpy as np\n'), ((5703, 5758), 'ogindia.utils.dict_compare', 'dict_compare', (['"""lhs.pkle"""', 'lhs', '"""rhs.pkle"""', 'rhs'], {'tol': 'TOL'}), "('lhs.pkle', lhs, 'rhs.pkle', rhs, tol=TOL)\n", (5715, 5758), False, 'from ogindia.utils import dict_compare\n'), ((5905, 5945), 'ogindia.utils.comp_array', 'comp_array', (['"""test"""', 'y', 'x', '(0.001)', 'unequal'], {}), "('test', y, x, 0.001, unequal)\n", (5915, 5945), False, 'from ogindia.utils import comp_array, comp_scalar, dict_compare\n'), ((6373, 6414), 'ogindia.utils.comp_scalar', 'comp_scalar', (['"""test"""', 'y', 'x', '(0.001)', 'unequal'], {}), "('test', y, x, 0.001, unequal)\n", (6384, 6414), False, 'from ogindia.utils import comp_array, comp_scalar, dict_compare\n'), ((6765, 6796), 'numpy.array', 'np.array', (['[100.0, 200.0, 300.0]'], {}), '([100.0, 200.0, 300.0])\n', (6773, 6796), True, 'import numpy as np\n'), ((6819, 6850), 'numpy.array', 'np.array', (['[100.0, 200.0, 300.1]'], {}), '([100.0, 200.0, 300.1])\n', (6827, 6850), True, 'import numpy as np\n'), ((3024, 3041), 'os.makedirs', 'os.makedirs', (['_dir'], {}), '(_dir)\n', (3035, 3041), False, 'import os\n'), ((7320, 7333), 'taxcalc.GrowFactors', 'GrowFactors', ([], {}), '()\n', (7331, 7333), False, 'from taxcalc import GrowFactors\n')]
|
import multiprocessing
import sys
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from functools import partial
from src.base_model import BaseModel
# DiscriminatorDomain and to_var_gpu are used below but were not imported in the
# original; the source modules assumed here are a guess based on the other src imports.
from src.networks import Destilation_student_matchingInstance, DiscriminatorDomain
from src.utils import save_images, to_var_gpu
from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform
class ADAModel(BaseModel):
def __init__(self, cf, writer, results_folder, models_folder, tensorboard_folder,
run_name, starting_epoch=0):
self.cf = cf
self.results_folder = results_folder
self.models_folder = models_folder
self.tensorboard_folder = tensorboard_folder
self.run_name = run_name
self.starting_epoch = starting_epoch
self.seg_model = Destilation_student_matchingInstance(self.cf.labels - 1, self.cf.channels)
self.seg_model.cuda()
self.writer = writer
self.seg_optimizer = optim.Adam(self.seg_model.parameters(), lr=self.cf.lr)
step_1 = 20000 if self.cf.task == 'ms' else 5000
step_2 = 20000 if self.cf.task == 'ms' else 10000
        # kept on self so the learning-rate schedule is not discarded (in the
        # original it was a local variable that was never stepped)
        self.scheduler_S = optim.lr_scheduler.MultiStepLR(self.seg_optimizer, milestones=[step_1, step_2], gamma=0.1)
self.criterion = dice_soft_loss if self.cf.loss == 'dice' else bland_altman_loss
self.criterion2 = ss_loss
self.iterations = self.cf.iterations
# Discriminator setup #
self.discriminator = DiscriminatorDomain(352, 2, self.cf.discriminator_complexity)
self.optimizer_discriminator = optim.Adam(self.discriminator.parameters(), lr=1e-4)
########################
self.correct = 0
self.num_of_subjects = 0
def initialise(self):
self.seg_model.cuda()
self.discriminator.cuda()
self.p = multiprocessing.Pool(10)
def training_loop(self, source_dl, target_dl):
if self.iterations < self.cf.iterations_adapt:
alpha = 0
beta = 0
else:
alpha = self.cf.alpha_lweights
beta = self.cf.beta_lweights
source_batch = next(source_dl)
source_inputs, source_labels = (source_batch['inputs'].to(self.device),
source_batch['labels'].to(self.device))
target_batch = next(target_dl)
target_inputs, target_labels = (target_batch['inputs'].to(self.device),
target_batch['labels'].to(self.device))
outputs, _, _, _, _, _, _, _, _, _ = self.seg_model(source_inputs)
# Training Discriminator
self.seg_model.eval()
self.discriminator.train()
# do the sampling here.
# Source Domain sampling
inputs_source_discriminator = source_inputs
# Target batch
batch_trs = target_inputs.cpu().numpy()
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputs_target_discriminator_aug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputs_target_discriminator_aug,
degreeFreedom=self.cf.affine_rot_degree, scale=self.cf.affine_scale,
shearingScale=self.cf.affine_shearing)
inputs_target_discriminator_aug = apply_transform(inputs_target_discriminator_aug, Theta)
inputs_models_discriminator = torch.cat(
(inputs_source_discriminator, inputs_target_discriminator_aug), 0)
labels_discriminator = to_var_gpu(
torch.cat((torch.zeros(inputs_source_discriminator.size(0)),
torch.ones(inputs_target_discriminator_aug.size(0))), 0).type(torch.LongTensor))
# print('size Discriminator')
# print(inputs_models_discriminator.size())
_, _, _, _, _, _, dec4, dec3, dec2, dec1 = self.seg_model(inputs_models_discriminator)
dec1 = F.interpolate(dec1, size=dec2.size()[2:], mode='bilinear')
dec2 = F.interpolate(dec2, size=dec2.size()[2:], mode='bilinear')
dec3 = F.interpolate(dec3, size=dec2.size()[2:], mode='bilinear')
dec4 = F.interpolate(dec4, size=dec2.size()[2:], mode='bilinear')
inputs_discriminator = torch.cat((dec1, dec2, dec3, dec4), 1)
self.discriminator.zero_grad()
outputs_discriminator = self.discriminator(inputs_discriminator)
loss_discriminator = torch.nn.CrossEntropyLoss(size_average=True)(outputs_discriminator,
labels_discriminator)
self.correct += (torch.argmax(outputs_discriminator, dim=1) == labels_discriminator).float().sum()
self.num_of_subjects += int(outputs_discriminator.size(0))
loss_discriminator.backward()
self.optimizer_discriminator.step()
discriminator_loss = loss_discriminator.item()
# Train model
self.seg_model.train()
self.discriminator.eval()
# Here we get a new batch of target domain slices
target_batch = next(target_dl)
        target_inputs, target_labels = (target_batch['inputs'].to(self.device),
                                        target_batch['labels'].to(self.device))
outputst, _, _, _, _, _, _, _, _, _ = self.seg_model(target_inputs)
batch_trs = target_inputs.cpu().numpy()
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=self.cf.affine_rot_degree,
scale=self.cf.affine_scale,
shearingScale=self.cf.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
self.seg_model.zero_grad()
outputstaug, _, _, _, _, _, _, _, _, _ = self.seg_model(inputstaug)
outputst_transformed = apply_transform(outputst, Theta)
inputs_models_discriminator = torch.cat((source_inputs, inputstaug), 0)
_, _, _, _, _, _, dec4, dec3, dec2, dec1 = self.seg_model(inputs_models_discriminator)
dec1 = F.interpolate(dec1, size=dec2.size()[2:], mode='bilinear')
dec2 = F.interpolate(dec2, size=dec2.size()[2:], mode='bilinear')
dec3 = F.interpolate(dec3, size=dec2.size()[2:], mode='bilinear')
dec4 = F.interpolate(dec4, size=dec2.size()[2:], mode='bilinear')
inputs_discriminator = torch.cat((dec1, dec2, dec3, dec4), 1)
outputs_discriminator = self.discriminator(inputs_discriminator)
labels_discriminator = to_var_gpu(
torch.cat((torch.zeros(source_inputs.size(0)),
torch.ones(inputstaug.size(0))), 0).type(torch.LongTensor))
loss_discriminator = torch.nn.CrossEntropyLoss(size_average=True)(outputs_discriminator,
labels_discriminator)
supervised_loss = dice_soft_loss(torch.sigmoid(outputs), source_labels)
pc_loss = alpha * self.criterion(torch.sigmoid(outputstaug), torch.sigmoid(outputst_transformed))
adversarial_loss = - beta * loss_discriminator
loss = supervised_loss + pc_loss + adversarial_loss
self.seg_model.zero_grad()
loss.backward()
self.seg_optimizer.step()
postfix_dict = {'loss': loss.item(),
'supervised_loss': supervised_loss.item(),
'pc_loss': pc_loss.item(),
'adversarial_loss': adversarial_loss.item(),
'loss_discriminator': loss_discriminator.item(),
'acc_discriminator': self.correct/self.num_of_subjects
}
tensorboard_dict = {'source_inputs': source_inputs,
'target_inputs': target_inputs,
'source_labels': source_labels,
'target_labels': target_labels,
'inputstaug': inputstaug,
'outputs': outputs,
                            'outputst': outputst}
        # Assumed contract with BaseModel: return the logging dicts, which were
        # otherwise built and then discarded.
        return postfix_dict, tensorboard_dict
    def validation_loop(self, source_dl, target_dl):
if self.iterations < self.cf.iterations_adapt:
alpha = 0
beta = 0
else:
alpha = self.cf.alpha_lweights
beta = self.cf.beta_lweights
self.seg_model.eval()
self.discriminator.eval()
source_batch = next(source_dl)
source_inputs, source_labels = (source_batch['inputs'].to(self.device),
source_batch['labels'].to(self.device))
target_batch = next(target_dl)
target_inputs, target_labels = (target_batch['inputs'].to(self.device),
target_batch['labels'].to(self.device))
outputs, _, _, _, _, _, _, _, _, _ = self.seg_model(source_inputs)
# Source Domain sampling
inputs_source_discriminator = source_inputs
# Target batch
batch_trs = target_inputs.cpu().numpy()
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputs_target_discriminator_aug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputs_target_discriminator_aug,
degreeFreedom=self.cf.affine_rot_degree, scale=self.cf.affine_scale,
shearingScale=self.cf.affine_shearing)
inputs_target_discriminator_aug = apply_transform(inputs_target_discriminator_aug, Theta)
inputs_models_discriminator = torch.cat(
(inputs_source_discriminator, inputs_target_discriminator_aug), 0)
labels_discriminator = to_var_gpu(
torch.cat((torch.zeros(inputs_source_discriminator.size(0)),
torch.ones(inputs_target_discriminator_aug.size(0))), 0).type(torch.LongTensor))
_, _, _, _, _, _, dec4, dec3, dec2, dec1 = self.seg_model(inputs_models_discriminator)
dec1 = F.interpolate(dec1, size=dec2.size()[2:], mode='bilinear')
dec2 = F.interpolate(dec2, size=dec2.size()[2:], mode='bilinear')
dec3 = F.interpolate(dec3, size=dec2.size()[2:], mode='bilinear')
dec4 = F.interpolate(dec4, size=dec2.size()[2:], mode='bilinear')
inputs_discriminator = torch.cat((dec1, dec2, dec3, dec4), 1)
outputs_discriminator = self.discriminator(inputs_discriminator)
loss_discriminator = torch.nn.CrossEntropyLoss(size_average=True)(outputs_discriminator,
labels_discriminator)
self.correct += (torch.argmax(outputs_discriminator, dim=1) == labels_discriminator).float().sum()
self.num_of_subjects += int(outputs_discriminator.size(0))
# Here we get a new batch of target domain slices
target_batch = next(target_dl)
        target_inputs, target_labels = (target_batch['inputs'].to(self.device),
                                        target_batch['labels'].to(self.device))
outputst, _, _, _, _, _, _, _, _, _ = self.seg_model(target_inputs)
batch_trs = target_inputs.cpu().numpy()
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='bias', norm_training_images=None),
np.copy(batch_trs))
batch_trs = self.p.map(
partial(non_geometric_augmentations, method='kspace', norm_training_images=None),
np.copy(batch_trs))
inputstaug = torch.Tensor(batch_trs).cuda()
Theta, Theta_inv = generate_affine(inputstaug, degreeFreedom=self.cf.affine_rot_degree,
scale=self.cf.affine_scale,
shearingScale=self.cf.affine_shearing)
inputstaug = apply_transform(inputstaug, Theta)
outputstaug, _, _, _, _, _, _, _, _, _ = self.seg_model(inputstaug)
outputst_transformed = apply_transform(outputst, Theta)
supervised_loss = dice_soft_loss(torch.sigmoid(outputs), source_labels)
pc_loss = alpha * self.criterion(torch.sigmoid(outputstaug), torch.sigmoid(outputst_transformed))
adversarial_loss = - beta * loss_discriminator
loss = supervised_loss + pc_loss + adversarial_loss
postfix_dict = {'loss': loss.item(),
'supervised_loss': supervised_loss.item(),
'pc_loss': pc_loss.item(),
'adversarial_loss': adversarial_loss.item(),
'loss_discriminator': loss_discriminator.item(),
'acc_discriminator': self.correct/self.num_of_subjects
}
tensorboard_dict = {'source_inputs': source_inputs,
'target_inputs': target_inputs,
'source_labels': source_labels,
'target_labels': target_labels,
'inputstaug': inputstaug,
'outputs': outputs,
                            'outputst': outputst}
        # Assumed contract with BaseModel: return the logging dicts, which were
        # otherwise built and then discarded.
        return postfix_dict, tensorboard_dict
def tensorboard_logging(self, tensorboard_dict, split):
if self.cf.task == 'tumour':
for idx, modality in enumerate(['flair', 't1c', 't1', 't2']):
save_images(writer=self.writer, images=tensorboard_dict['source_inputs'][:, (idx,), :, :],
normalize=True, sigmoid=False,
iteration=self.iterations, name='source_{}/{}'.format(modality, split))
save_images(writer=self.writer, images=tensorboard_dict['target_inputs'][:, (idx,), :, :],
normalize=True, sigmoid=False,
iteration=self.iterations, name='target_{}/{}'.format(modality, split))
save_images(writer=self.writer, images=tensorboard_dict['inputstaug'][:, (idx,), :, :],
normalize=True, sigmoid=False,
iteration=self.iterations, name='{}_aug/{}'.format(modality, split))
elif self.cf.task == 'ms':
save_images(writer=self.writer, images=tensorboard_dict['source_labels'], normalize=True, sigmoid=False,
iteration=self.iterations, name='source_labels/{}'.format(split), png=True)
save_images(writer=self.writer, images=tensorboard_dict['target_labels'], normalize=True, sigmoid=False,
iteration=self.iterations, name='target_labels/{}'.format(split), png=True)
save_images(writer=self.writer, images=tensorboard_dict['outputs'], normalize=False, sigmoid=True,
iteration=self.iterations, name='outputs_source/{}'.format(split), png=True)
save_images(writer=self.writer, images=tensorboard_dict['source_inputs'], normalize=True,
sigmoid=False, png=True,
iteration=self.iterations, name='source_inputs/{}'.format(split))
save_images(writer=self.writer, images=tensorboard_dict['target_inputs'], normalize=True,
sigmoid=False, png=True,
iteration=self.iterations, name='targets_inputs/{}'.format(split))
save_images(writer=self.writer, images=tensorboard_dict['inputstaug'], normalize=True, sigmoid=False,
iteration=self.iterations, name='inputsaug/{}'.format(split))
save_images(writer=self.writer, images=tensorboard_dict['outputst'], normalize=False, sigmoid=True,
iteration=self.iterations, name='outputs_target/{}'.format(split))
def load(self, checkpoint_path):
self.starting_epoch = int(os.path.basename(checkpoint_path.split('.')[0]).split('_')[-1])
checkpoint = torch.load(checkpoint_path)
        # load_state_dict mutates the module/optimizer in place (it does not
        # return it), so its result must not be assigned back.
        self.seg_model.load_state_dict(checkpoint['seg_model'])
        self.discriminator.load_state_dict(checkpoint['discriminator'])
        self.optimizer_discriminator.load_state_dict(checkpoint['optimizer_discriminator'])
        self.seg_optimizer.load_state_dict(checkpoint['seg_optimizer'])
def save(self):
torch.save({'seg_model': self.seg_model.state_dict(),
'discriminator': self.discriminator.state_dict(),
'optimizer_discriminator': self.optimizer_discriminator.state_dict(),
'seg_optimizer': self.seg_optimizer.state_dict(),
}, os.path.join(self.models_folder, self.run_name + '_{}.pt'.format(self.iterations)))
def epoch_reset(self):
self.correct = 0
self.num_of_subjects = 0
|
[
"functools.partial",
"src.networks.Destilation_student_matchingInstance",
"numpy.copy",
"src.utils.apply_transform",
"src.utils.generate_affine",
"multiprocessing.Pool",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((797, 871), 'src.networks.Destilation_student_matchingInstance', 'Destilation_student_matchingInstance', (['(self.cf.labels - 1)', 'self.cf.channels'], {}), '(self.cf.labels - 1, self.cf.channels)\n', (833, 871), False, 'from src.networks import Destilation_student_matchingInstance\n'), ((1152, 1246), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['self.seg_optimizer'], {'milestones': '[step_1, step_2]', 'gamma': '(0.1)'}), '(self.seg_optimizer, milestones=[step_1,\n step_2], gamma=0.1)\n', (1182, 1246), True, 'import torch.optim as optim\n'), ((1825, 1849), 'multiprocessing.Pool', 'multiprocessing.Pool', (['(10)'], {}), '(10)\n', (1845, 1849), False, 'import multiprocessing\n'), ((3283, 3449), 'src.utils.generate_affine', 'generate_affine', (['inputs_target_discriminator_aug'], {'degreeFreedom': 'self.cf.affine_rot_degree', 'scale': 'self.cf.affine_scale', 'shearingScale': 'self.cf.affine_shearing'}), '(inputs_target_discriminator_aug, degreeFreedom=self.cf.\n affine_rot_degree, scale=self.cf.affine_scale, shearingScale=self.cf.\n affine_shearing)\n', (3298, 3449), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((3566, 3621), 'src.utils.apply_transform', 'apply_transform', (['inputs_target_discriminator_aug', 'Theta'], {}), '(inputs_target_discriminator_aug, Theta)\n', (3581, 3621), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((5998, 6138), 'src.utils.generate_affine', 'generate_affine', (['inputstaug'], {'degreeFreedom': 'self.cf.affine_rot_degree', 'scale': 'self.cf.affine_scale', 'shearingScale': 'self.cf.affine_shearing'}), '(inputstaug, degreeFreedom=self.cf.affine_rot_degree, scale=\n self.cf.affine_scale, shearingScale=self.cf.affine_shearing)\n', (6013, 6138), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((6239, 6273), 'src.utils.apply_transform', 'apply_transform', (['inputstaug', 'Theta'], {}), '(inputstaug, Theta)\n', (6254, 6273), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((6417, 6449), 'src.utils.apply_transform', 'apply_transform', (['outputst', 'Theta'], {}), '(outputst, Theta)\n', (6432, 6449), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((9976, 10142), 'src.utils.generate_affine', 'generate_affine', (['inputs_target_discriminator_aug'], {'degreeFreedom': 'self.cf.affine_rot_degree', 'scale': 'self.cf.affine_scale', 'shearingScale': 'self.cf.affine_shearing'}), '(inputs_target_discriminator_aug, degreeFreedom=self.cf.\n affine_rot_degree, scale=self.cf.affine_scale, shearingScale=self.cf.\n affine_shearing)\n', (9991, 10142), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((10259, 10314), 'src.utils.apply_transform', 'apply_transform', (['inputs_target_discriminator_aug', 'Theta'], {}), '(inputs_target_discriminator_aug, Theta)\n', (10274, 10314), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((12339, 12479), 'src.utils.generate_affine', 'generate_affine', (['inputstaug'], {'degreeFreedom': 'self.cf.affine_rot_degree', 'scale': 'self.cf.affine_scale', 'shearingScale': 'self.cf.affine_shearing'}), '(inputstaug, degreeFreedom=self.cf.affine_rot_degree, scale=\n self.cf.affine_scale, shearingScale=self.cf.affine_shearing)\n', (12354, 12479), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((12580, 12614), 'src.utils.apply_transform', 'apply_transform', (['inputstaug', 'Theta'], {}), '(inputstaug, Theta)\n', (12595, 12614), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((12723, 12755), 'src.utils.apply_transform', 'apply_transform', (['outputst', 'Theta'], {}), '(outputst, Theta)\n', (12738, 12755), False, 'from src.utils import bland_altman_loss, dice_soft_loss, ss_loss, generate_affine, non_geometric_augmentations, apply_transform\n'), ((2912, 2990), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""bias"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='bias', norm_training_images=None)\n", (2919, 2990), False, 'from functools import partial\n'), ((3004, 3022), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (3011, 3022), True, 'import numpy as np\n'), ((3068, 3153), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""kspace"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='kspace', norm_training_images=None\n )\n", (3075, 3153), False, 'from functools import partial\n'), ((3162, 3180), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (3169, 3180), True, 'import numpy as np\n'), ((5648, 5726), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""bias"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='bias', norm_training_images=None)\n", (5655, 5726), False, 'from functools import partial\n'), ((5740, 5758), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (5747, 5758), True, 'import numpy as np\n'), ((5804, 5889), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""kspace"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='kspace', norm_training_images=None\n )\n", (5811, 5889), False, 'from functools import partial\n'), ((5898, 5916), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (5905, 5916), True, 'import numpy as np\n'), ((9606, 9684), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""bias"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='bias', norm_training_images=None)\n", (9613, 9684), False, 'from functools import partial\n'), ((9698, 9716), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (9705, 9716), True, 'import numpy as np\n'), ((9762, 9847), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""kspace"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='kspace', norm_training_images=None\n )\n", (9769, 9847), False, 'from functools import partial\n'), ((9856, 9874), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (9863, 9874), True, 'import numpy as np\n'), ((11989, 12067), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""bias"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='bias', norm_training_images=None)\n", (11996, 12067), False, 'from functools import partial\n'), ((12081, 12099), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (12088, 12099), True, 'import numpy as np\n'), ((12145, 12230), 'functools.partial', 'partial', (['non_geometric_augmentations'], {'method': '"""kspace"""', 'norm_training_images': 'None'}), "(non_geometric_augmentations, method='kspace', norm_training_images=None\n )\n", (12152, 12230), False, 'from functools import partial\n'), ((12239, 12257), 'numpy.copy', 'np.copy', (['batch_trs'], {}), '(batch_trs)\n', (12246, 12257), True, 'import numpy as np\n')]
|
import inspect
import logging
import os
from itertools import product
from multiprocessing import JoinableQueue, Process
from queue import Empty
import numpy as np
import torch
import torch.nn.functional as F
from pandas import DataFrame
from fonduer.learning.models.marginal import Marginal
logger = logging.getLogger(__name__)
# ###########################################################
# # General Learning Utilities
# ###########################################################
def save_marginals(session, X, marginals, training=True):
"""Save marginal probabilities for a set of Candidates to db.
:param X: A list of arbitrary objects with candidate ids accessible via a
.id attrib
:param marginals: A dense M x K matrix of marginal probabilities, where
K is the cardinality of the candidates, OR a M-dim list/array if K=2.
:param training: If True, these are training marginals / labels; else they
are saved as end model predictions.
Note: The marginals for k=0 are not stored, only for k = 1,...,K
"""
logger = logging.getLogger(__name__)
# Make sure that we are working with a numpy array
try:
shape = marginals.shape
except Exception as e:
marginals = np.array(marginals)
shape = marginals.shape
# Handle binary input as M x 1-dim array; assume elements represent
    # positive (k=1) class values
if len(shape) == 1:
marginals = np.vstack([1 - marginals, marginals]).T
# Only add values for classes k=1,...,K
marginal_tuples = []
for i in range(shape[0]):
for k in range(1, shape[1] if len(shape) > 1 else 2):
if marginals[i, k] > 0:
marginal_tuples.append((i, k, marginals[i, k]))
# NOTE: This will delete all existing marginals of type `training`
session.query(Marginal).filter(Marginal.training == training).delete(
synchronize_session="fetch"
)
# Prepare bulk INSERT query
q = Marginal.__table__.insert()
# Prepare values
insert_vals = []
for i, k, p in marginal_tuples:
cid = X[i].id
insert_vals.append(
{
"candidate_id": cid,
"training": training,
"value": k,
# We cast p in case its a numpy type, which psycopg2 does not handle
"probability": float(p),
}
)
# Execute update
session.execute(q, insert_vals)
session.commit()
logger.info("Saved {%d} marginals".format(len(marginals)))
def reshape_marginals(marginals):
"""Returns correctly shaped marginals as np array"""
# Make sure training marginals are a numpy array first
try:
shape = marginals.shape
except Exception as e:
marginals = np.array(marginals)
shape = marginals.shape
# Set cardinality + marginals in proper format for binary v. categorical
if len(shape) != 1:
# If k = 2, make sure is M-dim array
if shape[1] == 2:
marginals = marginals[:, 1].reshape(-1)
return marginals
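# e.g. reshape_marginals(np.array([[0.3, 0.7], [0.9, 0.1]])) returns
# array([0.7, 0.1]), i.e. the positive-class column.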
class LabelBalancer(object):
def __init__(self, y):
"""Utility class to rebalance training labels
For example, to get the indices of a training set
with labels y and around 90 percent negative examples,
LabelBalancer(y).get_train_idxs(rebalance=0.1)
"""
self.y = np.ravel(y)
def _get_pos(self, split):
return np.where(self.y > (split + 1e-6))[0]
def _get_neg(self, split):
return np.where(self.y < (split - 1e-6))[0]
def _try_frac(self, m, n, pn):
# Return (a, b) s.t. a <= m, b <= n
# and b / a is as close to pn as possible
r = int(round(float(pn * m) / (1.0 - pn)))
s = int(round(float((1.0 - pn) * n) / pn))
return (m, r) if r <= n else ((s, n) if s <= m else (m, n))
def _get_counts(self, nneg, npos, frac_pos):
if frac_pos > 0.5:
return self._try_frac(nneg, npos, frac_pos)
else:
return self._try_frac(npos, nneg, 1.0 - frac_pos)[::-1]
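    # e.g. _get_counts(nneg=80, npos=20, frac_pos=0.1) returns (80, 9): keep
    # all 80 negatives and sample 9 positives, since 9 / 89 ~= 0.10.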
def get_train_idxs(self, rebalance=False, split=0.5, rand_state=None):
"""Get training indices based on @y
@rebalance: bool or fraction of positive examples desired
If True, default fraction is 0.5. If False no balancing.
@split: Split point for positive and negative classes
"""
rs = np.random if rand_state is None else rand_state
pos, neg = self._get_pos(split), self._get_neg(split)
if rebalance:
if len(pos) == 0:
raise ValueError("No positive labels.")
if len(neg) == 0:
raise ValueError("No negative labels.")
            p = 0.5 if rebalance is True else rebalance
n_neg, n_pos = self._get_counts(len(neg), len(pos), p)
pos = rs.choice(pos, size=n_pos, replace=False)
neg = rs.choice(neg, size=n_neg, replace=False)
idxs = np.concatenate([pos, neg])
rs.shuffle(idxs)
return idxs
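    # e.g. for y with 2 positive and 8 negative labels,
    # get_train_idxs(rebalance=0.2) keeps both positives and all 8 negatives
    # (2 / 10 = 0.2) and returns the 10 indices shuffled.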
# ##########################################################
# # Advanced Scoring Classes
# ##########################################################
class Scorer(object):
"""Abstract type for scorers"""
def __init__(self, test_candidates, test_labels, gold_candidate_set=None):
"""
:param test_candidates: A *list of Candidates* corresponding to
test_labels
:param test_labels: A *csrLabelMatrix* of ground truth labels for the
test candidates
:param gold_candidate_set: (optional) A *CandidateSet* containing the
full set of gold labeled candidates
"""
self.test_candidates = test_candidates
self.test_labels = test_labels
self.gold_candidate_set = gold_candidate_set
def _get_cardinality(self, marginals):
"""Get the cardinality based on the marginals returned by the model."""
if len(marginals.shape) == 1 or marginals.shape[1] < 3:
cardinality = 2
else:
cardinality = marginals.shape[1]
return cardinality
def score(self, test_marginals, **kwargs):
cardinality = self._get_cardinality(test_marginals)
if cardinality == 2:
return self._score_binary(test_marginals, **kwargs)
else:
return self._score_categorical(test_marginals, **kwargs)
def _score_binary(
self,
test_marginals,
train_marginals=None,
b=0.5,
set_unlabeled_as_neg=True,
display=True,
):
raise NotImplementedError()
def _score_categorical(self, test_marginals, train_marginals=None, display=True):
raise NotImplementedError()
def summary_score(self, test_marginals, **kwargs):
"""Return the F1 score (for binary) or accuracy (for categorical)."""
raise NotImplementedError()
class MentionScorer(Scorer):
"""Scorer for mention level assessment"""
def _score_binary(
self,
test_marginals,
train_marginals=None,
b=0.5,
set_unlabeled_as_neg=True,
set_at_thresh_as_neg=True,
display=True,
**kwargs
):
"""
Return scoring metric for the provided marginals, as well as candidates
in error buckets.
:param test_marginals: array of marginals for test candidates
:param train_marginals (optional): array of marginals for training
candidates
:param b: threshold for labeling
        :param set_unlabeled_as_neg: treat unlabeled test examples (label 0)
            as negative labels
        :param set_at_thresh_as_neg: treat marginals exactly at the decision
            threshold b as negative predictions
:param display: show calibration plots?
"""
test_label_array = []
tp = set()
fp = set()
tn = set()
fn = set()
for i, candidate in enumerate(self.test_candidates):
# Handle either a LabelMatrix or else assume test_labels array is in
# correct order i.e. same order as test_candidates
try:
test_label_index = self.test_labels.get_row_index(candidate)
test_label = self.test_labels[test_label_index, 0]
except AttributeError:
test_label = self.test_labels[i]
# Set unlabeled examples to -1 by default
if test_label == 0 and set_unlabeled_as_neg:
test_label = -1
# Bucket the candidates for error analysis
test_label_array.append(test_label)
if test_label != 0:
if test_marginals[i] > b:
if test_label == 1:
tp.add(candidate)
else:
fp.add(candidate)
elif test_marginals[i] < b or set_at_thresh_as_neg:
if test_label == -1:
tn.add(candidate)
else:
fn.add(candidate)
if display:
# Calculate scores unadjusted for TPs not in our candidate set
print_scores(
len(tp), len(fp), len(tn), len(fn), title="Scores (Un-adjusted)"
)
# If gold candidate set is provided calculate recall-adjusted scores
if self.gold_candidate_set is not None:
gold_fn = [
c for c in self.gold_candidate_set if c not in self.test_candidates
]
logger.info("\n")
print_scores(
len(tp),
len(fp),
len(tn),
len(fn) + len(gold_fn),
title="Corpus Recall-adjusted Scores",
)
# If training and test marginals provided print calibration plots
if train_marginals is not None and test_marginals is not None:
raise NotImplementedError("Invalid code here.")
return tp, fp, tn, fn
def _score_categorical(
self, test_marginals, train_marginals=None, display=True, **kwargs
):
"""
Return scoring metric for the provided marginals, as well as candidates
in error buckets.
:param test_marginals: array of marginals for test candidates
:param train_marginals (optional): array of marginals for training
candidates
:param display: show calibration plots?
"""
test_label_array = []
correct = set()
incorrect = set()
# Get predictions
test_pred = test_marginals.argmax(axis=1) + 1
# Bucket the candidates for error analysis
for i, candidate in enumerate(self.test_candidates):
# Handle either a LabelMatrix or else assume test_labels array is in
# correct order i.e. same order as test_candidates
try:
test_label_index = self.test_labels.get_row_index(candidate)
test_label = self.test_labels[test_label_index, 0]
except AttributeError:
test_label = self.test_labels[i]
test_label_array.append(test_label)
if test_label != 0:
if test_pred[i] == test_label:
correct.add(candidate)
else:
incorrect.add(candidate)
if display:
nc, ni = len(correct), len(incorrect)
logger.info("Accuracy: {}".format(nc / float(nc + ni)))
# If gold candidate set is provided calculate recall-adjusted scores
if self.gold_candidate_set is not None:
gold_missed = [
c for c in self.gold_candidate_set if c not in self.test_candidates
]
logger.info(
"Coverage: {}".format((nc + ni) / (nc + ni + len(gold_missed)))
)
return correct, incorrect
def summary_score(self, test_marginals, **kwargs):
"""
Return the F1 score (for binary) or accuracy (for categorical).
Also return the label as second argument.
"""
error_sets = self.score(test_marginals, display=False, **kwargs)
if len(error_sets) == 4:
_, _, f1 = binary_scores_from_counts(*map(len, error_sets))
return f1, "F1 Score"
else:
nc, ninc = map(len, error_sets)
return nc / float(nc + ninc), "Accuracy"
def binary_scores_from_counts(ntp, nfp, ntn, nfn):
"""Precision, recall, and F1 scores from counts of TP, FP, TN, FN.
Example usage::
p, r, f1 = binary_scores_from_counts(*map(len, error_sets))
"""
prec = ntp / float(ntp + nfp) if ntp + nfp > 0 else 0.0
rec = ntp / float(ntp + nfn) if ntp + nfn > 0 else 0.0
f1 = (2 * prec * rec) / (prec + rec) if prec + rec > 0 else 0.0
return prec, rec, f1
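# Worked example: ntp=8, nfp=2, ntn=90, nfn=2 gives prec = 8/10 = 0.8,
# rec = 8/10 = 0.8, and f1 = 2*0.8*0.8 / 1.6 = 0.8.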
def print_scores(ntp, nfp, ntn, nfn, title="Scores"):
prec, rec, f1 = binary_scores_from_counts(ntp, nfp, ntn, nfn)
pos_acc = ntp / float(ntp + nfn) if ntp + nfn > 0 else 0.0
neg_acc = ntn / float(ntn + nfp) if ntn + nfp > 0 else 0.0
logger.info("========================================")
logger.info(title)
logger.info("========================================")
logger.info("Pos. class accuracy: {:.3}".format(pos_acc))
logger.info("Neg. class accuracy: {:.3}".format(neg_acc))
logger.info("Precision {:.3}".format(prec))
logger.info("Recall {:.3}".format(rec))
logger.info("F1 {:.3}".format(f1))
logger.info("----------------------------------------")
logger.info("TP: {} | FP: {} | TN: {} | FN: {}".format(ntp, nfp, ntn, nfn))
logger.info("========================================\n")
# ##########################################################
# # Grid search
# ##########################################################
class GridSearch(object):
"""
A class for running a hyperparameter grid search.
:param model_class: The model class being trained
:param parameter_dict: A dictionary of (hyperparameter name, list of values)
pairs. Note that the hyperparameter name must correspond to a keyword
argument in the `model_class.train` method.
:param X_train: The training datapoints
:param Y_train: If applicable, the training labels / marginals
:param model_class_params: Keyword arguments to pass into model_class
construction. Note that a new model is constructed for each new
combination of hyperparameters.
:param model_hyperparams: Hyperparameters for the model- all must be
keyword arguments to the `model_class.train` method. Any that are
included in the grid search will be overwritten.
:param save_dir: Note that checkpoints will be saved in save_dir/grid_search
"""
def __init__(
self,
model_class,
parameter_dict,
X_train,
Y_train=None,
model_class_params={},
model_hyperparams={},
save_dir="checkpoints",
):
self.model_class = model_class
self.parameter_dict = parameter_dict
self.param_names = list(parameter_dict)
self.X_train = X_train
self.Y_train = Y_train
self.model_class_params = model_class_params
self.model_hyperparams = model_hyperparams
self.save_dir = os.path.join(save_dir, "grid_search")
def search_space(self):
return product(*[self.parameter_dict[pn] for pn in self.param_names])
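    # e.g. parameter_dict={'lr': [1e-3, 1e-4], 'l2': [0.0, 1e-5]} yields the
    # four combinations (1e-3, 0.0), (1e-3, 1e-5), (1e-4, 0.0), (1e-4, 1e-5).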
def fit(
self,
X_valid,
Y_valid,
b=0.5,
beta=1,
set_unlabeled_as_neg=True,
n_threads=1,
eval_batch_size=None,
):
"""
Runs grid search, constructing a new instance of model_class for each
hyperparameter combination, training on (self.X_train, self.Y_train),
and validating on (X_valid, Y_valid). Selects the best model according
to F1 score (binary) or accuracy (categorical).
:param b: Scoring decision threshold (binary)
:param beta: F_beta score to select model by (binary)
:param set_unlabeled_as_neg: Set labels = 0 -> -1 (binary)
:param n_threads: Parallelism to use for the grid search
:param eval_batch_size: The batch_size for model evaluation
"""
if n_threads > 1:
opt_model, run_stats = self._fit_mt(
X_valid,
Y_valid,
b=b,
beta=beta,
set_unlabeled_as_neg=set_unlabeled_as_neg,
n_threads=n_threads,
eval_batch_size=eval_batch_size,
)
else:
opt_model, run_stats = self._fit_st(
X_valid,
Y_valid,
b=b,
beta=beta,
set_unlabeled_as_neg=set_unlabeled_as_neg,
eval_batch_size=eval_batch_size,
)
return opt_model, run_stats
def _fit_st(
self,
X_valid,
Y_valid,
b=0.5,
beta=1,
set_unlabeled_as_neg=True,
eval_batch_size=None,
):
"""Single-threaded implementation of `GridSearch.fit`."""
# Iterate over the param values
run_stats = []
run_score_opt = -1.0
for k, param_vals in enumerate(self.search_space()):
hps = self.model_hyperparams.copy()
# Initiate the model from scratch each time
# Some models may have seed set in the init procedure
model = self.model_class(**self.model_class_params)
model_name = "{0}_{1}".format(model.name, k)
# Set the new hyperparam configuration to test
for pn, pv in zip(self.param_names, param_vals):
hps[pn] = pv
logger.info("=" * 60)
NUMTYPES = float, int, np.float64
logger.info(
"[%d] Testing %s"
% (
k + 1,
", ".join(
[
"%s = %s"
% (pn, ("%0.2e" % pv) if isinstance(pv, NUMTYPES) else pv)
for pn, pv in zip(self.param_names, param_vals)
]
),
)
)
logger.info("=" * 60)
# Train the model
train_args = [self.X_train]
if self.Y_train is not None:
train_args.append(self.Y_train)
# Pass in the dev set to the train method if applicable, for dev set
# score printing, best-score checkpointing
# Note: Need to set the save directory since passing in
# (X_dev, Y_dev) will by default trigger checkpoint saving
try:
model.train(
*train_args,
X_dev=X_valid,
Y_dev=Y_valid,
save_dir=self.save_dir,
**hps
)
except Exception as e:
model.train(*train_args, **hps)
# Test the model
run_scores = model.score(
X_valid,
Y_valid,
b=b,
beta=beta,
set_unlabeled_as_neg=set_unlabeled_as_neg,
batch_size=eval_batch_size,
)
if model.cardinality > 2:
run_score, run_score_label = run_scores, "Accuracy"
run_scores = [run_score]
else:
run_score = run_scores[-1]
run_score_label = "F-{0} Score".format(beta)
# Add scores to running stats, print, and set as optimal if best
logger.info("[{0}] {1}: {2}".format(model.name, run_score_label, run_score))
run_stats.append(list(param_vals) + list(run_scores))
if run_score > run_score_opt or k == 0:
model.save(model_name=model_name, save_dir=self.save_dir)
# Also save a separate file for easier access
model.save(
model_name="{0}_best".format(model.name), save_dir=self.save_dir
)
opt_model_name = model_name
run_score_opt = run_score
# Set optimal parameter in the learner model
opt_model = self.model_class(**self.model_class_params)
opt_model.load(opt_model_name, save_dir=self.save_dir)
# Return optimal model & DataFrame of scores
f_score = "F-{0}".format(beta)
run_score_labels = (
["Acc."] if opt_model.cardinality > 2 else ["Prec.", "Rec.", f_score]
)
sort_by = "Acc." if opt_model.cardinality > 2 else f_score
self.results = DataFrame.from_records(
run_stats, columns=self.param_names + run_score_labels
).sort_values(by=sort_by, ascending=False)
return opt_model, self.results
def _fit_mt(
self,
X_valid,
Y_valid,
b=0.5,
beta=1,
set_unlabeled_as_neg=True,
n_threads=2,
eval_batch_size=None,
):
"""Multi-threaded implementation of `GridSearch.fit`."""
# First do a preprocessing pass over the data to make sure it is all
# non-lazily loaded
# TODO: Better way to go about it than this!!
logger.info("Loading data...")
model = self.model_class(**self.model_class_params)
model._preprocess_data(self.X_train)
model._preprocess_data(X_valid)
# Create queue of hyperparameters to test
logger.info("Launching jobs...")
params_queue = JoinableQueue()
param_val_sets = []
for k, param_vals in enumerate(self.search_space()):
param_val_sets.append(param_vals)
hps = self.model_hyperparams.copy()
for pn, pv in zip(self.param_names, param_vals):
hps[pn] = pv
params_queue.put((k, hps))
# Create a queue to store output results
scores_queue = JoinableQueue()
# Start UDF Processes
ps = []
for i in range(n_threads):
p = ModelTester(
self.model_class,
self.model_class_params,
params_queue,
scores_queue,
self.X_train,
X_valid,
Y_valid,
Y_train=self.Y_train,
b=b,
save_dir=self.save_dir,
set_unlabeled_as_neg=set_unlabeled_as_neg,
eval_batch_size=eval_batch_size,
)
p.start()
ps.append(p)
# Collect scores
run_stats = []
while any([p.is_alive() for p in ps]):
while True:
try:
scores = scores_queue.get(True, QUEUE_TIMEOUT)
k = scores[0]
param_vals = param_val_sets[k]
run_stats.append([k] + list(param_vals) + list(scores[1:]))
logger.info("Model {0} Done; score: {1}".format(k, scores[-1]))
scores_queue.task_done()
except Empty:
break
# Terminate the processes
for p in ps:
p.terminate()
# Load best model; first element in each row of run_stats is the model
# index, last one is the score to sort by
# Note: the models may be returned out of order!
i_opt = np.argmax([s[-1] for s in run_stats])
k_opt = run_stats[i_opt][0]
model = self.model_class(**self.model_class_params)
model.load("{0}_{1}".format(model.name, k_opt), save_dir=self.save_dir)
# Also save the best model as separate file
model.save(model_name="{0}_best".format(model.name), save_dir=self.save_dir)
# Return model and DataFrame of scores
# Test for categorical vs. binary in hack-ey way for now...
f_score = "F-{0}".format(beta)
categorical = len(scores) == 2
labels = ["Acc."] if categorical else ["Prec.", "Rec.", f_score]
sort_by = "Acc." if categorical else f_score
self.results = DataFrame.from_records(
run_stats, columns=["Model"] + self.param_names + labels
).sort_values(by=sort_by, ascending=False)
return model, self.results
QUEUE_TIMEOUT = 3
class ModelTester(Process):
def __init__(
self,
model_class,
model_class_params,
params_queue,
scores_queue,
X_train,
X_valid,
Y_valid,
Y_train=None,
b=0.5,
beta=1,
set_unlabeled_as_neg=True,
save_dir="checkpoints",
eval_batch_size=None,
):
Process.__init__(self)
self.model_class = model_class
self.model_class_params = model_class_params
self.params_queue = params_queue
self.scores_queue = scores_queue
self.X_train = X_train
self.Y_train = Y_train
self.X_valid = X_valid
self.Y_valid = Y_valid
self.scorer_params = {
"b": b,
"beta": beta,
"set_unlabeled_as_neg": set_unlabeled_as_neg,
"batch_size": eval_batch_size,
}
self.save_dir = save_dir
def run(self):
while True:
# Get a new configuration from the queue
try:
k, hps = self.params_queue.get(True, QUEUE_TIMEOUT)
# Initiate the model from scratch each time
# Some models may have seed set in the init procedure
model = self.model_class(**self.model_class_params)
model_name = "{0}_{1}".format(model.name, k)
# Pass in the dev set to the train method if applicable, for dev
# set score printing, best-score checkpointing
if "X_dev" in inspect.getargspec(model.train):
hps["X_dev"] = self.X_valid
hps["Y_dev"] = self.Y_valid
# Train model with given hyperparameters
if self.Y_train is not None:
model.train(self.X_train, self.Y_train, **hps)
else:
model.train(self.X_train, **hps)
# Save the model
# NOTE: Currently, we have to save every model because we are
# testing asynchronously. This is obviously memory inefficient,
# although probably not that much of a problem in practice...
model.save(model_name=model_name, save_dir=self.save_dir)
# Test the model
run_scores = model.score(
self.X_valid, self.Y_valid, **self.scorer_params
)
run_scores = [run_scores] if model.cardinality > 2 else list(run_scores)
# Append score to out queue
self.scores_queue.put([k] + run_scores, True, QUEUE_TIMEOUT)
except Empty:
break
class RandomSearch(GridSearch):
"""
A GridSearch over a random subsample of the hyperparameter search space.
:param seed: A seed for the GridSearch instance
"""
def __init__(
self,
model_class,
parameter_dict,
X_train,
Y_train=None,
n=10,
model_class_params={},
model_hyperparams={},
seed=123,
save_dir="checkpoints",
):
"""Search a random sample of size n from a parameter grid"""
self.rand_state = np.random.RandomState()
self.rand_state.seed(seed)
self.n = n
super(RandomSearch, self).__init__(
model_class,
parameter_dict,
X_train,
Y_train=Y_train,
model_class_params=model_class_params,
model_hyperparams=model_hyperparams,
save_dir=save_dir,
)
def search_space(self):
return list(
zip(
*[
self.rand_state.choice(self.parameter_dict[pn], self.n)
for pn in self.param_names
]
)
)
# ##########################################################
# # Loss functions
# ##########################################################
def SoftCrossEntropyLoss(input, target):
"""
Calculate the CrossEntropyLoss with soft targets
:param input: prediction logicts
:param target: target probabilities
"""
total_loss = torch.tensor(0.0)
for i in range(input.size(1)):
cls_idx = torch.full((input.size(0),), i, dtype=torch.long)
loss = F.cross_entropy(input, cls_idx, reduce=False)
total_loss += target[:, i].dot(loss)
return total_loss / input.shape[0]
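# Minimal usage sketch (illustrative only, not part of the original module):
# soft-target cross entropy over random logits; the names are hypothetical.
if __name__ == "__main__":
    _logits = torch.randn(8, 3)                         # [batch x classes]
    _soft_targets = F.softmax(torch.randn(8, 3), dim=1)    # rows sum to 1
    print(SoftCrossEntropyLoss(_logits, _soft_targets))     # scalar tensor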
|
[
"numpy.concatenate",
"numpy.ravel",
"numpy.argmax",
"torch.nn.functional.cross_entropy",
"logging.getLogger",
"numpy.random.RandomState",
"multiprocessing.Process.__init__",
"numpy.where",
"numpy.array",
"inspect.getargspec",
"pandas.DataFrame.from_records",
"itertools.product",
"numpy.vstack",
"multiprocessing.JoinableQueue",
"fonduer.learning.models.marginal.Marginal.__table__.insert",
"os.path.join",
"torch.tensor"
] |
[((304, 331), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (321, 331), False, 'import logging\n'), ((1080, 1107), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1097, 1107), False, 'import logging\n'), ((1986, 2013), 'fonduer.learning.models.marginal.Marginal.__table__.insert', 'Marginal.__table__.insert', ([], {}), '()\n', (2011, 2013), False, 'from fonduer.learning.models.marginal import Marginal\n'), ((28785, 28802), 'torch.tensor', 'torch.tensor', (['(0.0)'], {}), '(0.0)\n', (28797, 28802), False, 'import torch\n'), ((3411, 3422), 'numpy.ravel', 'np.ravel', (['y'], {}), '(y)\n', (3419, 3422), True, 'import numpy as np\n'), ((5022, 5048), 'numpy.concatenate', 'np.concatenate', (['[pos, neg]'], {}), '([pos, neg])\n', (5036, 5048), True, 'import numpy as np\n'), ((15552, 15589), 'os.path.join', 'os.path.join', (['save_dir', '"""grid_search"""'], {}), "(save_dir, 'grid_search')\n", (15564, 15589), False, 'import os\n'), ((15634, 15696), 'itertools.product', 'product', (['*[self.parameter_dict[pn] for pn in self.param_names]'], {}), '(*[self.parameter_dict[pn] for pn in self.param_names])\n', (15641, 15696), False, 'from itertools import product\n'), ((21880, 21895), 'multiprocessing.JoinableQueue', 'JoinableQueue', ([], {}), '()\n', (21893, 21895), False, 'from multiprocessing import JoinableQueue, Process\n'), ((22281, 22296), 'multiprocessing.JoinableQueue', 'JoinableQueue', ([], {}), '()\n', (22294, 22296), False, 'from multiprocessing import JoinableQueue, Process\n'), ((23734, 23771), 'numpy.argmax', 'np.argmax', (['[s[-1] for s in run_stats]'], {}), '([s[-1] for s in run_stats])\n', (23743, 23771), True, 'import numpy as np\n'), ((24999, 25021), 'multiprocessing.Process.__init__', 'Process.__init__', (['self'], {}), '(self)\n', (25015, 25021), False, 'from multiprocessing import JoinableQueue, Process\n'), ((27818, 27841), 'numpy.random.RandomState', 'np.random.RandomState', ([], {}), '()\n', (27839, 27841), True, 'import numpy as np\n'), ((28921, 28966), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['input', 'cls_idx'], {'reduce': '(False)'}), '(input, cls_idx, reduce=False)\n', (28936, 28966), True, 'import torch.nn.functional as F\n'), ((1251, 1270), 'numpy.array', 'np.array', (['marginals'], {}), '(marginals)\n', (1259, 1270), True, 'import numpy as np\n'), ((1455, 1492), 'numpy.vstack', 'np.vstack', (['[1 - marginals, marginals]'], {}), '([1 - marginals, marginals])\n', (1464, 1492), True, 'import numpy as np\n'), ((2792, 2811), 'numpy.array', 'np.array', (['marginals'], {}), '(marginals)\n', (2800, 2811), True, 'import numpy as np\n'), ((3470, 3502), 'numpy.where', 'np.where', (['(self.y > split + 1e-06)'], {}), '(self.y > split + 1e-06)\n', (3478, 3502), True, 'import numpy as np\n'), ((3554, 3586), 'numpy.where', 'np.where', (['(self.y < split - 1e-06)'], {}), '(self.y < split - 1e-06)\n', (3562, 3586), True, 'import numpy as np\n'), ((20986, 21064), 'pandas.DataFrame.from_records', 'DataFrame.from_records', (['run_stats'], {'columns': '(self.param_names + run_score_labels)'}), '(run_stats, columns=self.param_names + run_score_labels)\n', (21008, 21064), False, 'from pandas import DataFrame\n'), ((24429, 24514), 'pandas.DataFrame.from_records', 'DataFrame.from_records', (['run_stats'], {'columns': "(['Model'] + self.param_names + labels)"}), "(run_stats, columns=['Model'] + self.param_names + labels\n )\n", (24451, 24514), False, 'from pandas import DataFrame\n'), ((26154, 26185), 'inspect.getargspec', 'inspect.getargspec', (['model.train'], {}), '(model.train)\n', (26172, 26185), False, 'import inspect\n')]
|
from argparse import ArgumentParser
import numpy as np
import requests
from mmcls.apis import inference_model, init_model, show_result_pyplot
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
args = parser.parse_args()
return args
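# Example invocation (file names and model name are illustrative only):
#   python test_torchserve.py demo.jpg cfg.py ckpt.pth my_model \
#       --inference-addr 127.0.0.1:8080 --device cuda:0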
def main(args):
# Inference single image by native apis.
model = init_model(args.config, args.checkpoint, device=args.device)
model_result = inference_model(model, args.img)
show_result_pyplot(model, args.img, model_result, title='pytorch_result')
# Inference single image by torchserve engine.
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
server_result = response.json()
show_result_pyplot(model, args.img, server_result, title='server_result')
assert np.allclose(model_result['pred_score'], server_result['pred_score'])
print('Test complete, the results of PyTorch and TorchServe are the same.')
if __name__ == '__main__':
args = parse_args()
main(args)
|
[
"argparse.ArgumentParser",
"numpy.allclose",
"mmcls.apis.inference_model",
"mmcls.apis.show_result_pyplot",
"requests.post",
"mmcls.apis.init_model"
] |
[((177, 193), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (191, 193), False, 'from argparse import ArgumentParser\n'), ((798, 858), 'mmcls.apis.init_model', 'init_model', (['args.config', 'args.checkpoint'], {'device': 'args.device'}), '(args.config, args.checkpoint, device=args.device)\n', (808, 858), False, 'from mmcls.apis import inference_model, init_model, show_result_pyplot\n'), ((878, 910), 'mmcls.apis.inference_model', 'inference_model', (['model', 'args.img'], {}), '(model, args.img)\n', (893, 910), False, 'from mmcls.apis import inference_model, init_model, show_result_pyplot\n'), ((915, 988), 'mmcls.apis.show_result_pyplot', 'show_result_pyplot', (['model', 'args.img', 'model_result'], {'title': '"""pytorch_result"""'}), "(model, args.img, model_result, title='pytorch_result')\n", (933, 988), False, 'from mmcls.apis import inference_model, init_model, show_result_pyplot\n'), ((1244, 1317), 'mmcls.apis.show_result_pyplot', 'show_result_pyplot', (['model', 'args.img', 'server_result'], {'title': '"""server_result"""'}), "(model, args.img, server_result, title='server_result')\n", (1262, 1317), False, 'from mmcls.apis import inference_model, init_model, show_result_pyplot\n'), ((1330, 1398), 'numpy.allclose', 'np.allclose', (["model_result['pred_score']", "server_result['pred_score']"], {}), "(model_result['pred_score'], server_result['pred_score'])\n", (1341, 1398), True, 'import numpy as np\n'), ((1178, 1203), 'requests.post', 'requests.post', (['url', 'image'], {}), '(url, image)\n', (1191, 1203), False, 'import requests\n')]
|
## @package teetool
# This module contains the GaussianProcess class
#
# See GaussianProcess class for more details
import teetool as tt
import numpy as np
from numpy.linalg import det, inv, pinv, cond
## GaussianProcess class evaluates an ensemble of trajectories as a Gaussian process
#
# Such a Gaussian process has a mean and covariance, and expresses itself as an ellipse (2d) or ellipsoid (3d) at a constant variance
class GaussianProcess(object):
## The constructor of GaussianProcess
# @param self object pointer
# @param cluster_data trajectory data in specific format: a list of (x, Y), where x [npoints x 1] and Y [npoints x ndim]
# @param ngaus number of Gaussians desired
def __init__(self, cluster_data, ngaus):
# Fit cluster_data in a [0, 1] domain
outline = tt.helpers.get_cluster_data_outline(cluster_data)
cluster_data_norm = tt.helpers.get_cluster_data_norm(cluster_data,
outline)
## normalised cluster_data
self._cluster_data = cluster_data_norm
## original outline
self._outline = outline
## number of Gaussians after modelling
self._ngaus = ngaus
## dimensionality of trajectory data
self._ndim = tt.helpers.getDimension(cluster_data)
## obtain vectors to multiply normalised values with, allows for a transformation back to the actual values from the normalised ones.
# @param self The object pointer.
# @return M, vector with minimum values [ngaus*ndim x 1]
# @return D, vector with difference values [ngaus*ndim x 1]
def _outline2vectors(self):
M_list = [] # list with minimum values [M]
D_list = [] # list with difference [D]
for d in range(self._ndim):
xmin = self._outline[d*2+0]
xmax = self._outline[d*2+1]
m1 = np.ones(shape=(self._ngaus, 1)) * (xmin)
M_list.append(m1)
d1 = np.ones(shape=(self._ngaus, 1)) * (xmax - xmin)
D_list.append(d1)
M = np.concatenate(M_list, axis=0) # vector
D = np.concatenate(D_list, axis=0) # vector
return (M, D)
## returns the mu_y, sig_y vector to the original dimensions using the outline
# @param self The object pointer.
# @param mu_y mean vector [ngaus*ndim x 1]
# @param sig_y covariance matrix [ngaus*ndim x ngaus*ndim]
def _norm2real(self, mu_y, sig_y):
(M, D) = self._outline2vectors()
        # cov(D * y + M) = diag(D) * cov(y) * diag(D); the original one-sided
        # scaling sig_y * diagflat(D**2) is only equivalent (and only symmetric)
        # for isotropic scaling, so the two-sided form is the assumed intent.
        D_diag = np.mat(np.diagflat(D))
        mu_y_real = np.multiply(mu_y, D) + M
        sig_y_real = D_diag * sig_y * D_diag
return mu_y_real, sig_y_real
## models the trajectory data via re-sampling, ignoring noise, missing data, trends, etc. Quick method only suitable for high-quality data
# @param self The object pointer.
# @return mu_y mean vector [ngaus*ndim x 1]
# @return sig_y covariance matrix [ngaus*ndim x ngaus*ndim]
# @return cc mean [ndim x 1] in ngaus cells
# @return cA covariance [ndim x ndim] in ngaus cells
def model_by_resampling(self):
# extract
cluster_data = self._cluster_data
ngaus = self._ngaus
mdim = self._ndim
# predict these values
xp = np.linspace(0, 1, ngaus)
yc = [] # list to put trajectories
for (xn, Yn) in cluster_data:
# array to fill
yp = np.empty(shape=(ngaus, mdim))
for d in range(mdim):
ynd = Yn[:, d]
yp[:, d] = np.interp(xp, xn, ynd)
# single column
yp1 = np.reshape(yp, (-1, 1), order='F')
yc.append(yp1)
# compute values
ntraj = len(yc) # number of trajectories
# obtain average [mu]
mu_y = np.zeros(shape=(mdim*ngaus, 1))
for yn in yc:
mu_y += yn
mu_y = (mu_y / ntraj)
# obtain standard deviation [sig]
sig_y_sum = np.zeros(shape=(mdim*ngaus, mdim*ngaus))
for yn in yc:
sig_y_sum += (yn - mu_y) * (yn - mu_y).transpose()
sig_y = np.mat(sig_y_sum / ntraj)
# convert to original values
mu_y, sig_y = self._norm2real(mu_y, sig_y)
# convert to cells
(cc, cA) = self._getGMMCells(mu_y, sig_y, self._ngaus)
return (mu_y, sig_y, cc, cA)
## models the trajectory data via maximum likelihood. It uses the basis function as specified to handle missing data, however, noise per trajectory has no influence on the parameter estimation. A suitable method in the absence of noise and known shape of trajectories.
# @param self The object pointer.
# @param type_basis see Basis class for input
# @param nbasis see Basis class for input
# @return mu_y mean vector [ngaus*ndim x 1]
# @return sig_y covariance matrix [ngaus*ndim x ngaus*ndim]
# @return cc mean [ndim x 1] in ngaus cells
# @return cA covariance [ndim x ndim] in ngaus cells
def model_by_ml(self, type_basis, nbasis):
# extract
cluster_data = self._cluster_data
ngaus = self._ngaus
ndim = self._ndim
ntraj = len(cluster_data)
# create a basis
basis = tt.basis.Basis(type_basis, nbasis, ndim)
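        # Per-trajectory least-squares weights w_n = pinv(H_n) * y_n; a single
        # Gaussian (mu_w, sig_w) is then fit over the stacked weight vectors.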
wc = []
for i, (xn, Y) in enumerate(cluster_data):
yn = np.reshape(Y, newshape=(-1,1), order='F')
Hn = basis.get(xn)
wn = pinv(Hn) * yn
wn = np.mat(wn)
wc.append(wn)
# obtain average [mu]
mu_w = np.zeros(shape=(ndim*nbasis, 1))
for wn in wc:
mu_w += wn
mu_w = np.mat(mu_w / ntraj)
# obtain standard deviation [sig]
sig_w_sum = np.zeros(shape=(ndim*nbasis, ndim*nbasis))
for wn in wc:
sig_w_sum += (wn - mu_w)*(wn - mu_w).transpose()
sig_w = np.mat(sig_w_sum / ntraj)
# predict these values
xp = np.linspace(0, 1, ngaus)
Hp = basis.get(xp)
mu_y = Hp * mu_w
sig_y = Hp * sig_w * Hp.transpose()
# convert to original values
mu_y, sig_y = self._norm2real(mu_y, sig_y)
(cc, cA) = self._getGMMCells(mu_y, sig_y, self._ngaus)
return (mu_y, sig_y, cc, cA)
## models the trajectory data via expectation maximization. It uses the basis function as specified to handle missing data, and, when noisy data is detected within a trajectory, the global trend, as learned, takes over. A suitable method in the presence of noise or an unknown shape of trajectories -- the latter as different models can be compared via likelihood
# @param self The object pointer.
# @param type_basis see Basis class for input
# @param nbasis see Basis class for input
# @param maximum_iterations maximum allowed number of evaluations till convergence
# @return mu_y mean vector [ngaus*ndim x 1]
# @return sig_y covariance matrix [ngaus*ndim x ngaus*ndim]
# @return cc mean [ndim x 1] in ngaus cells
# @return cA covariance [ndim x ndim] in ngaus cells
def model_by_em(self, type_basis, nbasis, maximum_iterations=2001):
# extract
cluster_data = self._cluster_data
ngaus = self._ngaus
ndim = self._ndim
ntraj = len(cluster_data)
Mstar = 0
for (xn, Yn) in cluster_data:
Mstar += np.size(xn)
# create a basis
basis = tt.basis.Basis(type_basis, nbasis, ndim)
# from cluster_data to cell structure
yc, Hc = self._from_clusterdata2cells(cluster_data, basis)
# hardcoded parameters
MAX_ITERATIONS = maximum_iterations # maximum number of iterations
CONV_LIKELIHOOD = 1e-3 # stop convergence
# min_eig = 10**-6 # minimum eigenvalue (numerical trick)
# initial variables
BETA_EM = 1000.
mu_w = np.zeros(shape=(nbasis*ndim, 1))
sig_w = np.mat(np.eye(nbasis*ndim))
sig_w_inv = inv(sig_w)
loglikelihood_previous = np.inf
for i_iter in range(MAX_ITERATIONS):
# Expectation (54) (55)
(Ewc, Ewwc) = self._Ewc_Ewwc(yc, Hc, mu_w, sig_w_inv, BETA_EM)
# Maximization :: (56), (57)
# E [ MU ]
mu_w = self._E_mu(Ewc)
# E [ SIGMA ]
sig_w = self._E_sigma(mu_w, yc, Hc, Ewc, Ewwc)
# pre-calculate inverse
sig_w_inv = inv(sig_w)
# E [BETA]
BETA_EM = self._E_beta(yc, Hc, Ewc, Ewwc, ndim, Mstar)
# //// log likelihood ///////////
# // ln( p(Y|w) - likelihood
loglikelihood_pYw = self._L_pYw(yc,
Hc,
Ewc,
Ewwc,
ndim,
Mstar,
BETA_EM)
# // ln( p(w) ) - prior
loglikelihood_pw = self._L_pw(Ewc,
Ewwc,
mu_w,
sig_w,
sig_w_inv,
ndim,
nbasis)
loglikelihood_pY = loglikelihood_pYw + loglikelihood_pw
# // check convergence
loglikelihood_diff = np.abs(loglikelihood_pY - loglikelihood_previous)
if np.isfinite(loglikelihood_pY):
# check
if (loglikelihood_diff < CONV_LIKELIHOOD):
break
else:
# not a valid loglikelihood
print("warning: not a finite loglikelihood")
break
# output
#if (i_iter % 100 == 0):
# print("{0} {1} {2}".format(i_iter, loglikelihood_pY, min_eig))
# store previous log_likelihood
loglikelihood_previous = loglikelihood_pY
# predict these values
xp = np.linspace(0, 1, ngaus)
Hp = basis.get(xp)
mu_y = Hp * mu_w
sig_y = Hp * sig_w * Hp.transpose()
# convert to original values
mu_y, sig_y = self._norm2real(mu_y, sig_y)
(cc, cA) = self._getGMMCells(mu_y, sig_y, self._ngaus)
return (mu_y, sig_y, cc, cA)
def _from_clusterdata2cells(self, cluster_data, basis):
"""converts from cluster_data (xn, Yn) list, to cells
Input:
cluster_data -
basis -
Output:
yc -
Hc -
"""
# prepare data
yc = []
Hc = []
for (xn, Yn) in cluster_data:
# data
yn = np.reshape(Yn, newshape=(-1,1), order='F')
Hn = basis.get(xn)
# add to list
yc.append(yn)
Hc.append(Hn)
return (yc, Hc)
def _Ewc_Ewwc(self, yc, Hc, mu_w, sig_w_inv, BETA_EM):
"""returns the expected values Ewc and Ewwc
input:
yc - [points]
Hc - [Gram matrix]
mu_w - E[w]
sig_w_inv - 1 / E[ww]
BETA_EM - 1 / noise
output:
Ewc - [E[wn]]
Ewnwc - [E[wnwn]]
"""
ntraj = len(yc)
Ewc = []
Ewwc = []
# Expectation (54) (55)
for n in range(ntraj):
# data
yn = yc[n]
Hn = Hc[n]
(Ewn, Ewnwn) = self._Ewn_Ewnwn(yn,
Hn,
mu_w,
sig_w_inv,
BETA_EM)
# store
            Ewc.append(Ewn)
            Ewwc.append(Ewnwn)
return (Ewc, Ewwc)
def _Ewn_Ewnwn(self, yn, Hn, mu_w, sig_w_inv, BETA_EM):
"""returns the expected values Ewn and Ewnwn
input:
yn - points
Hn - Gram matrix
mu_w - E[w]
sig_w_inv - 1 / E[ww]
BETA_EM - 1 / noise
output:
Ewn - E[wn]
Ewnwn - E[wnwn]
"""
# calculate S :: (50)
Sn_inv = sig_w_inv + np.multiply(BETA_EM,(Hn.transpose()*Hn))
Sn = np.mat(inv(Sn_inv))
        Ewn = Sn * (np.multiply(BETA_EM, Hn.transpose() * yn) + sig_w_inv * mu_w)
# assure matrix
Ewn = np.mat(Ewn)
# BISHOP (2.62)
Ewnwn = Sn + Ewn*Ewn.transpose()
# assure matrix
Ewnwn = np.mat(Ewnwn)
return (Ewn, Ewnwn)
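    # In equation form, the E-step above is the standard Bayesian linear
    # regression posterior (cf. Bishop, PRML): with basis matrix H_n, noise
    # precision beta and prior (mu_w, sig_w),
    #
    #   S_n^{-1}     = sig_w^{-1} + beta * H_n^T H_n
    #   E[w_n]       = S_n (beta * H_n^T y_n + sig_w^{-1} mu_w)
    #   E[w_n w_n^T] = S_n + E[w_n] E[w_n]^T      (Bishop eq. 2.62)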
def _E_mu(self, Ewc):
"""returns the expected value E [ MU ]
Input:
Ewc - list of expected values
Output:
mu_w - average of expected values
"""
# total number of trajectories
ntraj = len(Ewc)
mu_w_sum = np.zeros_like(Ewc[0])
for Ewn in Ewc:
# sum
mu_w_sum += Ewn
mu_w = np.mat(mu_w_sum / ntraj)
return mu_w
def _E_sigma(self, mu_w, yc, Hc, Ewc, Ewwc):
"""return the expected variance E [ SIGMA ]
this takes into account the measured data and the model
Input:
            mu_w - current estimate of E[w]
            yc - list of observation vectors
            Hc - list of Gram matrices
            Ewc - list of E[wn]
            Ewwc - list of E[wn wn^T]
        Output:
            sig_w - updated covariance estimate
"""
# total number of trajectories
ntraj = len(yc)
sig_w_sum = np.zeros_like(Ewwc[0])
# E [ SIGMA ]
# sig_w_sum = np.zeros((nbasis*ndim, nbasis*ndim));
for n in range(ntraj):
# extract data
yn = yc[n]
Hn = Hc[n]
Ewn = Ewc[n]
Ewnwn = Ewwc[n]
# sum
SIGMA_n = Ewnwn - 2.*(mu_w*Ewn.transpose()) + mu_w*mu_w.transpose()
sig_w_sum += SIGMA_n
sig_w = np.mat(sig_w_sum / ntraj)
return sig_w
def _E_beta(self, yc, Hc, Ewc, Ewwc, ndim, Mstar):
"""returns the expected noise parameter"""
ntraj = len(yc)
# E [BETA]
        BETA_sum_inv = 0.
for n in range(ntraj):
# extract data
yn = yc[n]
Hn = Hc[n]
Ewn = Ewc[n]
Ewnwn = Ewwc[n]
BETA_sum_inv += np.dot(yn.transpose(),yn) - 2.*(np.dot(yn.transpose(),(Hn*Ewn))) + np.trace((Hn.transpose()*Hn)*Ewnwn)
BETA_EM = np.mat( (ndim*Mstar) / BETA_sum_inv )
return BETA_EM
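    # Equivalently: 1/beta = sum_n( y_n^T y_n - 2 y_n^T H_n E[w_n]
    #                               + tr(H_n^T H_n E[w_n w_n^T]) ) / (ndim * Mstar),
    # i.e. beta is the reciprocal of the expected squared residual per scalar
    # observation.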
def _L_pYw(self, yc, Hc, Ewc, Ewwc, ndim, Mstar, BETA_EM):
"""returns ln( p (Y|w) )
likelihood of data, given the parameters"""
ntraj = len(yc)
        loglikelihood_pYw_sum = 0.
for n in range(ntraj):
# extract data
yn = yc[n]
Hn = Hc[n]
Ewn = Ewc[n]
Ewnwn = Ewwc[n]
# loglikelihood_pYw_sum = loglikelihood_pYw_sum + ((yn.')*yn - 2*(yn.')*(Hn*Ewn) + trace(((Hn.')*Hn)*Ewnwn));
loglikelihood_pYw_sum += np.dot(yn.transpose(),yn) - 2.*(np.dot(yn.transpose(),(Hn*Ewn))) + np.trace((Hn.transpose()*Hn)*Ewnwn)
# loglikelihood_pYw = + ((Mstar*D) / 2) * log(2*pi) - ((Mstar*D) / 2) * log( BETA_EM ) + (BETA_EM/2) * loglikelihood_pYw_sum;
loglikelihood_pYw = (Mstar*ndim / 2.) * np.log(2.*np.pi) - (Mstar*ndim / 2.) * np.log(BETA_EM) + (BETA_EM / 2.) * loglikelihood_pYw_sum
return loglikelihood_pYw
def _L_pw(self, Ewc, Ewwc, mu_w, sig_w, sig_w_inv, ndim, nbasis):
"""returns ln( p(w) )
likelihood of parameters, before seeing the data"""
        # guard against an ill-conditioned sig_w; note the original test
        # `cond(sig_w) > 0` is always true (condition numbers are >= 1), so a
        # finite threshold is needed for the prior term to ever be computed
        if cond(sig_w) > 1e12:
            return 1e4
        ntraj = len(Ewc)
        loglikelihood_pw_sum = 0.
for n in range(ntraj):
# extract data
Ewn = Ewc[n]
Ewnwn = Ewwc[n]
# loglikelihood_pw_sum = loglikelihood_pw_sum + trace( (LAMBDA_EM)*( Ewnwn - 2*MU_EM*(Ewn.') + (MU_EM*(MU_EM.')) ) );
loglikelihood_pw_sum += np.trace(sig_w_inv*(Ewnwn - 2.*mu_w*Ewn.transpose() + mu_w*mu_w.transpose()))
# loglikelihood_pw = + ((N*J*D) / 2) * log(2*pi) + (N/2) * ln_det_Sigma + (1/2) * loglikelihood_pw_sum;
loglikelihood_pw = (ntraj*nbasis*ndim/2.)*np.log(2*np.pi) + (ntraj/2.)*np.log(det(sig_w)) + (1./2.)*loglikelihood_pw_sum
return loglikelihood_pw
def _getGMMCells(self, mu_y, sig_y, ngaus):
"""
return Gaussian Mixture Model (GMM) in cells
"""
cc = []
cA = []
for m in range(ngaus):
# single cell
(c, A) = self._getMuSigma(mu_y, sig_y, m, ngaus)
# check for singularity
A = tt.helpers.nearest_spd(A)
cc.append(c)
cA.append(A)
return (cc, cA)
def _getMuSigma(self, mu_y, sig_y, npoint, ngaus):
"""
returns (mu, sigma)
"""
# mu_y [DM x 1]
# sig_y [DM x DM]
D = self._ndim
# check range
        if (npoint < 0) or (npoint >= ngaus):
            raise ValueError("npoint {0} not in [0, {1})".format(npoint, ngaus))
c = np.empty(shape=(D, 1))
A = np.empty(shape=(D, D))
# select position
for d_row in range(D):
c[d_row, 0] = mu_y[(npoint+d_row*ngaus), 0]
for d_col in range(D):
A[d_row, d_col] = sig_y[(npoint+d_row*ngaus), (npoint+d_col*ngaus)]
return (c, A)
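# ---------------------------------------------------------------------------
# Standalone sketch (not part of the original module): the posterior update
# that _Ewn_Ewnwn performs, rewritten with plain numpy arrays instead of
# np.mat. All names below (H, y, beta, mu_w, ...) are illustrative.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import numpy as np
    from numpy.linalg import inv

    rng = np.random.default_rng(0)
    M, K = 50, 5                        # observations, basis functions
    H = rng.standard_normal((M, K))     # basis (Gram) matrix
    w_true = rng.standard_normal((K, 1))
    beta = 100.0                        # noise precision
    y = H @ w_true + rng.standard_normal((M, 1)) / np.sqrt(beta)

    mu_w = np.zeros((K, 1))            # prior mean
    sig_w_inv = np.eye(K)               # prior precision

    # S_n^{-1} = sig_w^{-1} + beta H^T H, as in _Ewn_Ewnwn
    Sn = inv(sig_w_inv + beta * H.T @ H)
    Ewn = Sn @ (beta * H.T @ y + sig_w_inv @ mu_w)
    Ewnwn = Sn + Ewn @ Ewn.T            # Bishop eq. 2.62
    print("posterior mean approx. recovers w_true:",
          np.allclose(Ewn, w_true, atol=0.3))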
|
[
"teetool.helpers.nearest_spd",
"numpy.abs",
"numpy.diagflat",
"numpy.empty",
"numpy.ones",
"numpy.linalg.cond",
"numpy.interp",
"numpy.mat",
"numpy.linalg.pinv",
"teetool.helpers.get_cluster_data_norm",
"numpy.zeros_like",
"numpy.multiply",
"numpy.isfinite",
"numpy.reshape",
"numpy.linspace",
"numpy.linalg.det",
"numpy.size",
"teetool.helpers.get_cluster_data_outline",
"numpy.linalg.inv",
"numpy.concatenate",
"numpy.log",
"numpy.zeros",
"teetool.helpers.getDimension",
"numpy.eye",
"teetool.basis.Basis"
] |
[((850, 899), 'teetool.helpers.get_cluster_data_outline', 'tt.helpers.get_cluster_data_outline', (['cluster_data'], {}), '(cluster_data)\n', (885, 899), True, 'import teetool as tt\n'), ((928, 983), 'teetool.helpers.get_cluster_data_norm', 'tt.helpers.get_cluster_data_norm', (['cluster_data', 'outline'], {}), '(cluster_data, outline)\n', (960, 983), True, 'import teetool as tt\n'), ((1329, 1366), 'teetool.helpers.getDimension', 'tt.helpers.getDimension', (['cluster_data'], {}), '(cluster_data)\n', (1352, 1366), True, 'import teetool as tt\n'), ((2127, 2157), 'numpy.concatenate', 'np.concatenate', (['M_list'], {'axis': '(0)'}), '(M_list, axis=0)\n', (2141, 2157), True, 'import numpy as np\n'), ((2179, 2209), 'numpy.concatenate', 'np.concatenate', (['D_list'], {'axis': '(0)'}), '(D_list, axis=0)\n', (2193, 2209), True, 'import numpy as np\n'), ((2579, 2598), 'numpy.diagflat', 'np.diagflat', (['(D ** 2)'], {}), '(D ** 2)\n', (2590, 2598), True, 'import numpy as np\n'), ((3319, 3343), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ngaus'], {}), '(0, 1, ngaus)\n', (3330, 3343), True, 'import numpy as np\n'), ((3851, 3884), 'numpy.zeros', 'np.zeros', ([], {'shape': '(mdim * ngaus, 1)'}), '(shape=(mdim * ngaus, 1))\n', (3859, 3884), True, 'import numpy as np\n'), ((4023, 4067), 'numpy.zeros', 'np.zeros', ([], {'shape': '(mdim * ngaus, mdim * ngaus)'}), '(shape=(mdim * ngaus, mdim * ngaus))\n', (4031, 4067), True, 'import numpy as np\n'), ((4167, 4192), 'numpy.mat', 'np.mat', (['(sig_y_sum / ntraj)'], {}), '(sig_y_sum / ntraj)\n', (4173, 4192), True, 'import numpy as np\n'), ((5282, 5322), 'teetool.basis.Basis', 'tt.basis.Basis', (['type_basis', 'nbasis', 'ndim'], {}), '(type_basis, nbasis, ndim)\n', (5296, 5322), True, 'import teetool as tt\n'), ((5613, 5647), 'numpy.zeros', 'np.zeros', ([], {'shape': '(ndim * nbasis, 1)'}), '(shape=(ndim * nbasis, 1))\n', (5621, 5647), True, 'import numpy as np\n'), ((5708, 5728), 'numpy.mat', 'np.mat', (['(mu_w / ntraj)'], {}), '(mu_w / ntraj)\n', (5714, 5728), True, 'import numpy as np\n'), ((5792, 5838), 'numpy.zeros', 'np.zeros', ([], {'shape': '(ndim * nbasis, ndim * nbasis)'}), '(shape=(ndim * nbasis, ndim * nbasis))\n', (5800, 5838), True, 'import numpy as np\n'), ((5936, 5961), 'numpy.mat', 'np.mat', (['(sig_w_sum / ntraj)'], {}), '(sig_w_sum / ntraj)\n', (5942, 5961), True, 'import numpy as np\n'), ((6007, 6031), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ngaus'], {}), '(0, 1, ngaus)\n', (6018, 6031), True, 'import numpy as np\n'), ((7489, 7529), 'teetool.basis.Basis', 'tt.basis.Basis', (['type_basis', 'nbasis', 'ndim'], {}), '(type_basis, nbasis, ndim)\n', (7503, 7529), True, 'import teetool as tt\n'), ((7938, 7972), 'numpy.zeros', 'np.zeros', ([], {'shape': '(nbasis * ndim, 1)'}), '(shape=(nbasis * ndim, 1))\n', (7946, 7972), True, 'import numpy as np\n'), ((8035, 8045), 'numpy.linalg.inv', 'inv', (['sig_w'], {}), '(sig_w)\n', (8038, 8045), False, 'from numpy.linalg import det, inv, pinv, cond\n'), ((10187, 10211), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'ngaus'], {}), '(0, 1, ngaus)\n', (10198, 10211), True, 'import numpy as np\n'), ((12659, 12670), 'numpy.mat', 'np.mat', (['Ewn'], {}), '(Ewn)\n', (12665, 12670), True, 'import numpy as np\n'), ((12778, 12791), 'numpy.mat', 'np.mat', (['Ewnwn'], {}), '(Ewnwn)\n', (12784, 12791), True, 'import numpy as np\n'), ((13113, 13134), 'numpy.zeros_like', 'np.zeros_like', (['Ewc[0]'], {}), '(Ewc[0])\n', (13126, 13134), True, 'import numpy as np\n'), ((13222, 13246), 'numpy.mat', 'np.mat', 
(['(mu_w_sum / ntraj)'], {}), '(mu_w_sum / ntraj)\n', (13228, 13246), True, 'import numpy as np\n'), ((13675, 13697), 'numpy.zeros_like', 'np.zeros_like', (['Ewwc[0]'], {}), '(Ewwc[0])\n', (13688, 13697), True, 'import numpy as np\n'), ((14089, 14114), 'numpy.mat', 'np.mat', (['(sig_w_sum / ntraj)'], {}), '(sig_w_sum / ntraj)\n', (14095, 14114), True, 'import numpy as np\n'), ((14626, 14661), 'numpy.mat', 'np.mat', (['(ndim * Mstar / BETA_sum_inv)'], {}), '(ndim * Mstar / BETA_sum_inv)\n', (14632, 14661), True, 'import numpy as np\n'), ((17352, 17374), 'numpy.empty', 'np.empty', ([], {'shape': '(D, 1)'}), '(shape=(D, 1))\n', (17360, 17374), True, 'import numpy as np\n'), ((17387, 17409), 'numpy.empty', 'np.empty', ([], {'shape': '(D, D)'}), '(shape=(D, D))\n', (17395, 17409), True, 'import numpy as np\n'), ((2620, 2640), 'numpy.multiply', 'np.multiply', (['mu_y', 'D'], {}), '(mu_y, D)\n', (2631, 2640), True, 'import numpy as np\n'), ((3473, 3502), 'numpy.empty', 'np.empty', ([], {'shape': '(ngaus, mdim)'}), '(shape=(ngaus, mdim))\n', (3481, 3502), True, 'import numpy as np\n'), ((3666, 3700), 'numpy.reshape', 'np.reshape', (['yp', '(-1, 1)'], {'order': '"""F"""'}), "(yp, (-1, 1), order='F')\n", (3676, 3700), True, 'import numpy as np\n'), ((5409, 5451), 'numpy.reshape', 'np.reshape', (['Y'], {'newshape': '(-1, 1)', 'order': '"""F"""'}), "(Y, newshape=(-1, 1), order='F')\n", (5419, 5451), True, 'import numpy as np\n'), ((5530, 5540), 'numpy.mat', 'np.mat', (['wn'], {}), '(wn)\n', (5536, 5540), True, 'import numpy as np\n'), ((7435, 7446), 'numpy.size', 'np.size', (['xn'], {}), '(xn)\n', (7442, 7446), True, 'import numpy as np\n'), ((7994, 8015), 'numpy.eye', 'np.eye', (['(nbasis * ndim)'], {}), '(nbasis * ndim)\n', (8000, 8015), True, 'import numpy as np\n'), ((8494, 8504), 'numpy.linalg.inv', 'inv', (['sig_w'], {}), '(sig_w)\n', (8497, 8504), False, 'from numpy.linalg import det, inv, pinv, cond\n'), ((9553, 9602), 'numpy.abs', 'np.abs', (['(loglikelihood_pY - loglikelihood_previous)'], {}), '(loglikelihood_pY - loglikelihood_previous)\n', (9559, 9602), True, 'import numpy as np\n'), ((9619, 9648), 'numpy.isfinite', 'np.isfinite', (['loglikelihood_pY'], {}), '(loglikelihood_pY)\n', (9630, 9648), True, 'import numpy as np\n'), ((10881, 10924), 'numpy.reshape', 'np.reshape', (['Yn'], {'newshape': '(-1, 1)', 'order': '"""F"""'}), "(Yn, newshape=(-1, 1), order='F')\n", (10891, 10924), True, 'import numpy as np\n'), ((12520, 12531), 'numpy.linalg.inv', 'inv', (['Sn_inv'], {}), '(Sn_inv)\n', (12523, 12531), False, 'from numpy.linalg import det, inv, pinv, cond\n'), ((15843, 15854), 'numpy.linalg.cond', 'cond', (['sig_w'], {}), '(sig_w)\n', (15847, 15854), False, 'from numpy.linalg import det, inv, pinv, cond\n'), ((16909, 16934), 'teetool.helpers.nearest_spd', 'tt.helpers.nearest_spd', (['A'], {}), '(A)\n', (16931, 16934), True, 'import teetool as tt\n'), ((1947, 1978), 'numpy.ones', 'np.ones', ([], {'shape': '(self._ngaus, 1)'}), '(shape=(self._ngaus, 1))\n', (1954, 1978), True, 'import numpy as np\n'), ((2036, 2067), 'numpy.ones', 'np.ones', ([], {'shape': '(self._ngaus, 1)'}), '(shape=(self._ngaus, 1))\n', (2043, 2067), True, 'import numpy as np\n'), ((3596, 3618), 'numpy.interp', 'np.interp', (['xp', 'xn', 'ynd'], {}), '(xp, xn, ynd)\n', (3605, 3618), True, 'import numpy as np\n'), ((5499, 5507), 'numpy.linalg.pinv', 'pinv', (['Hn'], {}), '(Hn)\n', (5503, 5507), False, 'from numpy.linalg import det, inv, pinv, cond\n'), ((15506, 15525), 'numpy.log', 'np.log', (['(2.0 * np.pi)'], {}), '(2.0 * 
np.pi)\n', (15512, 15525), True, 'import numpy as np\n'), ((15545, 15560), 'numpy.log', 'np.log', (['BETA_EM'], {}), '(BETA_EM)\n', (15551, 15560), True, 'import numpy as np\n'), ((16466, 16483), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (16472, 16483), True, 'import numpy as np\n'), ((16502, 16512), 'numpy.linalg.det', 'det', (['sig_w'], {}), '(sig_w)\n', (16505, 16512), False, 'from numpy.linalg import det, inv, pinv, cond\n')]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from PIL import Image
import os
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout
data = []
labels = []
classes = 43
cur_path = os.getcwd()
#Retrieving the images and their labels
for i in range(classes):
path = os.path.join(cur_path,'train',str(i))
images = os.listdir(path)
for a in images:
try:
            # os.path.join keeps the loader portable (the original hard-coded
            # a Windows '\\' separator)
            image = Image.open(os.path.join(path, a))
            image = image.resize((30,30))
            image = np.array(image)
            #sim = Image.fromarray(image)
            data.append(image)
            labels.append(i)
        except Exception as e:
            print("Error loading image:", e)
#Converting lists into numpy arrays
data = np.array(data)
labels = np.array(labels)
print(data.shape, labels.shape)
#Splitting training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.2, random_state=42)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
#Converting the labels into one hot encoding
y_train = to_categorical(y_train, 43)
y_test = to_categorical(y_test, 43)
#Building the model
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu', input_shape=X_train.shape[1:]))
model.add(Conv2D(filters=32, kernel_size=(5,5), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Dropout(rate=0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(rate=0.5))
model.add(Dense(43, activation='softmax'))
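#Shape check: 30x30x3 inputs shrink to 26->22->11 in the first conv block and
#9->7->3 in the second, so Flatten sees 3*3*64 = 576 features; call
#model.summary() to verify.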
#Compilation of the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
epochs = 15
history = model.fit(X_train, y_train, batch_size=32, epochs=epochs, validation_data=(X_test, y_test))
model.save("my_model.h5")
#plotting graphs for accuracy
plt.figure(0)
plt.plot(history.history['accuracy'], label='training accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.title('Accuracy')
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.legend()
plt.show()
plt.figure(1)
plt.plot(history.history['loss'], label='training loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.title('Loss')
plt.xlabel('epochs')
plt.ylabel('loss')
plt.legend()
plt.show()
#testing accuracy on test dataset
from sklearn.metrics import accuracy_score
y_test = pd.read_csv('Test.csv')
labels = y_test["ClassId"].values
imgs = y_test["Path"].values
data=[]
for img in imgs:
image = Image.open(img)
image = image.resize((30,30))
data.append(np.array(image))
X_test=np.array(data)
#predict_classes was removed in newer Keras/TF releases; taking the argmax of
#the softmax output is the equivalent
pred = np.argmax(model.predict(X_test), axis=1)
#Accuracy with the test data
print(accuracy_score(labels, pred))
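#Hedged single-image inference sketch (assumes my_model.h5 from the training
#run above and a hypothetical test image 'sign.png'); load_model is already
#imported at the top of this script
inf_model = load_model("my_model.h5")
img = np.array(Image.open("sign.png").resize((30, 30)))
x = np.expand_dims(img, axis=0)
print("predicted class:", np.argmax(inf_model.predict(x), axis=1)[0])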
|
[
"matplotlib.pyplot.title",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"keras.layers.MaxPool2D",
"matplotlib.pyplot.figure",
"keras.layers.Flatten",
"keras.utils.to_categorical",
"matplotlib.pyplot.show",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"keras.layers.Conv2D",
"matplotlib.pyplot.ylabel",
"os.listdir",
"matplotlib.pyplot.plot",
"os.getcwd",
"PIL.Image.open",
"keras.layers.Dense",
"numpy.array",
"keras.models.Sequential",
"matplotlib.pyplot.xlabel"
] |
[((395, 406), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (404, 406), False, 'import os\n'), ((916, 930), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (924, 930), True, 'import numpy as np\n'), ((940, 956), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (948, 956), True, 'import numpy as np\n'), ((1065, 1127), 'sklearn.model_selection.train_test_split', 'train_test_split', (['data', 'labels'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(data, labels, test_size=0.2, random_state=42)\n', (1081, 1127), False, 'from sklearn.model_selection import train_test_split\n'), ((1249, 1276), 'keras.utils.to_categorical', 'to_categorical', (['y_train', '(43)'], {}), '(y_train, 43)\n', (1263, 1276), False, 'from keras.utils import to_categorical\n'), ((1286, 1312), 'keras.utils.to_categorical', 'to_categorical', (['y_test', '(43)'], {}), '(y_test, 43)\n', (1300, 1312), False, 'from keras.utils import to_categorical\n'), ((1342, 1354), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (1352, 1354), False, 'from keras.models import Sequential, load_model\n'), ((2219, 2232), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {}), '(0)\n', (2229, 2232), True, 'import matplotlib.pyplot as plt\n'), ((2233, 2297), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {'label': '"""training accuracy"""'}), "(history.history['accuracy'], label='training accuracy')\n", (2241, 2297), True, 'import matplotlib.pyplot as plt\n'), ((2298, 2361), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {'label': '"""val accuracy"""'}), "(history.history['val_accuracy'], label='val accuracy')\n", (2306, 2361), True, 'import matplotlib.pyplot as plt\n'), ((2362, 2383), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (2371, 2383), True, 'import matplotlib.pyplot as plt\n'), ((2384, 2404), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2394, 2404), True, 'import matplotlib.pyplot as plt\n'), ((2405, 2427), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (2415, 2427), True, 'import matplotlib.pyplot as plt\n'), ((2428, 2440), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2438, 2440), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2451), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2449, 2451), True, 'import matplotlib.pyplot as plt\n'), ((2453, 2466), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2463, 2466), True, 'import matplotlib.pyplot as plt\n'), ((2467, 2523), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""training loss"""'}), "(history.history['loss'], label='training loss')\n", (2475, 2523), True, 'import matplotlib.pyplot as plt\n'), ((2524, 2579), 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""val loss"""'}), "(history.history['val_loss'], label='val loss')\n", (2532, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2597), 'matplotlib.pyplot.title', 'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (2589, 2597), True, 'import matplotlib.pyplot as plt\n'), ((2598, 2618), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epochs"""'], {}), "('epochs')\n", (2608, 2618), True, 'import matplotlib.pyplot as plt\n'), ((2619, 2637), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""loss"""'], {}), "('loss')\n", (2629, 2637), True, 'import matplotlib.pyplot as plt\n'), ((2638, 2650), 'matplotlib.pyplot.legend', 
'plt.legend', ([], {}), '()\n', (2648, 2650), True, 'import matplotlib.pyplot as plt\n'), ((2651, 2661), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2659, 2661), True, 'import matplotlib.pyplot as plt\n'), ((2750, 2773), 'pandas.read_csv', 'pd.read_csv', (['"""Test.csv"""'], {}), "('Test.csv')\n", (2761, 2773), True, 'import pandas as pd\n'), ((2968, 2982), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2976, 2982), True, 'import numpy as np\n'), ((536, 552), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (546, 552), False, 'import os\n'), ((1365, 1458), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5, 5)', 'activation': '"""relu"""', 'input_shape': 'X_train.shape[1:]'}), "(filters=32, kernel_size=(5, 5), activation='relu', input_shape=\n X_train.shape[1:])\n", (1371, 1458), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1464, 1521), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(5, 5)', 'activation': '"""relu"""'}), "(filters=32, kernel_size=(5, 5), activation='relu')\n", (1470, 1521), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1532, 1559), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1541, 1559), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1571, 1589), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (1578, 1589), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1601, 1658), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1607, 1658), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1670, 1727), 'keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3, 3)', 'activation': '"""relu"""'}), "(filters=64, kernel_size=(3, 3), activation='relu')\n", (1676, 1727), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1739, 1766), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1748, 1766), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1778, 1796), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.25)'}), '(rate=0.25)\n', (1785, 1796), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1808, 1817), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1815, 1817), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1829, 1858), 'keras.layers.Dense', 'Dense', (['(256)'], {'activation': '"""relu"""'}), "(256, activation='relu')\n", (1834, 1858), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1870, 1887), 'keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), '(rate=0.5)\n', (1877, 1887), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((1899, 1930), 'keras.layers.Dense', 'Dense', (['(43)'], {'activation': '"""softmax"""'}), "(43, activation='softmax')\n", (1904, 1930), False, 'from keras.layers import Conv2D, MaxPool2D, Dense, Flatten, Dropout\n'), ((2877, 2892), 'PIL.Image.open', 'Image.open', (['img'], {}), '(img)\n', (2887, 2892), False, 'from PIL import Image\n'), ((3100, 3128), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'pred'], {}), 
'(labels, pred)\n', (3114, 3128), False, 'from sklearn.metrics import accuracy_score\n'), ((2943, 2958), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2951, 2958), True, 'import numpy as np\n'), ((608, 635), 'PIL.Image.open', 'Image.open', (["(path + '\\\\' + a)"], {}), "(path + '\\\\' + a)\n", (618, 635), False, 'from PIL import Image\n'), ((697, 712), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (705, 712), True, 'import numpy as np\n')]
|
import io
import numpy as np
import pytest
from typing import List, Tuple
from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto
from mlagents_envs.communicator_objects.observation_pb2 import (
ObservationProto,
NONE,
PNG,
)
from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto
from mlagents_envs.communicator_objects.agent_info_action_pair_pb2 import (
AgentInfoActionPairProto,
)
from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto
from mlagents_envs.base_env import (
BehaviorSpec,
ActionSpec,
DecisionSteps,
TerminalSteps,
)
from mlagents_envs.exception import UnityObservationException
from mlagents_envs.rpc_utils import (
behavior_spec_from_proto,
process_pixels,
_process_visual_observation,
_process_vector_observation,
steps_from_proto,
)
from PIL import Image
def generate_list_agent_proto(
n_agent: int,
    shape: List[Tuple[int, ...]],
infinite_rewards: bool = False,
nan_observations: bool = False,
) -> List[AgentInfoProto]:
result = []
for agent_index in range(n_agent):
ap = AgentInfoProto()
ap.reward = float("inf") if infinite_rewards else agent_index
ap.done = agent_index % 2 == 0
ap.max_step_reached = agent_index % 4 == 0
ap.id = agent_index
ap.action_mask.extend([True, False] * 5)
obs_proto_list = []
for obs_index in range(len(shape)):
obs_proto = ObservationProto()
obs_proto.shape.extend(list(shape[obs_index]))
obs_proto.compression_type = NONE
obs_proto.float_data.data.extend(
([float("nan")] if nan_observations else [0.1])
* np.prod(shape[obs_index])
)
obs_proto_list.append(obs_proto)
ap.observations.extend(obs_proto_list)
result.append(ap)
return result
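# Conventions baked into the protos above and relied on by the tests below:
# reward == agent_index, done for every even index (so half the agents are
# done), and max_step_reached for every fourth agent.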
def generate_compressed_data(in_array: np.ndarray) -> bytes:
image_arr = (in_array * 255).astype(np.uint8)
bytes_out = bytes()
num_channels = in_array.shape[2]
num_images = (num_channels + 2) // 3
# Split the input image into batches of 3 channels.
for i in range(num_images):
sub_image = image_arr[..., 3 * i : 3 * i + 3]
if (i == num_images - 1) and (num_channels % 3) != 0:
# Pad zeros
zero_shape = list(in_array.shape)
zero_shape[2] = 3 - (num_channels % 3)
z = np.zeros(zero_shape, dtype=np.uint8)
sub_image = np.concatenate([sub_image, z], axis=2)
im = Image.fromarray(sub_image, "RGB")
byteIO = io.BytesIO()
im.save(byteIO, format="PNG")
bytes_out += byteIO.getvalue()
return bytes_out
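# Worked example of the batching above: a 7-channel observation becomes
# (7 + 2) // 3 == 3 PNGs covering channels [0:3], [3:6] and [6:7], the last
# padded with 3 - (7 % 3) == 2 zero channels; process_pixels reverses the
# concatenation (see test_process_pixels_multi_png below).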
# test helper function for old C# API (no compressed channel mapping)
def generate_compressed_proto_obs(
in_array: np.ndarray, grayscale: bool = False
) -> ObservationProto:
obs_proto = ObservationProto()
obs_proto.compressed_data = generate_compressed_data(in_array)
obs_proto.compression_type = PNG
if grayscale:
# grayscale flag is only used for old API without mapping
expected_shape = [in_array.shape[0], in_array.shape[1], 1]
obs_proto.shape.extend(expected_shape)
else:
obs_proto.shape.extend(in_array.shape)
return obs_proto
# test helper function for new C# API (with compressed channel mapping)
def generate_compressed_proto_obs_with_mapping(
in_array: np.ndarray, mapping: List[int]
) -> ObservationProto:
obs_proto = ObservationProto()
obs_proto.compressed_data = generate_compressed_data(in_array)
obs_proto.compression_type = PNG
if mapping is not None:
obs_proto.compressed_channel_mapping.extend(mapping)
expected_shape = [
in_array.shape[0],
in_array.shape[1],
len({m for m in mapping if m >= 0}),
]
obs_proto.shape.extend(expected_shape)
else:
obs_proto.shape.extend(in_array.shape)
return obs_proto
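# Mapping semantics (as exercised by the tests below): each entry maps a PNG
# channel to an output channel, -1 marks padding, and repeated indices such
# as [0, 0, 0] collapse to a single output channel -- the grayscale test
# expects the channel mean in that case.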
def generate_uncompressed_proto_obs(in_array: np.ndarray) -> ObservationProto:
obs_proto = ObservationProto()
obs_proto.float_data.data.extend(in_array.flatten().tolist())
obs_proto.compression_type = NONE
obs_proto.shape.extend(in_array.shape)
return obs_proto
def proto_from_steps(
decision_steps: DecisionSteps, terminal_steps: TerminalSteps
) -> List[AgentInfoProto]:
agent_info_protos: List[AgentInfoProto] = []
# Take care of the DecisionSteps first
for agent_id in decision_steps.agent_id:
agent_id_index = decision_steps.agent_id_to_index[agent_id]
reward = decision_steps.reward[agent_id_index]
done = False
max_step_reached = False
agent_mask = None
if decision_steps.action_mask is not None:
agent_mask = [] # type: ignore
for _branch in decision_steps.action_mask:
agent_mask = np.concatenate(
(agent_mask, _branch[agent_id_index, :]), axis=0
)
observations: List[ObservationProto] = []
for all_observations_of_type in decision_steps.obs:
observation = all_observations_of_type[agent_id_index]
if len(observation.shape) == 3:
observations.append(generate_uncompressed_proto_obs(observation))
else:
observations.append(
ObservationProto(
float_data=ObservationProto.FloatData(data=observation),
shape=[len(observation)],
compression_type=NONE,
)
)
agent_info_proto = AgentInfoProto(
reward=reward,
done=done,
id=agent_id,
max_step_reached=max_step_reached,
action_mask=agent_mask,
observations=observations,
)
agent_info_protos.append(agent_info_proto)
# Take care of the TerminalSteps second
for agent_id in terminal_steps.agent_id:
agent_id_index = terminal_steps.agent_id_to_index[agent_id]
reward = terminal_steps.reward[agent_id_index]
done = True
max_step_reached = terminal_steps.interrupted[agent_id_index]
final_observations: List[ObservationProto] = []
for all_observations_of_type in terminal_steps.obs:
observation = all_observations_of_type[agent_id_index]
if len(observation.shape) == 3:
final_observations.append(generate_uncompressed_proto_obs(observation))
else:
final_observations.append(
ObservationProto(
float_data=ObservationProto.FloatData(data=observation),
shape=[len(observation)],
compression_type=NONE,
)
)
agent_info_proto = AgentInfoProto(
reward=reward,
done=done,
id=agent_id,
max_step_reached=max_step_reached,
action_mask=None,
observations=final_observations,
)
agent_info_protos.append(agent_info_proto)
return agent_info_protos
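# proto_from_steps is effectively the inverse of rpc_utils.steps_from_proto:
# it re-serializes DecisionSteps/TerminalSteps into AgentInfoProto messages,
# with terminal agents marked done=True and given no action mask.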
# The arguments here are the DecisionSteps, TerminalSteps and continuous/discrete actions for a single agent name
def proto_from_steps_and_action(
decision_steps: DecisionSteps,
terminal_steps: TerminalSteps,
continuous_actions: np.ndarray,
discrete_actions: np.ndarray,
) -> List[AgentInfoActionPairProto]:
agent_info_protos = proto_from_steps(decision_steps, terminal_steps)
agent_action_protos = []
num_agents = (
len(continuous_actions)
if continuous_actions is not None
else len(discrete_actions)
)
for i in range(num_agents):
proto = AgentActionProto()
if continuous_actions is not None:
proto.continuous_actions.extend(continuous_actions[i])
proto.vector_actions_deprecated.extend(continuous_actions[i])
if discrete_actions is not None:
proto.discrete_actions.extend(discrete_actions[i])
proto.vector_actions_deprecated.extend(discrete_actions[i])
agent_action_protos.append(proto)
agent_info_action_pair_protos = [
AgentInfoActionPairProto(agent_info=agent_info_proto, action_info=action_proto)
for agent_info_proto, action_proto in zip(
agent_info_protos, agent_action_protos
)
]
return agent_info_action_pair_protos
def test_process_pixels():
in_array = np.random.rand(128, 64, 3)
byte_arr = generate_compressed_data(in_array)
out_array = process_pixels(byte_arr, 3)
assert out_array.shape == (128, 64, 3)
assert np.sum(in_array - out_array) / np.prod(in_array.shape) < 0.01
assert np.allclose(in_array, out_array, atol=0.01)
def test_process_pixels_multi_png():
height = 128
width = 64
num_channels = 7
in_array = np.random.rand(height, width, num_channels)
byte_arr = generate_compressed_data(in_array)
out_array = process_pixels(byte_arr, num_channels)
assert out_array.shape == (height, width, num_channels)
assert np.sum(in_array - out_array) / np.prod(in_array.shape) < 0.01
assert np.allclose(in_array, out_array, atol=0.01)
def test_process_pixels_gray():
in_array = np.random.rand(128, 64, 3)
byte_arr = generate_compressed_data(in_array)
out_array = process_pixels(byte_arr, 1)
assert out_array.shape == (128, 64, 1)
assert np.mean(in_array.mean(axis=2, keepdims=True) - out_array) < 0.01
assert np.allclose(in_array.mean(axis=2, keepdims=True), out_array, atol=0.01)
def test_vector_observation():
n_agents = 10
shapes = [(3,), (4,)]
list_proto = generate_list_agent_proto(n_agents, shapes)
for obs_index, shape in enumerate(shapes):
arr = _process_vector_observation(obs_index, shape, list_proto)
assert list(arr.shape) == ([n_agents] + list(shape))
assert np.allclose(arr, 0.1, atol=0.01)
def test_process_visual_observation():
in_array_1 = np.random.rand(128, 64, 3)
proto_obs_1 = generate_compressed_proto_obs(in_array_1)
in_array_2 = np.random.rand(128, 64, 3)
in_array_2_mapping = [0, 1, 2]
proto_obs_2 = generate_compressed_proto_obs_with_mapping(
in_array_2, in_array_2_mapping
)
ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap2 = AgentInfoProto()
ap2.observations.extend([proto_obs_2])
ap_list = [ap1, ap2]
arr = _process_visual_observation(0, (128, 64, 3), ap_list)
assert list(arr.shape) == [2, 128, 64, 3]
assert np.allclose(arr[0, :, :, :], in_array_1, atol=0.01)
assert np.allclose(arr[1, :, :, :], in_array_2, atol=0.01)
def test_process_visual_observation_grayscale():
in_array_1 = np.random.rand(128, 64, 3)
proto_obs_1 = generate_compressed_proto_obs(in_array_1, grayscale=True)
expected_out_array_1 = np.mean(in_array_1, axis=2, keepdims=True)
in_array_2 = np.random.rand(128, 64, 3)
in_array_2_mapping = [0, 0, 0]
proto_obs_2 = generate_compressed_proto_obs_with_mapping(
in_array_2, in_array_2_mapping
)
expected_out_array_2 = np.mean(in_array_2, axis=2, keepdims=True)
ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap2 = AgentInfoProto()
ap2.observations.extend([proto_obs_2])
ap_list = [ap1, ap2]
arr = _process_visual_observation(0, (128, 64, 1), ap_list)
assert list(arr.shape) == [2, 128, 64, 1]
assert np.allclose(arr[0, :, :, :], expected_out_array_1, atol=0.01)
assert np.allclose(arr[1, :, :, :], expected_out_array_2, atol=0.01)
def test_process_visual_observation_padded_channels():
in_array_1 = np.random.rand(128, 64, 12)
in_array_1_mapping = [0, 1, 2, 3, -1, -1, 4, 5, 6, 7, -1, -1]
proto_obs_1 = generate_compressed_proto_obs_with_mapping(
in_array_1, in_array_1_mapping
)
expected_out_array_1 = np.take(in_array_1, [0, 1, 2, 3, 6, 7, 8, 9], axis=2)
ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap_list = [ap1]
arr = _process_visual_observation(0, (128, 64, 8), ap_list)
assert list(arr.shape) == [1, 128, 64, 8]
assert np.allclose(arr[0, :, :, :], expected_out_array_1, atol=0.01)
def test_process_visual_observation_bad_shape():
in_array_1 = np.random.rand(128, 64, 3)
proto_obs_1 = generate_compressed_proto_obs(in_array_1)
ap1 = AgentInfoProto()
ap1.observations.extend([proto_obs_1])
ap_list = [ap1]
with pytest.raises(UnityObservationException):
_process_visual_observation(0, (128, 42, 3), ap_list)
def test_batched_step_result_from_proto():
n_agents = 10
shapes = [(3,), (4,)]
spec = BehaviorSpec(shapes, ActionSpec.create_continuous(3))
ap_list = generate_list_agent_proto(n_agents, shapes)
decision_steps, terminal_steps = steps_from_proto(ap_list, spec)
for agent_id in range(n_agents):
if agent_id in decision_steps:
# we set the reward equal to the agent id in generate_list_agent_proto
assert decision_steps[agent_id].reward == agent_id
elif agent_id in terminal_steps:
assert terminal_steps[agent_id].reward == agent_id
else:
raise Exception("Missing agent from the steps")
# We sort the AgentId since they are split between DecisionSteps and TerminalSteps
combined_agent_id = list(decision_steps.agent_id) + list(terminal_steps.agent_id)
combined_agent_id.sort()
assert combined_agent_id == list(range(n_agents))
for agent_id in range(n_agents):
assert (agent_id in terminal_steps) == (agent_id % 2 == 0)
if agent_id in terminal_steps:
assert terminal_steps[agent_id].interrupted == (agent_id % 4 == 0)
assert decision_steps.obs[0].shape[1] == shapes[0][0]
assert decision_steps.obs[1].shape[1] == shapes[1][0]
assert terminal_steps.obs[0].shape[1] == shapes[0][0]
assert terminal_steps.obs[1].shape[1] == shapes[1][0]
def test_action_masking_discrete():
n_agents = 10
shapes = [(3,), (4,)]
behavior_spec = BehaviorSpec(shapes, ActionSpec.create_discrete((7, 3)))
ap_list = generate_list_agent_proto(n_agents, shapes)
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert isinstance(masks, list)
assert len(masks) == 2
assert masks[0].shape == (n_agents / 2, 7) # half agents are done
assert masks[1].shape == (n_agents / 2, 3) # half agents are done
assert masks[0][0, 0]
assert not masks[1][0, 0]
assert masks[1][0, 1]
def test_action_masking_discrete_1():
n_agents = 10
shapes = [(3,), (4,)]
behavior_spec = BehaviorSpec(shapes, ActionSpec.create_discrete((10,)))
ap_list = generate_list_agent_proto(n_agents, shapes)
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert isinstance(masks, list)
assert len(masks) == 1
assert masks[0].shape == (n_agents / 2, 10)
assert masks[0][0, 0]
def test_action_masking_discrete_2():
n_agents = 10
shapes = [(3,), (4,)]
behavior_spec = BehaviorSpec(shapes, ActionSpec.create_discrete((2, 2, 6)))
ap_list = generate_list_agent_proto(n_agents, shapes)
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert isinstance(masks, list)
assert len(masks) == 3
assert masks[0].shape == (n_agents / 2, 2)
assert masks[1].shape == (n_agents / 2, 2)
assert masks[2].shape == (n_agents / 2, 6)
assert masks[0][0, 0]
def test_action_masking_continuous():
n_agents = 10
shapes = [(3,), (4,)]
behavior_spec = BehaviorSpec(shapes, ActionSpec.create_continuous(10))
ap_list = generate_list_agent_proto(n_agents, shapes)
decision_steps, terminal_steps = steps_from_proto(ap_list, behavior_spec)
masks = decision_steps.action_mask
assert masks is None
def test_agent_behavior_spec_from_proto():
agent_proto = generate_list_agent_proto(1, [(3,), (4,)])[0]
bp = BrainParametersProto()
bp.vector_action_size_deprecated.extend([5, 4])
bp.vector_action_space_type_deprecated = 0
behavior_spec = behavior_spec_from_proto(bp, agent_proto)
assert behavior_spec.action_spec.is_discrete()
assert not behavior_spec.action_spec.is_continuous()
assert behavior_spec.observation_shapes == [(3,), (4,)]
assert behavior_spec.action_spec.discrete_branches == (5, 4)
assert behavior_spec.action_spec.discrete_size == 2
bp = BrainParametersProto()
bp.vector_action_size_deprecated.extend([6])
bp.vector_action_space_type_deprecated = 1
behavior_spec = behavior_spec_from_proto(bp, agent_proto)
assert not behavior_spec.action_spec.is_discrete()
assert behavior_spec.action_spec.is_continuous()
assert behavior_spec.action_spec.continuous_size == 6
def test_batched_step_result_from_proto_raises_on_infinite():
n_agents = 10
shapes = [(3,), (4,)]
behavior_spec = BehaviorSpec(shapes, ActionSpec.create_continuous(3))
ap_list = generate_list_agent_proto(n_agents, shapes, infinite_rewards=True)
with pytest.raises(RuntimeError):
steps_from_proto(ap_list, behavior_spec)
def test_batched_step_result_from_proto_raises_on_nan():
n_agents = 10
shapes = [(3,), (4,)]
behavior_spec = BehaviorSpec(shapes, ActionSpec.create_continuous(3))
ap_list = generate_list_agent_proto(n_agents, shapes, nan_observations=True)
with pytest.raises(RuntimeError):
steps_from_proto(ap_list, behavior_spec)
|
[
"numpy.sum",
"mlagents_envs.communicator_objects.observation_pb2.ObservationProto",
"mlagents_envs.rpc_utils.behavior_spec_from_proto",
"numpy.allclose",
"mlagents_envs.communicator_objects.brain_parameters_pb2.BrainParametersProto",
"numpy.mean",
"mlagents_envs.base_env.ActionSpec.create_continuous",
"numpy.prod",
"mlagents_envs.communicator_objects.agent_action_pb2.AgentActionProto",
"mlagents_envs.rpc_utils._process_vector_observation",
"mlagents_envs.communicator_objects.observation_pb2.ObservationProto.FloatData",
"mlagents_envs.rpc_utils._process_visual_observation",
"numpy.random.rand",
"mlagents_envs.rpc_utils.steps_from_proto",
"pytest.raises",
"io.BytesIO",
"mlagents_envs.base_env.ActionSpec.create_discrete",
"mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto",
"mlagents_envs.rpc_utils.process_pixels",
"numpy.concatenate",
"numpy.zeros",
"numpy.take",
"mlagents_envs.communicator_objects.agent_info_action_pair_pb2.AgentInfoActionPairProto",
"PIL.Image.fromarray"
] |
[((2964, 2982), 'mlagents_envs.communicator_objects.observation_pb2.ObservationProto', 'ObservationProto', ([], {}), '()\n', (2980, 2982), False, 'from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto, NONE, PNG\n'), ((3569, 3587), 'mlagents_envs.communicator_objects.observation_pb2.ObservationProto', 'ObservationProto', ([], {}), '()\n', (3585, 3587), False, 'from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto, NONE, PNG\n'), ((4151, 4169), 'mlagents_envs.communicator_objects.observation_pb2.ObservationProto', 'ObservationProto', ([], {}), '()\n', (4167, 4169), False, 'from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto, NONE, PNG\n'), ((8617, 8643), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (8631, 8643), True, 'import numpy as np\n'), ((8710, 8737), 'mlagents_envs.rpc_utils.process_pixels', 'process_pixels', (['byte_arr', '(3)'], {}), '(byte_arr, 3)\n', (8724, 8737), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((8865, 8908), 'numpy.allclose', 'np.allclose', (['in_array', 'out_array'], {'atol': '(0.01)'}), '(in_array, out_array, atol=0.01)\n', (8876, 8908), True, 'import numpy as np\n'), ((9016, 9059), 'numpy.random.rand', 'np.random.rand', (['height', 'width', 'num_channels'], {}), '(height, width, num_channels)\n', (9030, 9059), True, 'import numpy as np\n'), ((9126, 9164), 'mlagents_envs.rpc_utils.process_pixels', 'process_pixels', (['byte_arr', 'num_channels'], {}), '(byte_arr, num_channels)\n', (9140, 9164), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((9309, 9352), 'numpy.allclose', 'np.allclose', (['in_array', 'out_array'], {'atol': '(0.01)'}), '(in_array, out_array, atol=0.01)\n', (9320, 9352), True, 'import numpy as np\n'), ((9402, 9428), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (9416, 9428), True, 'import numpy as np\n'), ((9495, 9522), 'mlagents_envs.rpc_utils.process_pixels', 'process_pixels', (['byte_arr', '(1)'], {}), '(byte_arr, 1)\n', (9509, 9522), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((10149, 10175), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (10163, 10175), True, 'import numpy as np\n'), ((10253, 10279), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (10267, 10279), True, 'import numpy as np\n'), ((10433, 10449), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (10447, 10449), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((10503, 10519), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (10517, 10519), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((10598, 10651), 'mlagents_envs.rpc_utils._process_visual_observation', '_process_visual_observation', (['(0)', '(128, 64, 3)', 'ap_list'], {}), '(0, (128, 64, 3), ap_list)\n', (10625, 10651), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, 
_process_vector_observation, steps_from_proto\n'), ((10709, 10760), 'numpy.allclose', 'np.allclose', (['arr[0, :, :, :]', 'in_array_1'], {'atol': '(0.01)'}), '(arr[0, :, :, :], in_array_1, atol=0.01)\n', (10720, 10760), True, 'import numpy as np\n'), ((10772, 10823), 'numpy.allclose', 'np.allclose', (['arr[1, :, :, :]', 'in_array_2'], {'atol': '(0.01)'}), '(arr[1, :, :, :], in_array_2, atol=0.01)\n', (10783, 10823), True, 'import numpy as np\n'), ((10892, 10918), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (10906, 10918), True, 'import numpy as np\n'), ((11022, 11064), 'numpy.mean', 'np.mean', (['in_array_1'], {'axis': '(2)', 'keepdims': '(True)'}), '(in_array_1, axis=2, keepdims=True)\n', (11029, 11064), True, 'import numpy as np\n'), ((11082, 11108), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (11096, 11108), True, 'import numpy as np\n'), ((11278, 11320), 'numpy.mean', 'np.mean', (['in_array_2'], {'axis': '(2)', 'keepdims': '(True)'}), '(in_array_2, axis=2, keepdims=True)\n', (11285, 11320), True, 'import numpy as np\n'), ((11332, 11348), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (11346, 11348), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((11402, 11418), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (11416, 11418), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((11497, 11550), 'mlagents_envs.rpc_utils._process_visual_observation', '_process_visual_observation', (['(0)', '(128, 64, 1)', 'ap_list'], {}), '(0, (128, 64, 1), ap_list)\n', (11524, 11550), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((11608, 11669), 'numpy.allclose', 'np.allclose', (['arr[0, :, :, :]', 'expected_out_array_1'], {'atol': '(0.01)'}), '(arr[0, :, :, :], expected_out_array_1, atol=0.01)\n', (11619, 11669), True, 'import numpy as np\n'), ((11681, 11742), 'numpy.allclose', 'np.allclose', (['arr[1, :, :, :]', 'expected_out_array_2'], {'atol': '(0.01)'}), '(arr[1, :, :, :], expected_out_array_2, atol=0.01)\n', (11692, 11742), True, 'import numpy as np\n'), ((11817, 11844), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(12)'], {}), '(128, 64, 12)\n', (11831, 11844), True, 'import numpy as np\n'), ((12045, 12098), 'numpy.take', 'np.take', (['in_array_1', '[0, 1, 2, 3, 6, 7, 8, 9]'], {'axis': '(2)'}), '(in_array_1, [0, 1, 2, 3, 6, 7, 8, 9], axis=2)\n', (12052, 12098), True, 'import numpy as np\n'), ((12110, 12126), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (12124, 12126), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((12200, 12253), 'mlagents_envs.rpc_utils._process_visual_observation', '_process_visual_observation', (['(0)', '(128, 64, 8)', 'ap_list'], {}), '(0, (128, 64, 8), ap_list)\n', (12227, 12253), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((12311, 12372), 'numpy.allclose', 'np.allclose', (['arr[0, :, :, :]', 'expected_out_array_1'], {'atol': '(0.01)'}), '(arr[0, :, :, :], expected_out_array_1, atol=0.01)\n', (12322, 12372), True, 'import numpy as np\n'), 
((12441, 12467), 'numpy.random.rand', 'np.random.rand', (['(128)', '(64)', '(3)'], {}), '(128, 64, 3)\n', (12455, 12467), True, 'import numpy as np\n'), ((12538, 12554), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (12552, 12554), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((12980, 13011), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'spec'], {}), '(ap_list, spec)\n', (12996, 13011), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((14376, 14416), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'behavior_spec'], {}), '(ap_list, behavior_spec)\n', (14392, 14416), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((14997, 15037), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'behavior_spec'], {}), '(ap_list, behavior_spec)\n', (15013, 15037), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((15472, 15512), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'behavior_spec'], {}), '(ap_list, behavior_spec)\n', (15488, 15512), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((16035, 16075), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'behavior_spec'], {}), '(ap_list, behavior_spec)\n', (16051, 16075), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((16258, 16280), 'mlagents_envs.communicator_objects.brain_parameters_pb2.BrainParametersProto', 'BrainParametersProto', ([], {}), '()\n', (16278, 16280), False, 'from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto\n'), ((16400, 16441), 'mlagents_envs.rpc_utils.behavior_spec_from_proto', 'behavior_spec_from_proto', (['bp', 'agent_proto'], {}), '(bp, agent_proto)\n', (16424, 16441), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((16740, 16762), 'mlagents_envs.communicator_objects.brain_parameters_pb2.BrainParametersProto', 'BrainParametersProto', ([], {}), '()\n', (16760, 16762), False, 'from mlagents_envs.communicator_objects.brain_parameters_pb2 import BrainParametersProto\n'), ((16879, 16920), 'mlagents_envs.rpc_utils.behavior_spec_from_proto', 'behavior_spec_from_proto', (['bp', 'agent_proto'], {}), '(bp, agent_proto)\n', (16903, 16920), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((1158, 1174), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {}), '()\n', (1172, 1174), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((2606, 2639), 'PIL.Image.fromarray', 'Image.fromarray', (['sub_image', '"""RGB"""'], {}), "(sub_image, 'RGB')\n", (2621, 2639), False, 'from PIL 
import Image\n'), ((2657, 2669), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2667, 2669), False, 'import io\n'), ((5717, 5861), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {'reward': 'reward', 'done': 'done', 'id': 'agent_id', 'max_step_reached': 'max_step_reached', 'action_mask': 'agent_mask', 'observations': 'observations'}), '(reward=reward, done=done, id=agent_id, max_step_reached=\n max_step_reached, action_mask=agent_mask, observations=observations)\n', (5731, 5861), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((6953, 7097), 'mlagents_envs.communicator_objects.agent_info_pb2.AgentInfoProto', 'AgentInfoProto', ([], {'reward': 'reward', 'done': 'done', 'id': 'agent_id', 'max_step_reached': 'max_step_reached', 'action_mask': 'None', 'observations': 'final_observations'}), '(reward=reward, done=done, id=agent_id, max_step_reached=\n max_step_reached, action_mask=None, observations=final_observations)\n', (6967, 7097), False, 'from mlagents_envs.communicator_objects.agent_info_pb2 import AgentInfoProto\n'), ((7867, 7885), 'mlagents_envs.communicator_objects.agent_action_pb2.AgentActionProto', 'AgentActionProto', ([], {}), '()\n', (7883, 7885), False, 'from mlagents_envs.communicator_objects.agent_action_pb2 import AgentActionProto\n'), ((8334, 8413), 'mlagents_envs.communicator_objects.agent_info_action_pair_pb2.AgentInfoActionPairProto', 'AgentInfoActionPairProto', ([], {'agent_info': 'agent_info_proto', 'action_info': 'action_proto'}), '(agent_info=agent_info_proto, action_info=action_proto)\n', (8358, 8413), False, 'from mlagents_envs.communicator_objects.agent_info_action_pair_pb2 import AgentInfoActionPairProto\n'), ((9924, 9981), 'mlagents_envs.rpc_utils._process_vector_observation', '_process_vector_observation', (['obs_index', 'shape', 'list_proto'], {}), '(obs_index, shape, list_proto)\n', (9951, 9981), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((10058, 10090), 'numpy.allclose', 'np.allclose', (['arr', '(0.1)'], {'atol': '(0.01)'}), '(arr, 0.1, atol=0.01)\n', (10069, 10090), True, 'import numpy as np\n'), ((12627, 12667), 'pytest.raises', 'pytest.raises', (['UnityObservationException'], {}), '(UnityObservationException)\n', (12640, 12667), False, 'import pytest\n'), ((12677, 12730), 'mlagents_envs.rpc_utils._process_visual_observation', '_process_visual_observation', (['(0)', '(128, 42, 3)', 'ap_list'], {}), '(0, (128, 42, 3), ap_list)\n', (12704, 12730), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((12852, 12883), 'mlagents_envs.base_env.ActionSpec.create_continuous', 'ActionSpec.create_continuous', (['(3)'], {}), '(3)\n', (12880, 12883), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((14245, 14279), 'mlagents_envs.base_env.ActionSpec.create_discrete', 'ActionSpec.create_discrete', (['(7, 3)'], {}), '((7, 3))\n', (14271, 14279), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((14867, 14900), 'mlagents_envs.base_env.ActionSpec.create_discrete', 'ActionSpec.create_discrete', (['(10,)'], {}), '((10,))\n', (14893, 14900), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((15338, 
15375), 'mlagents_envs.base_env.ActionSpec.create_discrete', 'ActionSpec.create_discrete', (['(2, 2, 6)'], {}), '((2, 2, 6))\n', (15364, 15375), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((15906, 15938), 'mlagents_envs.base_env.ActionSpec.create_continuous', 'ActionSpec.create_continuous', (['(10)'], {}), '(10)\n', (15934, 15938), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((17236, 17267), 'mlagents_envs.base_env.ActionSpec.create_continuous', 'ActionSpec.create_continuous', (['(3)'], {}), '(3)\n', (17264, 17267), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((17359, 17386), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (17372, 17386), False, 'import pytest\n'), ((17396, 17436), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'behavior_spec'], {}), '(ap_list, behavior_spec)\n', (17412, 17436), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((17581, 17612), 'mlagents_envs.base_env.ActionSpec.create_continuous', 'ActionSpec.create_continuous', (['(3)'], {}), '(3)\n', (17609, 17612), False, 'from mlagents_envs.base_env import BehaviorSpec, ActionSpec, DecisionSteps, TerminalSteps\n'), ((17704, 17731), 'pytest.raises', 'pytest.raises', (['RuntimeError'], {}), '(RuntimeError)\n', (17717, 17731), False, 'import pytest\n'), ((17741, 17781), 'mlagents_envs.rpc_utils.steps_from_proto', 'steps_from_proto', (['ap_list', 'behavior_spec'], {}), '(ap_list, behavior_spec)\n', (17757, 17781), False, 'from mlagents_envs.rpc_utils import behavior_spec_from_proto, process_pixels, _process_visual_observation, _process_vector_observation, steps_from_proto\n'), ((1508, 1526), 'mlagents_envs.communicator_objects.observation_pb2.ObservationProto', 'ObservationProto', ([], {}), '()\n', (1524, 1526), False, 'from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto, NONE, PNG\n'), ((2493, 2529), 'numpy.zeros', 'np.zeros', (['zero_shape'], {'dtype': 'np.uint8'}), '(zero_shape, dtype=np.uint8)\n', (2501, 2529), True, 'import numpy as np\n'), ((2554, 2592), 'numpy.concatenate', 'np.concatenate', (['[sub_image, z]'], {'axis': '(2)'}), '([sub_image, z], axis=2)\n', (2568, 2592), True, 'import numpy as np\n'), ((8792, 8820), 'numpy.sum', 'np.sum', (['(in_array - out_array)'], {}), '(in_array - out_array)\n', (8798, 8820), True, 'import numpy as np\n'), ((8823, 8846), 'numpy.prod', 'np.prod', (['in_array.shape'], {}), '(in_array.shape)\n', (8830, 8846), True, 'import numpy as np\n'), ((9236, 9264), 'numpy.sum', 'np.sum', (['(in_array - out_array)'], {}), '(in_array - out_array)\n', (9242, 9264), True, 'import numpy as np\n'), ((9267, 9290), 'numpy.prod', 'np.prod', (['in_array.shape'], {}), '(in_array.shape)\n', (9274, 9290), True, 'import numpy as np\n'), ((4973, 5037), 'numpy.concatenate', 'np.concatenate', (['(agent_mask, _branch[agent_id_index, :])'], {'axis': '(0)'}), '((agent_mask, _branch[agent_id_index, :]), axis=0)\n', (4987, 5037), True, 'import numpy as np\n'), ((1760, 1785), 'numpy.prod', 'np.prod', (['shape[obs_index]'], {}), '(shape[obs_index])\n', (1767, 1785), True, 'import numpy as np\n'), ((5507, 5551), 'mlagents_envs.communicator_objects.observation_pb2.ObservationProto.FloatData', 'ObservationProto.FloatData', ([], {'data': 
'observation'}), '(data=observation)\n', (5533, 5551), False, 'from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto, NONE, PNG\n'), ((6743, 6787), 'mlagents_envs.communicator_objects.observation_pb2.ObservationProto.FloatData', 'ObservationProto.FloatData', ([], {'data': 'observation'}), '(data=observation)\n', (6769, 6787), False, 'from mlagents_envs.communicator_objects.observation_pb2 import ObservationProto, NONE, PNG\n')]
|
#!/usr/bin/env python3 -u
# -*- coding: utf-8 -*-
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
from logging import warning
import numpy as np
import pandas as pd
from sklearn.utils import check_array, check_consistent_length
from sktime.datatypes import check_is_scitype, convert
from sktime.performance_metrics.forecasting._classes import _BaseForecastingErrorMetric
# TODO: Rework tests now
class _BaseProbaForecastingErrorMetric(_BaseForecastingErrorMetric):
"""Base class for probabilistic forecasting error metrics in sktime.
Extends sktime's BaseMetric to the forecasting interface. Forecasting error
metrics measure the error (loss) between forecasts and true values. Lower
values are better.
Parameters
----------
multioutput : {'raw_values', 'uniform_average'} or array-like of shape \
(n_outputs,), default='uniform_average'
Defines how to aggregate metric for multivariate (multioutput) data.
If array-like, values used as weights to average the errors.
If 'raw_values', returns a full set of errors in case of multioutput input.
If 'uniform_average', errors of all outputs are averaged with uniform weight.
score_average : bool, optional, default=True
for interval and quantile losses only
if True, metric/loss is averaged by upper/lower and/or quantile
if False, metric/loss is not averaged by upper/lower and/or quantile
"""
_tags = {
"scitype:y_pred": "pred_quantiles",
"lower_is_better": True,
}
def __init__(
self,
func=None,
name=None,
multioutput="uniform_average",
score_average=True,
):
self.multioutput = multioutput
self.score_average = score_average
super().__init__(func, name=name, multioutput=multioutput)
def __call__(self, y_true, y_pred, **kwargs):
"""Calculate metric value using underlying metric function.
Parameters
----------
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
        y_pred : return object of probabilistic prediction method scitype:y_pred
must be at fh and for variables equal to those in y_true.
Returns
-------
loss : float or 1-column pd.DataFrame with calculated metric value(s)
metric is always averaged (arithmetic) over fh values
if multioutput = "raw_values",
will have a column level corresponding to variables in y_true
            if multioutput = "uniform_average" or array-like
entries will be averaged over output variable column
if score_average = False,
will have column levels corresponding to quantiles/intervals
if score_average = True,
entries will be averaged over quantiles/interval column
"""
return self.evaluate(y_true, y_pred, multioutput=self.multioutput, **kwargs)
def evaluate(self, y_true, y_pred, multioutput=None, **kwargs):
"""Evaluate the desired metric on given inputs.
Parameters
----------
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
        y_pred : return object of probabilistic prediction method scitype:y_pred
must be at fh and for variables equal to those in y_true
multioutput : string "uniform_average" or "raw_values" determines how\
multioutput results will be treated.
Returns
-------
loss : float or 1-column pd.DataFrame with calculated metric value(s)
metric is always averaged (arithmetic) over fh values
if multioutput = "raw_values",
will have a column level corresponding to variables in y_true
            if multioutput = "uniform_average" or array-like
entries will be averaged over output variable column
if score_average = False,
will have column levels corresponding to quantiles/intervals
if score_average = True,
entries will be averaged over quantiles/interval column
"""
# Input checks and conversions
y_true_inner, y_pred_inner, multioutput = self._check_ys(
y_true, y_pred, multioutput
)
# Don't want to include scores for 0 width intervals, makes no sense
if 0 in y_pred_inner.columns.get_level_values(1):
y_pred_inner = y_pred_inner.drop(0, axis=1, level=1)
warning(
"Dropping 0 width interval, don't include 0.5 quantile\
for interval metrics."
)
# pass to inner function
out = self._evaluate(y_true_inner, y_pred_inner, multioutput, **kwargs)
if self.score_average and multioutput == "uniform_average":
out = float(out.mean(axis=1, level=None)) # average over all
if self.score_average and multioutput == "raw_values":
out = out.mean(axis=1, level=0) # average over scores
if not self.score_average and multioutput == "uniform_average":
out = out.mean(axis=1, level=1) # average over variables
if not self.score_average and multioutput == "raw_values":
out = out # don't average
if isinstance(out, pd.DataFrame):
out = out.squeeze(axis=0)
return out
def _evaluate(self, y_true, y_pred, multioutput, **kwargs):
"""Evaluate the desired metric on given inputs.
Parameters
----------
y_true : pd.DataFrame or of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
y_pred : pd.DataFrame of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Forecasted values.
multioutput : string "uniform_average" or "raw_values" determines how\
multioutput results will be treated.
Returns
-------
loss : pd.DataFrame of shape (, n_outputs), calculated loss metric.
"""
# Default implementation relies on implementation of evaluate_by_index
try:
index_df = self._evaluate_by_index(y_true, y_pred, multioutput)
out_df = pd.DataFrame(index_df.mean(axis=0)).T
out_df.columns = index_df.columns
return out_df
except RecursionError:
RecursionError("Must implement one of _evaluate or _evaluate_by_index")
def evaluate_by_index(self, y_true, y_pred, multioutput=None, **kwargs):
"""Return the metric evaluated at each time point.
Parameters
----------
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
        y_pred : return object of probabilistic prediction method scitype:y_pred
must be at fh and for variables equal to those in y_true
multioutput : string "uniform_average" or "raw_values" determines how\
multioutput results will be treated.
Returns
-------
loss : pd.DataFrame of length len(fh), with calculated metric value(s)
i-th column contains metric value(s) for prediction at i-th fh element
if multioutput = "raw_values",
will have a column level corresponding to variables in y_true
            if multioutput = "uniform_average" or array-like
entries will be averaged over output variable column
if score_average = False,
will have column levels corresponding to quantiles/intervals
if score_average = True,
entries will be averaged over quantiles/interval column
"""
# Input checks and conversions
y_true_inner, y_pred_inner, multioutput = self._check_ys(
y_true, y_pred, multioutput
)
# Don't want to include scores for 0 width intervals, makes no sense
if 0 in y_pred_inner.columns.get_level_values(1):
y_pred_inner = y_pred_inner.drop(0, axis=1, level=1)
warning(
"Dropping 0 width interval, don't include 0.5 quantile\
for interval metrics."
)
# pass to inner function
out = self._evaluate_by_index(y_true_inner, y_pred_inner, multioutput, **kwargs)
if self.score_average and multioutput == "uniform_average":
out = out.mean(axis=1, level=None) # average over all
if self.score_average and multioutput == "raw_values":
out = out.mean(axis=1, level=0) # average over scores
if not self.score_average and multioutput == "uniform_average":
out = out.mean(axis=1, level=1) # average over variables
if not self.score_average and multioutput == "raw_values":
out = out # don't average
return out
def _evaluate_by_index(self, y_true, y_pred, multioutput, **kwargs):
"""Logic for finding the metric evaluated at each index.
By default this uses _evaluate to find jackknifed pseudosamples. This
estimates the error at each of the time points.
Parameters
----------
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Forecasted values.
multioutput : string "uniform_average" or "raw_values" determines how \
multioutput results will be treated.
"""
n = y_true.shape[0]
out_series = pd.Series(index=y_pred.index)
try:
x_bar = self.evaluate(y_true, y_pred, multioutput, **kwargs)
for i in range(n):
out_series[i] = n * x_bar - (n - 1) * self.evaluate(
np.vstack((y_true[:i, :], y_true[i + 1 :, :])), # noqa
np.vstack((y_pred[:i, :], y_pred[i + 1 :, :])), # noqa
multioutput,
)
return out_series
except RecursionError:
RecursionError("Must implement one of _evaluate or _evaluate_by_index")
def _check_consistent_input(self, y_true, y_pred, multioutput):
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
        if not isinstance(y_pred, pd.DataFrame):
            raise ValueError("y_pred should be a dataframe.")
        if not all(y_pred.dtypes == float):
            raise ValueError("Data should be numeric.")
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ("raw_values", "uniform_average", "variance_weighted")
if isinstance(multioutput, str):
if multioutput not in allowed_multioutput_str:
raise ValueError(
"Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str, multioutput
)
)
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in multi-output case.")
elif n_outputs != len(multioutput):
raise ValueError(
"There must be equally many custom weights (%d) as outputs (%d)."
% (len(multioutput), n_outputs)
)
return y_true, y_pred, multioutput
def _check_ys(self, y_true, y_pred, multioutput):
if multioutput is None:
multioutput = self.multioutput
valid, msg, metadata = check_is_scitype(
y_pred, scitype="Proba", return_metadata=True, var_name="y_pred"
)
if not valid:
raise TypeError(msg)
y_pred_mtype = metadata["mtype"]
inner_y_pred_mtype = self.get_tag("scitype:y_pred")
y_pred_inner = convert(
y_pred,
from_type=y_pred_mtype,
to_type=inner_y_pred_mtype,
as_scitype="Proba",
)
if inner_y_pred_mtype == "pred_interval":
if 0.0 in y_pred_inner.columns.get_level_values(1):
for var in y_pred_inner.columns.get_level_values(0):
y_pred_inner[var, 0.0, "upper"] = y_pred_inner[var, 0.0, "lower"]
y_pred_inner.sort_index(axis=1, level=[0, 1], inplace=True)
y_true, y_pred, multioutput = self._check_consistent_input(
y_true, y_pred, multioutput
)
return y_true, y_pred_inner, multioutput
def _get_alpha_from(self, y_pred):
"""Fetch the alphas present in y_pred."""
alphas = np.unique(list(y_pred.columns.get_level_values(1)))
if not all(((alphas > 0) & (alphas < 1))):
raise ValueError("Alpha must be between 0 and 1.")
return alphas
def _check_alpha(self, alpha):
"""Check that alpha input is valid."""
if alpha is None:
return None
if isinstance(alpha, float):
alpha = [alpha]
if not isinstance(alpha, np.ndarray):
alpha = np.asarray(alpha)
if not all(((alpha > 0) & (alpha < 1))):
raise ValueError("Alpha must be between 0 and 1.")
return alpha
def _handle_multioutput(self, loss, multioutput):
"""Specificies how multivariate outputs should be handled.
Parameters
----------
loss : float, np.ndarray the evaluated metric value.
multioutput : string "uniform_average" or "raw_values" determines how \
multioutput results will be treated.
"""
if isinstance(multioutput, str):
if multioutput == "raw_values":
return loss
elif multioutput == "uniform_average":
# pass None as weights to np.average: uniform mean
multioutput = None
else:
raise ValueError(
"multioutput is expected to be 'raw_values' "
"or 'uniform_average' but we got %r"
" instead." % multioutput
)
if loss.ndim > 1:
out = np.average(loss, weights=multioutput, axis=1)
else:
out = np.average(loss, weights=multioutput)
return out
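# A minimal standalone sketch (not part of sktime) of the jackknife pseudo-sample
# idea used by `_evaluate_by_index` above: for a statistic x_bar computed on n
# points, the i-th pseudo-value is n * x_bar - (n - 1) * x_bar_without_point_i.
# With the mean as the statistic, the pseudo-values recover the raw observations.
def _jackknife_pseudo_values_demo(x):
    n = len(x)
    x_bar = np.mean(x)
    loo = np.array([np.mean(np.delete(x, i)) for i in range(n)])  # leave-one-out means
    return n * x_bar - (n - 1) * loo
# e.g. _jackknife_pseudo_values_demo(np.array([1.0, 2.0, 6.0])) -> array([1., 2., 6.])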
class PinballLoss(_BaseProbaForecastingErrorMetric):
"""Evaluate the pinball loss at all quantiles given in data.
Parameters
----------
multioutput : string "uniform_average" or "raw_values" determines how\
multioutput results will be treated.
score_average : bool, optional, default = True
specifies whether scores for each quantile should be averaged.
alpha (optional) : float, list or np.ndarray, specifies what quantiles to \
evaluate metric at.
"""
_tags = {
"scitype:y_pred": "pred_quantiles",
"lower_is_better": True,
}
def __init__(
self,
multioutput="uniform_average",
score_average=True,
alpha=None,
):
name = "PinballLoss"
self.score_average = score_average
self.alpha = self._check_alpha(alpha)
self.metric_args = {"alpha": alpha}
super().__init__(
name=name, multioutput=multioutput, score_average=score_average
)
def _evaluate_by_index(self, y_true, y_pred, multioutput, **kwargs):
"""Logic for finding the metric evaluated at each index.
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
            Ground truth (correct) target values.
y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Forecasted values.
multioutput : string "uniform_average" or "raw_values"
Determines how multioutput results will be treated.
"""
alpha = self.alpha
y_pred_alphas = self._get_alpha_from(y_pred)
if alpha is None:
alphas = y_pred_alphas
else:
# if alpha was provided, check whether they are predicted
# if not all alpha are observed, raise a ValueError
if not np.isin(alpha, y_pred_alphas).all():
# todo: make error msg more informative
# which alphas are missing
msg = "not all quantile values in alpha are available in y_pred"
raise ValueError(msg)
else:
alphas = alpha
alphas = self._check_alpha(alphas)
alpha_preds = y_pred.iloc[
:, [x in alphas for x in y_pred.columns.get_level_values(1)]
]
alpha_preds_np = alpha_preds.to_numpy()
alpha_mat = np.repeat(
(y_pred.columns.get_level_values(1).to_numpy().reshape(1, -1)),
repeats=y_true.shape[0],
axis=0,
)
y_true_np = np.repeat(y_true, axis=1, repeats=len(alphas))
diff = y_true_np - alpha_preds_np
sign = (diff >= 0).astype(diff.dtype)
loss = alpha_mat * sign * diff - (1 - alpha_mat) * (1 - sign) * diff
out_df = pd.DataFrame(loss, columns=alpha_preds.columns)
return out_df
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Retrieve test parameters."""
params1 = {}
params2 = {"alpha": [0.1, 0.5, 0.9]}
return [params1, params2]
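# A hedged numeric check (toy values, not sktime API) of the pinball loss formula
# implemented in `PinballLoss._evaluate_by_index` above: under-prediction of the
# alpha-quantile costs alpha per unit, over-prediction costs (1 - alpha) per unit.
def _pinball_demo(y_true, q_pred, alpha):
    diff = y_true - q_pred
    return alpha * diff if diff >= 0 else (1 - alpha) * (-diff)
# e.g. for alpha = 0.9: _pinball_demo(10.0, 8.0, 0.9) -> 1.8 (under-prediction),
# while _pinball_demo(10.0, 12.0, 0.9) -> 0.2, i.e. a 9:1 penalty ratio.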
class EmpiricalCoverage(_BaseProbaForecastingErrorMetric):
"""Evaluate the pinball loss at all quantiles given in data.
Parameters
----------
multioutput : string "uniform_average" or "raw_values" determines how\
multioutput results will be treated.
score_average : bool, optional, default = True
specifies whether scores for each quantile should be averaged.
"""
_tags = {
"scitype:y_pred": "pred_interval",
"lower_is_better": False,
}
def __init__(self, multioutput="uniform_average", score_average=True):
name = "EmpiricalCoverage"
self.score_average = score_average
self.multioutput = multioutput
super().__init__(
name=name, score_average=score_average, multioutput=multioutput
)
def _evaluate_by_index(self, y_true, y_pred, multioutput, **kwargs):
"""Logic for finding the metric evaluated at each index.
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Forecasted values.
multioutput : string "uniform_average" or "raw_values" determines how \
multioutput results will be treated.
"""
lower = y_pred.iloc[:, y_pred.columns.get_level_values(2) == "lower"].to_numpy()
upper = y_pred.iloc[:, y_pred.columns.get_level_values(2) == "upper"].to_numpy()
if not isinstance(y_true, np.ndarray):
y_true_np = y_true.to_numpy()
else:
y_true_np = y_true
if y_true_np.ndim == 1:
y_true_np = y_true.reshape(-1, 1)
scores = np.unique(np.round(y_pred.columns.get_level_values(1), 7))
no_scores = len(scores)
vars = np.unique(y_pred.columns.get_level_values(0))
y_true_np = np.tile(y_true_np, no_scores)
truth_array = (y_true_np > lower).astype(int) * (y_true_np < upper).astype(int)
out_df = pd.DataFrame(
truth_array, columns=pd.MultiIndex.from_product([vars, scores])
)
return out_df
@classmethod
    def get_test_params(cls):
"""Retrieve test parameters."""
params1 = {}
return [params1]
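# A minimal numpy illustration (assumed toy values) of the coverage indicator used
# in `EmpiricalCoverage._evaluate_by_index` above: an observation counts as covered
# when it lies strictly inside its (lower, upper) interval.
_y_demo = np.array([1.0, 2.0, 3.0])
_lo_demo = np.array([0.5, 2.5, 2.0])
_hi_demo = np.array([1.5, 3.5, 4.0])
_covered_demo = (_y_demo > _lo_demo).astype(int) * (_y_demo < _hi_demo).astype(int)  # -> [1, 0, 1]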
class ConstraintViolation(_BaseProbaForecastingErrorMetric):
"""Evaluate the pinball loss at all quantiles given in data.
Parameters
----------
multioutput : string "uniform_average" or "raw_values" determines how\
multioutput results will be treated.
score_average : bool, optional, default = True
specifies whether scores for each quantile should be averaged.
"""
_tags = {
"scitype:y_pred": "pred_interval",
"lower_is_better": True,
}
def __init__(self, multioutput="uniform_average", score_average=True):
name = "ConstraintViolation"
self.score_average = score_average
self.multioutput = multioutput
super().__init__(
name=name, score_average=score_average, multioutput=multioutput
)
def _evaluate_by_index(self, y_true, y_pred, multioutput, **kwargs):
"""Logic for finding the metric evaluated at each index.
y_true : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Ground truth (correct) target values.
y_pred : pd.Series, pd.DataFrame or np.array of shape (fh,) or \
(fh, n_outputs) where fh is the forecasting horizon
Forecasted values.
multioutput : string "uniform_average" or "raw_values" determines how \
multioutput results will be treated.
"""
lower = y_pred.iloc[:, y_pred.columns.get_level_values(2) == "lower"].to_numpy()
upper = y_pred.iloc[:, y_pred.columns.get_level_values(2) == "upper"].to_numpy()
if not isinstance(y_true, np.ndarray):
y_true_np = y_true.to_numpy()
else:
y_true_np = y_true
if y_true_np.ndim == 1:
y_true_np = y_true.reshape(-1, 1)
scores = np.unique(np.round(y_pred.columns.get_level_values(1), 7))
no_scores = len(scores)
vars = np.unique(y_pred.columns.get_level_values(0))
y_true_np = np.tile(y_true_np, no_scores)
int_distance = ((y_true_np < lower).astype(int) * abs(lower - y_true_np)) + (
(y_true_np > upper).astype(int) * abs(y_true_np - upper)
)
out_df = pd.DataFrame(
int_distance, columns=pd.MultiIndex.from_product([vars, scores])
)
return out_df
@classmethod
    def get_test_params(cls):
"""Retrieve test parameters."""
params1 = {}
return [params1]
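# Companion sketch (reusing the toy arrays above) for
# `ConstraintViolation._evaluate_by_index`: the score is the absolute distance by
# which an observation escapes its interval, and zero when it is covered.
_violation_demo = ((_y_demo < _lo_demo).astype(int) * abs(_lo_demo - _y_demo)
                   + (_y_demo > _hi_demo).astype(int) * abs(_y_demo - _hi_demo))  # -> [0.0, 0.5, 0.0]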
|
[
"pandas.DataFrame",
"sktime.datatypes.check_is_scitype",
"numpy.isin",
"numpy.average",
"sklearn.utils.check_array",
"logging.warning",
"numpy.asarray",
"sktime.datatypes.convert",
"pandas.MultiIndex.from_product",
"pandas.Series",
"numpy.tile",
"sklearn.utils.check_consistent_length",
"numpy.vstack"
] |
[((10207, 10236), 'pandas.Series', 'pd.Series', ([], {'index': 'y_pred.index'}), '(index=y_pred.index)\n', (10216, 10236), True, 'import pandas as pd\n'), ((10848, 10887), 'sklearn.utils.check_consistent_length', 'check_consistent_length', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (10871, 10887), False, 'from sklearn.utils import check_array, check_consistent_length\n'), ((10906, 10942), 'sklearn.utils.check_array', 'check_array', (['y_true'], {'ensure_2d': '(False)'}), '(y_true, ensure_2d=False)\n', (10917, 10942), False, 'from sklearn.utils import check_array, check_consistent_length\n'), ((12377, 12464), 'sktime.datatypes.check_is_scitype', 'check_is_scitype', (['y_pred'], {'scitype': '"""Proba"""', 'return_metadata': '(True)', 'var_name': '"""y_pred"""'}), "(y_pred, scitype='Proba', return_metadata=True, var_name=\n 'y_pred')\n", (12393, 12464), False, 'from sktime.datatypes import check_is_scitype, convert\n'), ((12664, 12755), 'sktime.datatypes.convert', 'convert', (['y_pred'], {'from_type': 'y_pred_mtype', 'to_type': 'inner_y_pred_mtype', 'as_scitype': '"""Proba"""'}), "(y_pred, from_type=y_pred_mtype, to_type=inner_y_pred_mtype,\n as_scitype='Proba')\n", (12671, 12755), False, 'from sktime.datatypes import check_is_scitype, convert\n'), ((17986, 18033), 'pandas.DataFrame', 'pd.DataFrame', (['loss'], {'columns': 'alpha_preds.columns'}), '(loss, columns=alpha_preds.columns)\n', (17998, 18033), True, 'import pandas as pd\n'), ((20303, 20332), 'numpy.tile', 'np.tile', (['y_true_np', 'no_scores'], {}), '(y_true_np, no_scores)\n', (20310, 20332), True, 'import numpy as np\n'), ((22735, 22764), 'numpy.tile', 'np.tile', (['y_true_np', 'no_scores'], {}), '(y_true_np, no_scores)\n', (22742, 22764), True, 'import numpy as np\n'), ((4833, 4940), 'logging.warning', 'warning', (['"""Dropping 0 width interval, don\'t include 0.5 quantile for interval metrics."""'], {}), '(\n "Dropping 0 width interval, don\'t include 0.5 quantile for interval metrics."\n )\n', (4840, 4940), False, 'from logging import warning\n'), ((8562, 8669), 'logging.warning', 'warning', (['"""Dropping 0 width interval, don\'t include 0.5 quantile for interval metrics."""'], {}), '(\n "Dropping 0 width interval, don\'t include 0.5 quantile for interval metrics."\n )\n', (8569, 8669), False, 'from logging import warning\n'), ((13881, 13898), 'numpy.asarray', 'np.asarray', (['alpha'], {}), '(alpha)\n', (13891, 13898), True, 'import numpy as np\n'), ((14948, 14993), 'numpy.average', 'np.average', (['loss'], {'weights': 'multioutput', 'axis': '(1)'}), '(loss, weights=multioutput, axis=1)\n', (14958, 14993), True, 'import numpy as np\n'), ((15026, 15063), 'numpy.average', 'np.average', (['loss'], {'weights': 'multioutput'}), '(loss, weights=multioutput)\n', (15036, 15063), True, 'import numpy as np\n'), ((11772, 11813), 'sklearn.utils.check_array', 'check_array', (['multioutput'], {'ensure_2d': '(False)'}), '(multioutput, ensure_2d=False)\n', (11783, 11813), False, 'from sklearn.utils import check_array, check_consistent_length\n'), ((20487, 20529), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[vars, scores]'], {}), '([vars, scores])\n', (20513, 20529), True, 'import pandas as pd\n'), ((22997, 23039), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[vars, scores]'], {}), '([vars, scores])\n', (23023, 23039), True, 'import pandas as pd\n'), ((17043, 17072), 'numpy.isin', 'np.isin', (['alpha', 'y_pred_alphas'], {}), '(alpha, y_pred_alphas)\n', (17050, 17072), True, 'import numpy as 
np\n'), ((10443, 10488), 'numpy.vstack', 'np.vstack', (['(y_true[:i, :], y_true[i + 1:, :])'], {}), '((y_true[:i, :], y_true[i + 1:, :]))\n', (10452, 10488), True, 'import numpy as np\n'), ((10519, 10564), 'numpy.vstack', 'np.vstack', (['(y_pred[:i, :], y_pred[i + 1:, :])'], {}), '((y_pred[:i, :], y_pred[i + 1:, :]))\n', (10528, 10564), True, 'import numpy as np\n')]
|
import time
import numpy as np
import torch
from onpolicy.runner.shared.base_runner import Runner
import wandb
import imageio
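# Helper: detach a torch tensor from the graph, move it to CPU and convert to numpy.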
def _t2n(x):
return x.detach().cpu().numpy()
class MPERunner(Runner):
"""Runner class to perform training, evaluation. and data collection for the MPEs. See parent class for details."""
def __init__(self, config):
super(MPERunner, self).__init__(config)
def run(self):
self.warmup()
start = time.time()
episodes = int(self.num_env_steps) // self.episode_length // self.n_rollout_threads
for episode in range(episodes):
if self.use_linear_lr_decay:
self.trainer.policy.lr_decay(episode, episodes)
for step in range(self.episode_length):
# Sample actions
values, actions, action_log_probs, rnn_states, rnn_states_critic, actions_env = self.collect(step)
                # Observe reward and next obs
obs, rewards, dones, infos = self.envs.step(actions_env)
data = obs, rewards, dones, infos, values, actions, action_log_probs, rnn_states, rnn_states_critic
# insert data into buffer
self.insert(data)
# compute return and update network
self.compute()
train_infos = self.train()
# post process
total_num_steps = (episode + 1) * self.episode_length * self.n_rollout_threads
# save model
if (episode % self.save_interval == 0 or episode == episodes - 1):
self.save()
# log information
if episode % self.log_interval == 0:
end = time.time()
print("\n Scenario {} Algo {} Exp {} updates {}/{} episodes, total num timesteps {}/{}, FPS {}.\n"
.format(self.all_args.scenario_name,
self.algorithm_name,
self.experiment_name,
episode,
episodes,
total_num_steps,
self.num_env_steps,
int(total_num_steps / (end - start))))
if self.env_name == "MPE":
env_infos = {}
for agent_id in range(self.num_agents):
idv_rews = []
for info in infos:
if 'individual_reward' in info[agent_id].keys():
idv_rews.append(info[agent_id]['individual_reward'])
agent_k = 'agent%i/individual_rewards' % agent_id
env_infos[agent_k] = idv_rews
train_infos["average_episode_rewards"] = np.mean(self.buffer.rewards) * self.episode_length
print("average episode rewards is {}".format(train_infos["average_episode_rewards"]))
self.log_train(train_infos, total_num_steps)
self.log_env(env_infos, total_num_steps)
# eval
if episode % self.eval_interval == 0 and self.use_eval:
self.eval(total_num_steps)
def warmup(self):
# reset env
obs = self.envs.reset()
# replay buffer
if self.use_centralized_V:
share_obs = obs.reshape(self.n_rollout_threads, -1)
share_obs = np.expand_dims(share_obs, 1).repeat(self.num_agents, axis=1)
else:
share_obs = obs
self.buffer.share_obs[0] = share_obs.copy()
self.buffer.obs[0] = obs.copy()
@torch.no_grad()
def collect(self, step):
self.trainer.prep_rollout()
value, action, action_log_prob, rnn_states, rnn_states_critic \
= self.trainer.policy.get_actions(np.concatenate(self.buffer.share_obs[step]),
np.concatenate(self.buffer.obs[step]),
np.concatenate(self.buffer.rnn_states[step]),
np.concatenate(self.buffer.rnn_states_critic[step]),
np.concatenate(self.buffer.masks[step]))
# [self.envs, agents, dim]
values = np.array(np.split(_t2n(value), self.n_rollout_threads))
actions = np.array(np.split(_t2n(action), self.n_rollout_threads))
action_log_probs = np.array(np.split(_t2n(action_log_prob), self.n_rollout_threads))
rnn_states = np.array(np.split(_t2n(rnn_states), self.n_rollout_threads))
rnn_states_critic = np.array(np.split(_t2n(rnn_states_critic), self.n_rollout_threads))
# rearrange action
if self.envs.action_space[0].__class__.__name__ == 'MultiDiscrete':
for i in range(self.envs.action_space[0].shape):
uc_actions_env = np.eye(self.envs.action_space[0].high[i] + 1)[actions[:, :, i]]
if i == 0:
actions_env = uc_actions_env
else:
actions_env = np.concatenate((actions_env, uc_actions_env), axis=2)
elif self.envs.action_space[0].__class__.__name__ == 'Discrete':
actions_env = np.squeeze(np.eye(self.envs.action_space[0].n)[actions], 2)
else:
raise NotImplementedError
return values, actions, action_log_probs, rnn_states, rnn_states_critic, actions_env
def insert(self, data):
obs, rewards, dones, infos, values, actions, action_log_probs, rnn_states, rnn_states_critic = data
rnn_states[dones == True] = np.zeros(((dones == True).sum(), self.recurrent_N, self.hidden_size), dtype=np.float32)
rnn_states_critic[dones == True] = np.zeros(((dones == True).sum(), *self.buffer.rnn_states_critic.shape[3:]), dtype=np.float32)
masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)
if self.use_centralized_V:
share_obs = obs.reshape(self.n_rollout_threads, -1)
share_obs = np.expand_dims(share_obs, 1).repeat(self.num_agents, axis=1)
else:
share_obs = obs
self.buffer.insert(share_obs, obs, rnn_states, rnn_states_critic, actions, action_log_probs, values, rewards, masks)
@torch.no_grad()
def eval(self, total_num_steps):
eval_episode_rewards = []
eval_obs = self.eval_envs.reset()
eval_rnn_states = np.zeros((self.n_eval_rollout_threads, *self.buffer.rnn_states.shape[2:]), dtype=np.float32)
eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)
for eval_step in range(self.episode_length):
self.trainer.prep_rollout()
eval_action, eval_rnn_states = self.trainer.policy.act(np.concatenate(eval_obs),
np.concatenate(eval_rnn_states),
np.concatenate(eval_masks),
deterministic=True)
eval_actions = np.array(np.split(_t2n(eval_action), self.n_eval_rollout_threads))
eval_rnn_states = np.array(np.split(_t2n(eval_rnn_states), self.n_eval_rollout_threads))
if self.eval_envs.action_space[0].__class__.__name__ == 'MultiDiscrete':
for i in range(self.eval_envs.action_space[0].shape):
eval_uc_actions_env = np.eye(self.eval_envs.action_space[0].high[i]+1)[eval_actions[:, :, i]]
if i == 0:
eval_actions_env = eval_uc_actions_env
else:
eval_actions_env = np.concatenate((eval_actions_env, eval_uc_actions_env), axis=2)
elif self.eval_envs.action_space[0].__class__.__name__ == 'Discrete':
eval_actions_env = np.squeeze(np.eye(self.eval_envs.action_space[0].n)[eval_actions], 2)
else:
raise NotImplementedError
            # Observe reward and next obs
eval_obs, eval_rewards, eval_dones, eval_infos = self.eval_envs.step(eval_actions_env)
eval_episode_rewards.append(eval_rewards)
eval_rnn_states[eval_dones == True] = np.zeros(((eval_dones == True).sum(), self.recurrent_N, self.hidden_size), dtype=np.float32)
eval_masks = np.ones((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)
eval_masks[eval_dones == True] = np.zeros(((eval_dones == True).sum(), 1), dtype=np.float32)
eval_episode_rewards = np.array(eval_episode_rewards)
eval_env_infos = {}
eval_env_infos['eval_average_episode_rewards'] = np.sum(np.array(eval_episode_rewards), axis=0)
print("eval average episode rewards of agent: " + str(eval_average_episode_rewards))
self.log_env(eval_env_infos, total_num_steps)
@torch.no_grad()
def render(self):
"""Visualize the env."""
envs = self.envs
all_frames = []
for episode in range(self.all_args.render_episodes):
obs = envs.reset()
if self.all_args.save_gifs:
image = envs.render('rgb_array')[0][0]
all_frames.append(image)
rnn_states = np.zeros((self.n_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size), dtype=np.float32)
masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
episode_rewards = []
for step in range(self.episode_length):
calc_start = time.time()
self.trainer.prep_rollout()
action, rnn_states = self.trainer.policy.act(np.concatenate(obs),
np.concatenate(rnn_states),
np.concatenate(masks),
deterministic=True)
actions = np.array(np.split(_t2n(action), self.n_rollout_threads))
rnn_states = np.array(np.split(_t2n(rnn_states), self.n_rollout_threads))
if envs.action_space[0].__class__.__name__ == 'MultiDiscrete':
for i in range(envs.action_space[0].shape):
uc_actions_env = np.eye(envs.action_space[0].high[i]+1)[actions[:, :, i]]
if i == 0:
actions_env = uc_actions_env
else:
actions_env = np.concatenate((actions_env, uc_actions_env), axis=2)
elif envs.action_space[0].__class__.__name__ == 'Discrete':
actions_env = np.squeeze(np.eye(envs.action_space[0].n)[actions], 2)
else:
raise NotImplementedError
                # Observe reward and next obs
obs, rewards, dones, infos = envs.step(actions_env)
episode_rewards.append(rewards)
rnn_states[dones == True] = np.zeros(((dones == True).sum(), self.recurrent_N, self.hidden_size), dtype=np.float32)
masks = np.ones((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)
masks[dones == True] = np.zeros(((dones == True).sum(), 1), dtype=np.float32)
if self.all_args.save_gifs:
image = envs.render('rgb_array')[0][0]
all_frames.append(image)
calc_end = time.time()
elapsed = calc_end - calc_start
if elapsed < self.all_args.ifi:
time.sleep(self.all_args.ifi - elapsed)
print("average episode rewards is: " + str(np.mean(np.sum(np.array(episode_rewards), axis=0))))
if self.all_args.save_gifs:
imageio.mimsave(str(self.gif_dir) + '/render.gif', all_frames, duration=self.all_args.ifi)
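# A standalone sketch (toy shapes, not part of onpolicy) of the `np.eye(n)[indices]`
# trick used in collect/eval/render above to one-hot encode Discrete actions:
# indexing the identity matrix with an integer array returns the matching one-hot rows.
_acts_demo = np.array([[0], [2], [1]])                # (n_threads, 1) action indices
_onehot_demo = np.squeeze(np.eye(3)[_acts_demo], 1)   # -> [[1,0,0], [0,0,1], [0,1,0]]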
|
[
"numpy.zeros",
"numpy.ones",
"numpy.expand_dims",
"time.time",
"time.sleep",
"numpy.mean",
"numpy.array",
"numpy.eye",
"torch.no_grad",
"numpy.concatenate"
] |
[((3685, 3700), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3698, 3700), False, 'import torch\n'), ((6358, 6373), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6371, 6373), False, 'import torch\n'), ((8961, 8976), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (8974, 8976), False, 'import torch\n'), ((464, 475), 'time.time', 'time.time', ([], {}), '()\n', (473, 475), False, 'import time\n'), ((5841, 5912), 'numpy.ones', 'np.ones', (['(self.n_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (5848, 5912), True, 'import numpy as np\n'), ((6514, 6610), 'numpy.zeros', 'np.zeros', (['(self.n_eval_rollout_threads, *self.buffer.rnn_states.shape[2:])'], {'dtype': 'np.float32'}), '((self.n_eval_rollout_threads, *self.buffer.rnn_states.shape[2:]),\n dtype=np.float32)\n', (6522, 6610), True, 'import numpy as np\n'), ((6628, 6704), 'numpy.ones', 'np.ones', (['(self.n_eval_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (6635, 6704), True, 'import numpy as np\n'), ((8645, 8675), 'numpy.array', 'np.array', (['eval_episode_rewards'], {}), '(eval_episode_rewards)\n', (8653, 8675), True, 'import numpy as np\n'), ((3884, 3927), 'numpy.concatenate', 'np.concatenate', (['self.buffer.share_obs[step]'], {}), '(self.buffer.share_obs[step])\n', (3898, 3927), True, 'import numpy as np\n'), ((3957, 3994), 'numpy.concatenate', 'np.concatenate', (['self.buffer.obs[step]'], {}), '(self.buffer.obs[step])\n', (3971, 3994), True, 'import numpy as np\n'), ((4024, 4068), 'numpy.concatenate', 'np.concatenate', (['self.buffer.rnn_states[step]'], {}), '(self.buffer.rnn_states[step])\n', (4038, 4068), True, 'import numpy as np\n'), ((4098, 4149), 'numpy.concatenate', 'np.concatenate', (['self.buffer.rnn_states_critic[step]'], {}), '(self.buffer.rnn_states_critic[step])\n', (4112, 4149), True, 'import numpy as np\n'), ((4179, 4218), 'numpy.concatenate', 'np.concatenate', (['self.buffer.masks[step]'], {}), '(self.buffer.masks[step])\n', (4193, 4218), True, 'import numpy as np\n'), ((8431, 8507), 'numpy.ones', 'np.ones', (['(self.n_eval_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_eval_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (8438, 8507), True, 'import numpy as np\n'), ((8768, 8798), 'numpy.array', 'np.array', (['eval_episode_rewards'], {}), '(eval_episode_rewards)\n', (8776, 8798), True, 'import numpy as np\n'), ((9344, 9454), 'numpy.zeros', 'np.zeros', (['(self.n_rollout_threads, self.num_agents, self.recurrent_N, self.hidden_size)'], {'dtype': 'np.float32'}), '((self.n_rollout_threads, self.num_agents, self.recurrent_N, self.\n hidden_size), dtype=np.float32)\n', (9352, 9454), True, 'import numpy as np\n'), ((9470, 9541), 'numpy.ones', 'np.ones', (['(self.n_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (9477, 9541), True, 'import numpy as np\n'), ((1740, 1751), 'time.time', 'time.time', ([], {}), '()\n', (1749, 1751), False, 'import time\n'), ((6866, 6890), 'numpy.concatenate', 'np.concatenate', (['eval_obs'], {}), '(eval_obs)\n', (6880, 6890), True, 'import numpy as np\n'), ((6940, 6971), 'numpy.concatenate', 'np.concatenate', (['eval_rnn_states'], {}), '(eval_rnn_states)\n', (6954, 6971), True, 'import numpy as np\n'), ((7021, 7047), 'numpy.concatenate', 'np.concatenate', (['eval_masks'], 
{}), '(eval_masks)\n', (7035, 7047), True, 'import numpy as np\n'), ((9682, 9693), 'time.time', 'time.time', ([], {}), '()\n', (9691, 9693), False, 'import time\n'), ((11232, 11303), 'numpy.ones', 'np.ones', (['(self.n_rollout_threads, self.num_agents, 1)'], {'dtype': 'np.float32'}), '((self.n_rollout_threads, self.num_agents, 1), dtype=np.float32)\n', (11239, 11303), True, 'import numpy as np\n'), ((2858, 2886), 'numpy.mean', 'np.mean', (['self.buffer.rewards'], {}), '(self.buffer.rewards)\n', (2865, 2886), True, 'import numpy as np\n'), ((3483, 3511), 'numpy.expand_dims', 'np.expand_dims', (['share_obs', '(1)'], {}), '(share_obs, 1)\n', (3497, 3511), True, 'import numpy as np\n'), ((4871, 4916), 'numpy.eye', 'np.eye', (['(self.envs.action_space[0].high[i] + 1)'], {}), '(self.envs.action_space[0].high[i] + 1)\n', (4877, 4916), True, 'import numpy as np\n'), ((5067, 5120), 'numpy.concatenate', 'np.concatenate', (['(actions_env, uc_actions_env)'], {'axis': '(2)'}), '((actions_env, uc_actions_env), axis=2)\n', (5081, 5120), True, 'import numpy as np\n'), ((6123, 6151), 'numpy.expand_dims', 'np.expand_dims', (['share_obs', '(1)'], {}), '(share_obs, 1)\n', (6137, 6151), True, 'import numpy as np\n'), ((9800, 9819), 'numpy.concatenate', 'np.concatenate', (['obs'], {}), '(obs)\n', (9814, 9819), True, 'import numpy as np\n'), ((9873, 9899), 'numpy.concatenate', 'np.concatenate', (['rnn_states'], {}), '(rnn_states)\n', (9887, 9899), True, 'import numpy as np\n'), ((9953, 9974), 'numpy.concatenate', 'np.concatenate', (['masks'], {}), '(masks)\n', (9967, 9974), True, 'import numpy as np\n'), ((11578, 11589), 'time.time', 'time.time', ([], {}), '()\n', (11587, 11589), False, 'import time\n'), ((5231, 5266), 'numpy.eye', 'np.eye', (['self.envs.action_space[0].n'], {}), '(self.envs.action_space[0].n)\n', (5237, 5266), True, 'import numpy as np\n'), ((7522, 7572), 'numpy.eye', 'np.eye', (['(self.eval_envs.action_space[0].high[i] + 1)'], {}), '(self.eval_envs.action_space[0].high[i] + 1)\n', (7528, 7572), True, 'import numpy as np\n'), ((7757, 7820), 'numpy.concatenate', 'np.concatenate', (['(eval_actions_env, eval_uc_actions_env)'], {'axis': '(2)'}), '((eval_actions_env, eval_uc_actions_env), axis=2)\n', (7771, 7820), True, 'import numpy as np\n'), ((11718, 11757), 'time.sleep', 'time.sleep', (['(self.all_args.ifi - elapsed)'], {}), '(self.all_args.ifi - elapsed)\n', (11728, 11757), False, 'import time\n'), ((7949, 7989), 'numpy.eye', 'np.eye', (['self.eval_envs.action_space[0].n'], {}), '(self.eval_envs.action_space[0].n)\n', (7955, 7989), True, 'import numpy as np\n'), ((10406, 10446), 'numpy.eye', 'np.eye', (['(envs.action_space[0].high[i] + 1)'], {}), '(envs.action_space[0].high[i] + 1)\n', (10412, 10446), True, 'import numpy as np\n'), ((10627, 10680), 'numpy.concatenate', 'np.concatenate', (['(actions_env, uc_actions_env)'], {'axis': '(2)'}), '((actions_env, uc_actions_env), axis=2)\n', (10641, 10680), True, 'import numpy as np\n'), ((10802, 10832), 'numpy.eye', 'np.eye', (['envs.action_space[0].n'], {}), '(envs.action_space[0].n)\n', (10808, 10832), True, 'import numpy as np\n'), ((11829, 11854), 'numpy.array', 'np.array', (['episode_rewards'], {}), '(episode_rewards)\n', (11837, 11854), True, 'import numpy as np\n')]
|
##############################################################################
# Institute for the Design of Advanced Energy Systems Process Systems
# Engineering Framework (IDAES PSE Framework) Copyright (c) 2018-2019, by the
# software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia
# University Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.txt and LICENSE.txt for full copyright and
# license information, respectively. Both files are also available online
# at the URL "https://github.com/IDAES/idaes-pse".
##############################################################################
import pyomo.environ as pyo
from idaes.surrogate import ripe
import numpy as np
import random
from . import isotsim
np.random.seed(20)
def main():
#ndata = 100
noise = 0.1
ns = 5
lb_conc = [0,0,0,0,0]
ub_conc = [10,10,0,0,0]
# Initialize concentration arrays
# initial concentrations - only 2 data points at bounds
cdata0 = [[1,1,0,0,0],[10,10,0,0,0]]
cdata = isotsim.sim(cdata0)
nd = len(cdata0)
# Considered reaction stoichiometries
stoich = [[-1,-1,1,0,0] ,[0,-1,-1,1,0],[-1,0,0,-1,1],[-1,-2,0,1,0] ,[-2,-2,0,0,1],[-1,-1,-1,0,1],[-2,-1,1,-1,1]]
# IRIPE internal mass action kinetics are specified
mechs = [['all','massact']]
# Use expected variance - estimated from data if not provided
sigma = np.multiply(noise**2,np.array(cdata))
# Call to RIPE
results = ripe.ripemodel(cdata,stoich = stoich,mechanisms=mechs,x0=cdata0,hide_output=False,sigma=sigma,deltaterm=0,expand_output=True)
# Adaptive experimental design using error maximization sampling
[new_points, err] = ripe.ems(results,isotsim.sim,lb_conc,ub_conc,5,x=cdata,x0=cdata0)
# Implement EMS as described in the RIPE publication
new_res = isotsim.sim(new_points)[0]
ite = 0
# print 'maximum allowable tolerances : ', [noise*s for s in new_res]
while any(err > [2*noise*s for s in new_res] ):
# print 'Which concentrations violate error (True=violation) : ', err > [noise*s for s in new_res]
results = {}
ite+=1
# Data updated explicitly so RBFopt subroutines produce consistent results
new_cdata0 = np.zeros([nd+ite,ns])
new_cdata = np.zeros([nd+ite,ns])
new_cdata0[:-1][:] = cdata0[:][:]
new_cdata[:-1][:] = cdata[:][:]
new_cdata0[-1][:] = new_points[:]
res = isotsim.sim(new_points)[0]
for j in range(len(res)):
new_cdata[-1][j] = res[j]
#Update weight parameters
sigma = np.multiply(noise**2,np.array(new_cdata))
# Build updated RIPE model
results = ripe.ripemodel(new_cdata,stoich = stoich,mechanisms=mechs,x0=new_cdata0,sigma=sigma,expand_output=True)
# Another call to EMS
[new_points, err] = ripe.ems(results,isotsim.sim,lb_conc,ub_conc,5,x=cdata,x0=cdata0)
# Update results
new_res = isotsim.sim(new_points)[0]
cdata0 = new_cdata0
cdata = new_cdata
# Final call to RIPE to get concise output
results = ripe.ripemodel(cdata,stoich = stoich,mechanisms=mechs,x0=cdata0,sigma=sigma,expand_output=False)
#print results
if __name__ == "__main__":
main()
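# Aside (pure-Python sketch with assumed numbers, not part of the RIPE API): the
# adaptive loop in main() keeps sampling while any EMS error exceeds twice the
# expected noise at the proposed point, a per-species relative tolerance:
#   err, new_res, noise = [0.5, 0.1], [2.0, 1.0], 0.1
#   any(e > 2 * noise * s for e, s in zip(err, new_res))  # -> True, since 0.5 > 0.4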
|
[
"idaes.surrogate.ripe.ems",
"idaes.surrogate.ripe.ripemodel",
"numpy.random.seed",
"numpy.zeros",
"numpy.array"
] |
[((907, 925), 'numpy.random.seed', 'np.random.seed', (['(20)'], {}), '(20)\n', (921, 925), True, 'import numpy as np\n'), ((1634, 1768), 'idaes.surrogate.ripe.ripemodel', 'ripe.ripemodel', (['cdata'], {'stoich': 'stoich', 'mechanisms': 'mechs', 'x0': 'cdata0', 'hide_output': '(False)', 'sigma': 'sigma', 'deltaterm': '(0)', 'expand_output': '(True)'}), '(cdata, stoich=stoich, mechanisms=mechs, x0=cdata0,\n hide_output=False, sigma=sigma, deltaterm=0, expand_output=True)\n', (1648, 1768), False, 'from idaes.surrogate import ripe\n'), ((1854, 1925), 'idaes.surrogate.ripe.ems', 'ripe.ems', (['results', 'isotsim.sim', 'lb_conc', 'ub_conc', '(5)'], {'x': 'cdata', 'x0': 'cdata0'}), '(results, isotsim.sim, lb_conc, ub_conc, 5, x=cdata, x0=cdata0)\n', (1862, 1925), False, 'from idaes.surrogate import ripe\n'), ((3270, 3374), 'idaes.surrogate.ripe.ripemodel', 'ripe.ripemodel', (['cdata'], {'stoich': 'stoich', 'mechanisms': 'mechs', 'x0': 'cdata0', 'sigma': 'sigma', 'expand_output': '(False)'}), '(cdata, stoich=stoich, mechanisms=mechs, x0=cdata0, sigma=\n sigma, expand_output=False)\n', (3284, 3374), False, 'from idaes.surrogate import ripe\n'), ((1583, 1598), 'numpy.array', 'np.array', (['cdata'], {}), '(cdata)\n', (1591, 1598), True, 'import numpy as np\n'), ((2404, 2428), 'numpy.zeros', 'np.zeros', (['[nd + ite, ns]'], {}), '([nd + ite, ns])\n', (2412, 2428), True, 'import numpy as np\n'), ((2447, 2471), 'numpy.zeros', 'np.zeros', (['[nd + ite, ns]'], {}), '([nd + ite, ns])\n', (2455, 2471), True, 'import numpy as np\n'), ((2854, 2964), 'idaes.surrogate.ripe.ripemodel', 'ripe.ripemodel', (['new_cdata'], {'stoich': 'stoich', 'mechanisms': 'mechs', 'x0': 'new_cdata0', 'sigma': 'sigma', 'expand_output': '(True)'}), '(new_cdata, stoich=stoich, mechanisms=mechs, x0=new_cdata0,\n sigma=sigma, expand_output=True)\n', (2868, 2964), False, 'from idaes.surrogate import ripe\n'), ((3017, 3088), 'idaes.surrogate.ripe.ems', 'ripe.ems', (['results', 'isotsim.sim', 'lb_conc', 'ub_conc', '(5)'], {'x': 'cdata', 'x0': 'cdata0'}), '(results, isotsim.sim, lb_conc, ub_conc, 5, x=cdata, x0=cdata0)\n', (3025, 3088), False, 'from idaes.surrogate import ripe\n'), ((2779, 2798), 'numpy.array', 'np.array', (['new_cdata'], {}), '(new_cdata)\n', (2787, 2798), True, 'import numpy as np\n')]
|
import matplotlib
matplotlib.use('WXAgg')
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np
import CoolProp
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure(figsize = (2,2))
ax = fig.add_subplot(111, projection='3d')
NT = 1000
NR = 1000
rho,t = np.logspace(np.log10(2e-3), np.log10(1100), NR),np.linspace(275.15,700,NT)
RHO,T = np.meshgrid(rho,t)
P = CoolProp.CoolProp.PropsSI('P','D',RHO.reshape((NR*NT,1)),'T',T.reshape((NR*NT,1)),'REFPROP-Water').reshape(NT,NR)
Tsat = np.linspace(273.17,647.0,100)
psat = CoolProp.CoolProp.PropsSI('P','Q',0,'T',Tsat,'Water')
rhoL = CoolProp.CoolProp.PropsSI('D','Q',0,'T',Tsat,'Water')
rhoV = CoolProp.CoolProp.PropsSI('D','Q',1,'T',Tsat,'Water')
ax.plot_surface(np.log(RHO),T,np.log(P), cmap=cm.jet, edgecolor = 'none')
ax.plot(np.log(rhoL),Tsat,np.log(psat),color='k',lw=2)
ax.plot(np.log(rhoV),Tsat,np.log(psat),color='k',lw=2)
ax.text(0.3,800,22, "CoolProp", size = 12)
ax.set_frame_on(False)
ax.set_axis_off()
ax.view_init(22, -136)
ax.set_xlabel(r'$\ln\rho$ ')
ax.set_ylabel('$T$')
ax.set_zlabel('$p$')
plt.tight_layout()
plt.savefig('_static/PVTCP.png',transparent = True)
plt.savefig('_static/PVTCP.pdf',transparent = True)
plt.close()
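# Aside (pure-numpy sketch with a hypothetical toy function): the PropsSI call above
# evaluates the property on the (RHO, T) grid by flattening both meshgrid arrays
# into (NR*NT, 1) columns and reshaping the result back to (NT, NR):
_X, _Y = np.meshgrid(np.arange(3), np.arange(2))   # both have shape (2, 3)
_Z = (_X + _Y).reshape((2 * 3, 1)).reshape(2, 3)    # round-trips the grid shape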
|
[
"numpy.meshgrid",
"numpy.log",
"matplotlib.pyplot.close",
"CoolProp.CoolProp.PropsSI",
"matplotlib.pyplot.figure",
"matplotlib.use",
"numpy.linspace",
"numpy.log10",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.savefig"
] |
[((18, 41), 'matplotlib.use', 'matplotlib.use', (['"""WXAgg"""'], {}), "('WXAgg')\n", (32, 41), False, 'import matplotlib\n'), ((182, 208), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2, 2)'}), '(figsize=(2, 2))\n', (192, 208), True, 'import matplotlib.pyplot as plt\n'), ((365, 384), 'numpy.meshgrid', 'np.meshgrid', (['rho', 't'], {}), '(rho, t)\n', (376, 384), True, 'import numpy as np\n'), ((511, 542), 'numpy.linspace', 'np.linspace', (['(273.17)', '(647.0)', '(100)'], {}), '(273.17, 647.0, 100)\n', (522, 542), True, 'import numpy as np\n'), ((548, 606), 'CoolProp.CoolProp.PropsSI', 'CoolProp.CoolProp.PropsSI', (['"""P"""', '"""Q"""', '(0)', '"""T"""', 'Tsat', '"""Water"""'], {}), "('P', 'Q', 0, 'T', Tsat, 'Water')\n", (573, 606), False, 'import CoolProp\n'), ((609, 667), 'CoolProp.CoolProp.PropsSI', 'CoolProp.CoolProp.PropsSI', (['"""D"""', '"""Q"""', '(0)', '"""T"""', 'Tsat', '"""Water"""'], {}), "('D', 'Q', 0, 'T', Tsat, 'Water')\n", (634, 667), False, 'import CoolProp\n'), ((670, 728), 'CoolProp.CoolProp.PropsSI', 'CoolProp.CoolProp.PropsSI', (['"""D"""', '"""Q"""', '(1)', '"""T"""', 'Tsat', '"""Water"""'], {}), "('D', 'Q', 1, 'T', Tsat, 'Water')\n", (695, 728), False, 'import CoolProp\n'), ((1088, 1106), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1104, 1106), True, 'import matplotlib.pyplot as plt\n'), ((1107, 1157), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""_static/PVTCP.png"""'], {'transparent': '(True)'}), "('_static/PVTCP.png', transparent=True)\n", (1118, 1157), True, 'import matplotlib.pyplot as plt\n'), ((1159, 1209), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""_static/PVTCP.pdf"""'], {'transparent': '(True)'}), "('_static/PVTCP.pdf', transparent=True)\n", (1170, 1209), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1222), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1220, 1222), True, 'import matplotlib.pyplot as plt\n'), ((330, 358), 'numpy.linspace', 'np.linspace', (['(275.15)', '(700)', 'NT'], {}), '(275.15, 700, NT)\n', (341, 358), True, 'import numpy as np\n'), ((741, 752), 'numpy.log', 'np.log', (['RHO'], {}), '(RHO)\n', (747, 752), True, 'import numpy as np\n'), ((755, 764), 'numpy.log', 'np.log', (['P'], {}), '(P)\n', (761, 764), True, 'import numpy as np\n'), ((807, 819), 'numpy.log', 'np.log', (['rhoL'], {}), '(rhoL)\n', (813, 819), True, 'import numpy as np\n'), ((825, 837), 'numpy.log', 'np.log', (['psat'], {}), '(psat)\n', (831, 837), True, 'import numpy as np\n'), ((862, 874), 'numpy.log', 'np.log', (['rhoV'], {}), '(rhoV)\n', (868, 874), True, 'import numpy as np\n'), ((880, 892), 'numpy.log', 'np.log', (['psat'], {}), '(psat)\n', (886, 892), True, 'import numpy as np\n'), ((294, 309), 'numpy.log10', 'np.log10', (['(0.002)'], {}), '(0.002)\n', (302, 309), True, 'import numpy as np\n'), ((310, 324), 'numpy.log10', 'np.log10', (['(1100)'], {}), '(1100)\n', (318, 324), True, 'import numpy as np\n')]
|
from __future__ import division
import sys
import pytest
import numpy as np
from datashader.glyphs import Glyph
from datashader.glyphs.line import _build_draw_segment, \
_build_map_onto_pixel_for_line
from datashader.utils import ngjit
py2_skip = pytest.mark.skipif(sys.version_info.major < 3, reason="py2 not supported")
mapper = ngjit(lambda x: x)
map_onto_pixel = _build_map_onto_pixel_for_line(mapper, mapper)
sx, tx, sy, ty = 1, 0, 1, 0
xmin, xmax, ymin, ymax = 0, 5, 0, 5
@pytest.fixture
def draw_line():
@ngjit
def append(i, x, y, agg):
agg[y, x] += 1
expand_aggs_and_cols = Glyph._expand_aggs_and_cols(append, 1)
return _build_draw_segment(append, map_onto_pixel, expand_aggs_and_cols,
False)
@py2_skip
@pytest.mark.benchmark(group="draw_line")
def test_draw_line_left_border(benchmark, draw_line):
n = 10**4
x0, y0 = (0, 0)
x1, y1 = (0, n)
agg = np.zeros((n+1, n+1), dtype='i4')
benchmark(draw_line, sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0, y0, x1, y1, 0, True, agg)
@py2_skip
@pytest.mark.benchmark(group="draw_line")
def test_draw_line_diagonal(benchmark, draw_line):
n = 10**4
x0, y0 = (0, 0)
x1, y1 = (n, n)
agg = np.zeros((n+1, n+1), dtype='i4')
benchmark(draw_line, sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0, y0, x1, y1, 0, True, agg)
@py2_skip
@pytest.mark.benchmark(group="draw_line")
def test_draw_line_offset(benchmark, draw_line):
n = 10**4
x0, y0 = (0, n//4)
x1, y1 = (n, n//4-1)
agg = np.zeros((n+1, n+1), dtype='i4')
benchmark(draw_line, sx, tx, sy, ty, xmin, xmax, ymin, ymax, x0, y0, x1, y1, 0, True, agg)
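# Aside (minimal pytest-benchmark sketch, hypothetical test): the `benchmark`
# fixture calls the function it is given repeatedly and records timing statistics,
# grouped in the report by the `group` name set in the marks above, e.g.:
#   @pytest.mark.benchmark(group="noop")
#   def test_noop(benchmark):
#       benchmark(lambda: None)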
|
[
"datashader.glyphs.Glyph._expand_aggs_and_cols",
"numpy.zeros",
"pytest.mark.benchmark",
"pytest.mark.skipif",
"datashader.utils.ngjit",
"datashader.glyphs.line._build_map_onto_pixel_for_line",
"datashader.glyphs.line._build_draw_segment"
] |
[((256, 330), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(sys.version_info.major < 3)'], {'reason': '"""py2 not supported"""'}), "(sys.version_info.major < 3, reason='py2 not supported')\n", (274, 330), False, 'import pytest\n'), ((342, 360), 'datashader.utils.ngjit', 'ngjit', (['(lambda x: x)'], {}), '(lambda x: x)\n', (347, 360), False, 'from datashader.utils import ngjit\n'), ((378, 424), 'datashader.glyphs.line._build_map_onto_pixel_for_line', '_build_map_onto_pixel_for_line', (['mapper', 'mapper'], {}), '(mapper, mapper)\n', (408, 424), False, 'from datashader.glyphs.line import _build_draw_segment, _build_map_onto_pixel_for_line\n'), ((783, 823), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""draw_line"""'}), "(group='draw_line')\n", (804, 823), False, 'import pytest\n'), ((1084, 1124), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""draw_line"""'}), "(group='draw_line')\n", (1105, 1124), False, 'import pytest\n'), ((1381, 1421), 'pytest.mark.benchmark', 'pytest.mark.benchmark', ([], {'group': '"""draw_line"""'}), "(group='draw_line')\n", (1402, 1421), False, 'import pytest\n'), ((616, 654), 'datashader.glyphs.Glyph._expand_aggs_and_cols', 'Glyph._expand_aggs_and_cols', (['append', '(1)'], {}), '(append, 1)\n', (643, 654), False, 'from datashader.glyphs import Glyph\n'), ((666, 738), 'datashader.glyphs.line._build_draw_segment', '_build_draw_segment', (['append', 'map_onto_pixel', 'expand_aggs_and_cols', '(False)'], {}), '(append, map_onto_pixel, expand_aggs_and_cols, False)\n', (685, 738), False, 'from datashader.glyphs.line import _build_draw_segment, _build_map_onto_pixel_for_line\n'), ((943, 979), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {'dtype': '"""i4"""'}), "((n + 1, n + 1), dtype='i4')\n", (951, 979), True, 'import numpy as np\n'), ((1241, 1277), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {'dtype': '"""i4"""'}), "((n + 1, n + 1), dtype='i4')\n", (1249, 1277), True, 'import numpy as np\n'), ((1544, 1580), 'numpy.zeros', 'np.zeros', (['(n + 1, n + 1)'], {'dtype': '"""i4"""'}), "((n + 1, n + 1), dtype='i4')\n", (1552, 1580), True, 'import numpy as np\n')]
|
import time
import numpy as np
from pyembree import rtcore_scene as rtcs
from pyembree.mesh_construction import TriangleMesh
N = 4
def xplane(x):
return [[[x, -1.0, -1.0],
[x, +1.0, -1.0],
[x, -1.0, +1.0]],
[[x, +1.0, -1.0],
[x, +1.0, +1.0],
[x, -1.0, +1.0]]]
triangles = xplane(7.0)
triangles = np.array(triangles, 'float32')
scene = rtcs.EmbreeScene()
mesh = TriangleMesh(scene, triangles)
origins = np.zeros((N, 3), dtype='float32')
origins[:,0] = 0.1
origins[0,1] = -0.2
origins[1,1] = +0.2
origins[2,1] = +0.3
origins[3,1] = -8.2
dirs = np.zeros((N, 3), dtype='float32')
dirs[:, 0] = 1.0
t1 = time.time()
res = scene.run(origins, dirs, output=1)
t2 = time.time()
print("Ran in {0:.3f} s".format(t2-t1))
print('Output is a dict containing Embree results with id of intersected dimensionless coordinates')
print(res)
ray_inter = res['geomID'] >= 0
print('{0} rays intersect geometry (over {1})'.format(sum(ray_inter), N))
print('Intersection coordinates')
primID = res['primID'][ray_inter]
u = res['u'][ray_inter]
v = res['v'][ray_inter]
w = 1 - u - v
inters = np.vstack(w) * triangles[primID][:, 0, :] + \
np.vstack(u) * triangles[primID][:, 1, :] + \
np.vstack(v) * triangles[primID][:, 2, :]
print(inters)
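# Aside (pure-numpy sketch with an assumed toy triangle): the interpolation above
# uses the barycentric coordinates (u, v, w = 1 - u - v) reported by Embree, so a
# hit point is w * vertex0 + u * vertex1 + v * vertex2:
_tri = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
_u, _v = 0.25, 0.25
_w = 1 - _u - _v
_hit = _w * _tri[0] + _u * _tri[1] + _v * _tri[2]  # -> array([0.25, 0.25, 0.  ])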
|
[
"pyembree.rtcore_scene.EmbreeScene",
"numpy.zeros",
"time.time",
"numpy.array",
"pyembree.mesh_construction.TriangleMesh",
"numpy.vstack"
] |
[((370, 400), 'numpy.array', 'np.array', (['triangles', '"""float32"""'], {}), "(triangles, 'float32')\n", (378, 400), True, 'import numpy as np\n'), ((410, 428), 'pyembree.rtcore_scene.EmbreeScene', 'rtcs.EmbreeScene', ([], {}), '()\n', (426, 428), True, 'from pyembree import rtcore_scene as rtcs\n'), ((436, 466), 'pyembree.mesh_construction.TriangleMesh', 'TriangleMesh', (['scene', 'triangles'], {}), '(scene, triangles)\n', (448, 466), False, 'from pyembree.mesh_construction import TriangleMesh\n'), ((478, 511), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {'dtype': '"""float32"""'}), "((N, 3), dtype='float32')\n", (486, 511), True, 'import numpy as np\n'), ((619, 652), 'numpy.zeros', 'np.zeros', (['(N, 3)'], {'dtype': '"""float32"""'}), "((N, 3), dtype='float32')\n", (627, 652), True, 'import numpy as np\n'), ((676, 687), 'time.time', 'time.time', ([], {}), '()\n', (685, 687), False, 'import time\n'), ((734, 745), 'time.time', 'time.time', ([], {}), '()\n', (743, 745), False, 'import time\n'), ((1254, 1266), 'numpy.vstack', 'np.vstack', (['v'], {}), '(v)\n', (1263, 1266), True, 'import numpy as np\n'), ((1144, 1156), 'numpy.vstack', 'np.vstack', (['w'], {}), '(w)\n', (1153, 1156), True, 'import numpy as np\n'), ((1199, 1211), 'numpy.vstack', 'np.vstack', (['u'], {}), '(u)\n', (1208, 1211), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
import read_data as rd
import argparse
import os
import time
import sklearn
from sklearn.externals import joblib
from sklearn.metrics import precision_recall_curve
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
def prepare_data(df=None):
'''
Preps the data to be used in the model. Right now, the code itself must
be modified to tweak which columns are included in what way.
Parameters
----------
df : Dataframe to use. If not specified, the dataframe is loaded automatically.
Returns
-------
predictors : NxM DataFrame of the predictors for the classification problem.
meta_info : Nx6 DataFrame containing the columns 'Escherichia.coli' and
'Full_date', to be used, e.g., for leave-one-year-out cross
validation and creating the true class labels (elevated vs.
not elevated E. coli levels). The columns 'Client.ID','BEACH',
'Drek_Prediction'and 'Weekday' are also returned.
'''
# Meta columns are not used as predictors
meta_columns = ['Client.ID','BEACH','Full_date','Escherichia.coli',
'Drek_Prediction','Weekday']
# Deterministic columns are known ahead of time, their actual values can be used.
deterministic_columns = [
'Client.ID', # subsumed by the geographic flags
'group_prior_mean',
'previous_reading',
'accum_rain', #added to try to capture storm events
'Collection_Time', # mostly missing values but may still be of some use
'12hrPressureChange', # overnight pressure change
#'precipIntensity',
#'precipIntensityMax',
#'temperatureMin',
#'temperatureMax',
#'humidity',
#'windSpeed',
#'cloudCover',
#'flag_geographically_a_north_beach',
'categorical_beach_grouping'
#'12th_previous',
#'Montrose_previous',
#'Rainbow_previous',
#'63rd_previous',
#'Osterman_previous'
]
    # Deterministic hourly columns are likewise known ahead of time; their actual values are used.
    # These hourly variables have an additional parameter which defines what hours
# should be used. For example, an entry
# 'temperature':[-16,-13,-12,-11,-9,-3,0]
# would indicate that the hourly temperature at offsets of
# [-16,-13,-12,-11,-9,-3,0] from MIDNIGHT the day of should be included as
# variables in the model.
deterministic_hourly_columns = {
'temperature':np.linspace(-19,4,num=6,dtype=np.int64),#range(-19,5),
'windVectorX':np.linspace(-19,4,num=6,dtype=np.int64),#range(-19,5),#[-4,-2,0,2,4],
'windVectorY':np.linspace(-19,4,num=6,dtype=np.int64),
#'windSpeed':[-2,0,2,4],
#'windBearing':[-2,0,2,4],
'pressure':[0],
'cloudCover':[-15], #range(-19,5),
'humidity':[4],
#'precipIntensity':[4]#np.linspace(-10,4,num=4,dtype=np.int64)
}
for var in deterministic_hourly_columns:
for hr in deterministic_hourly_columns[var]:
deterministic_columns.append(var + '_hour_' + str(hr))
# Historical columns have their previous days' values added to the predictors,
# but not the current day's value(s) unless the historical column also exists
# in the deterministic columns list.
# Similar to the hourly columns, you need to specify which previous days
# to include as variables. For example, below we have an entry
# 'temperatureMax': range(1,4)
# which indicates that the max temperature from 1, 2, and 3 days previous
# should be included.
historical_columns = {
#'temperatureMin': range(2,3),
'temperatureMax': range(2,5),
# 'humidity': range(1,3),
#'windSpeed': range(1,3),
'pressure': range(1,3),
'dewPoint': range(1,3),
#'cloudCover': range(1,3),
'windVectorX': range(2,3),
'windVectorY': range(2,3),
'Escherichia.coli': range(2,8)
}
historical_columns_list = list(historical_columns.keys())
######################################################
#### Get relevant columns, add historical data
######################################################
all_columns = meta_columns + deterministic_columns + historical_columns_list #+ derived_columns
all_columns = list(set(all_columns))
df = df[all_columns]
for var in historical_columns:
df = rd.add_column_prior_data(
df, var, historical_columns[var],
beach_col_name='Client.ID', timestamp_col_name='Full_date'
)
df.drop((set(historical_columns_list) - set(deterministic_columns)) - set(meta_columns),
axis=1, inplace=True)
######################################################
#### Average the historical columns, fill in NaNs
######################################################
# Creates a "trailing_average_daily_" column for each historical variable
# which is simply the mean of the previous day columns of that variable.
# NaN values for any previous day data is filled in by that mean value.
for var in historical_columns:
cname = 'trailing_average_daily_' + var
rnge = historical_columns[var]
if len(rnge) == 1: # no need to create a trailing average of a single number...
continue
df[cname] = df[[str(n) + '_day_prior_' + var for n in rnge]].mean(1)
for n in rnge:
df[str(n) + '_day_prior_' + var].fillna(df[cname], inplace=True)
# Do a similar process for the hourly data.
for var in deterministic_hourly_columns:
cname = 'trailing_average_hourly_' + var
rnge = deterministic_hourly_columns[var]
if len(rnge) == 1: # no need to create a trailing average of a single number...
continue
df[cname] = df[[var + '_hour_' + str(n) for n in rnge]].mean(1)
for n in rnge:
df[var + '_hour_' + str(n)].fillna(df[cname], inplace=True)
######################################################
#### Process non-numeric columns
######################################################
# process all of the nonnumeric columns
# This method just assigns a numeric value to each possible value
# of the non-numeric column. Note that this will not work well
# for regression-style models, where instead dummy columns should
# be created.
def nonnumericCols(data, verbose=True):
for f in data.columns:
if data[f].dtype=='object':
if (verbose):
print('Column ' + str(f) + ' being treated as non-numeric')
lbl = sklearn.preprocessing.LabelEncoder()
lbl.fit(list(data[f].values))
data.loc[:,f] = lbl.transform(list(data[f].values))
return data
# Do this at the end so meta_data has Beach names and Weekdays
#df = nonnumericCols(df)
    # As a last NaN filling measure, we fill the NaNs of all columns
    # that are NOT the E. coli column with the median value of the column,
    # the median taken over all data not from the same year as the
    # year of the row we are filling. For example, if there is a NaN
    # in the temperatureMax column in some row from 2010, then we will
    # fill that value with the median temperatureMax value from all years
    # that are NOT 2010.
cols = df.columns.tolist()
cols.remove('Escherichia.coli')
years = df['Full_date'].map(lambda x: x.year)
for yr in years.unique():
not_yr = np.array(years != yr)
is_yr = np.array(years == yr)
df.ix[is_yr, cols] = df.ix[is_yr, cols].fillna(df.ix[not_yr, cols].median())
######################################################
#### Drop any rows that still have NA, set up outputs
######################################################
# The following lines will print the % of rows that:
# (a) have a NaN value in some column other than Escherichia.coli, AND
# (b) the column Escherichia.coli is NOT NaN.
# Since we are now filling NaNs with column averages above, this should
# always report 0%. I'm leaving the check in here just to be sure, though.
total_rows_predictors = df.dropna(subset=['Escherichia.coli'], axis=0).shape[0]
nonnan_rows_predictors = df.dropna(axis=0).shape[0]
print('Dropping {0:.4f}% of rows because predictors contain NANs'.format(
100.0 - 100.0 * nonnan_rows_predictors / total_rows_predictors
))
# Any rows that still have NaNs are NaN b/c there is no E. coli reading
# We should drop these rows b/c there is nothing for us to predict.
df.dropna(axis=0, inplace=True)
#df.dropna(axis=0, how='any', subset=['Full_date','Escherichia.coli'], inplace=True)
predictors = df.drop(set(meta_columns)-set(['Client.ID']) , axis=1)
meta_info = df[meta_columns]
predictors = nonnumericCols(predictors)
return predictors, meta_info
def display_predictions_by_beach(results, predict_col = 'predictedEPA'):
'''
Helper function to test ensemble of models on 2015 data.
Displays the prediction results by beach, sorted from north to south.
Parameters
----------
results : dataframe with all predictions
Returns
-------
precision : percent of reported warnings that are actually correct
recall : percent of all actual ecoli outbreaks that are warned about
Also prints table of results to console
'''
results['correct_warning'] = (results['expected'])&(results[predict_col])
results['incorrect_warning'] = (results['expected']==False)&(results[predict_col])
results['missed_warning'] = (results['expected'])&(~results[predict_col])
print(results.groupby(['Client.ID','BEACH'])['incorrect_warning','correct_warning','missed_warning'].sum())
TP = results['correct_warning'].sum()
FP = results['incorrect_warning'].sum()
FN = results['missed_warning'].sum()
precision = TP/(TP+FP)
recall = TP/(TP+FN)
return precision, recall
def calibrateThreshold(target, predictions, FNR):
    '''
    Helper function to calibrate the decision threshold so that the
    False Negative Rate hits a target percentage; FNR should be a
    value between 1.0 and 10 (percent).
    '''
countOfAllNeg = len(target[target<236])
cut = max(np.exp(predictions))
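    # Coarse pass: scan 10 candidate thresholds from the largest back-transformed
    # prediction down to 50, stopping once specificity drops below (1 - FNR/100).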
for firstcut in np.linspace(cut,50,10):
countOfCorrectNeg = len(target[(target<236)&(np.exp(predictions)<firstcut)])
specif = countOfCorrectNeg/countOfAllNeg
if specif < (1.0-FNR/100):
            cut = firstcut + (max(np.exp(predictions))-50)/9 #go back up one cut to begin next search
break
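    # Fine pass: refine downward from the coarse cut toward cut/2 in 100 steps.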
for secondcut in np.linspace(cut, cut/2,100):
countOfCorrectNeg = len(target[(target<236)&(np.exp(predictions)<secondcut)])
specif = countOfCorrectNeg/countOfAllNeg
if specif <= (1.0-FNR/100):
cut = secondcut
break
return cut
###############################################################################
###############################################################################
#### This builds the set of leave one out RF and GBM models ####
###############################################################################
###############################################################################
if __name__ == '__main__':
'''
This script will produce and put in a folder named model_<model_suffix>:
-- 18 .pkl files
        - 9 Random Forest Regression models
- 9 Gradient Boosting Regression models
-- 5 .csv files
- data saved from reading in via read_data()
- processed data ready for modeling
        - accompanying metadata for modeling
- summary of precision, recall and tuned threshold values
- results of testing on 2015 data
-- 1 .txt file containing a list of predictors
'''
# Command Line Argument parsing
parser = argparse.ArgumentParser(description='Process beach data.')
parser.add_argument('-id', '--input_data', type=str,
metavar='data',
help='input pre-read data CSV filename')
parser.add_argument('-ip', '--input_processed', type=str,
metavar='processed',
help='input processed modeling data CSV filename')
parser.add_argument('-ip2', '--input_meta', type=str,
metavar='processed_meta',
help='input processed modeling metadata CSV filename')
parser.add_argument('-s', '--suffix', type=str,
metavar='model_suffix',
help='suffix to identify this model build results')
parser.add_argument('-v', '--verbose', action='count', default=1)
args = parser.parse_args()
if args.suffix:
model_suffix = args.suffix
else:
model_suffix = time.strftime("%d_%m_%Y")
directory = 'model_'+model_suffix
if not os.path.exists(directory):
os.makedirs(directory)
##########################
### Load the data
##########################
if args.input_data:
print('Loading data from {0}'.format(args.input_data))
df = pd.read_csv(args.input_data, parse_dates='Full_date', low_memory=False)
df['Full_date'] = rd.date_lookup(df['Full_date'])
else:
print('Reading and loading data. Saving to {}'.format(directory+'/all_data.csv'))
df = rd.read_data(read_weather_station=False, read_water_sensor=False, add_each_beach_data=True)
df.to_csv(directory+'/all_data.csv', index=False)
###############################
### Prepare Predictors
###############################
if args.input_processed:
print('Using Preprocessed data from {0} and {1}'.format(args.input_processed, args.input_meta ))
datafilename = args.input_processed
metadatafilename = args.input_meta
data_processed = pd.read_csv(datafilename)
meta_info = pd.read_csv(metadatafilename, parse_dates='Full_date')
meta_info['Full_date'] = rd.date_lookup(meta_info['Full_date'])
else:
print('Preparing data for modeling. Saving to {0} and {1}'.format(directory+'/processed.csv', directory+'/meta_processed.csv'))
data_processed, meta_info = prepare_data(df)
data_processed.to_csv(directory+'/processed.csv', index=False)
meta_info.to_csv(directory+'/meta_processed.csv', index=False)
f = open(directory+'/feature_list.txt', 'w')
f.write("\n".join(list(data_processed.columns) ) ) # For easy reference
f.close()
if args.verbose>=1:
print('Using the following columns as predictors:')
for c in data_processed.columns:
print('\t' + str(c))
##########################################################################
### Split data into Train/Validate (2006-2014) and Testing (2015)
##########################################################################
train_processed = data_processed[meta_info['Full_date'] < '1-1-2015'].copy()
test_processed = data_processed[meta_info['Full_date'] > '1-1-2015'].copy()
train_meta_info = meta_info[meta_info['Full_date'] < '1-1-2015'].copy()
test_meta_info = meta_info[meta_info['Full_date'] > '1-1-2015'].copy()
##########################################################################
### Setup Random Forest classifier and Gradient Boosting Regressor
##########################################################################
RF_reg = RandomForestRegressor(n_estimators=500,
max_depth=10,
max_features=0.8,
min_samples_split=10,
min_samples_leaf=4,
oob_score=True,
n_jobs=-1)
gbm_reg = GradientBoostingRegressor(loss='quantile',
learning_rate=0.025,
n_estimators=1500, # train longer, no concern of overfitting
subsample=0.8,
min_samples_split=10,
min_samples_leaf=4,
max_depth=10,
alpha=0.85)
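    # Note: quantile loss with alpha=0.85 fits the 85th percentile, biasing
    # predictions upward so rare high E. coli readings are less likely to be missed.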
##########################################################################
### Train models by holding one year out
### Validate and tune cutoff thresholds on held out year
##########################################################################
dataSeries = []
colIndexes = []
timestamps = train_meta_info['Full_date'].map(lambda x: x.year)
print('\nBegining training and validation of hold-one-year-out models\n')
for yr in range(2006, 2015):
### HOLD OUT YEAR
train_ind = np.array((timestamps != yr))
# Remove weekends from training b/c sampled under different conditions
train_data = train_processed.ix[train_ind & (train_meta_info['Weekday']!='Saturday')
& (train_meta_info['Weekday']!='Sunday')]
train_target = train_meta_info.ix[train_ind & (train_meta_info['Weekday']!='Saturday')
& (train_meta_info['Weekday']!='Sunday'),'Escherichia.coli']
# Leave weekends in held out validation data
test_data = train_processed.ix[~train_ind]
test_target = train_meta_info.ix[~train_ind,'Escherichia.coli']
### TRAIN Random Forest Regressor model and save as pickle file
startTime = time.time() ## This is only to keep track of training time
RF_reg.fit(train_data, np.log(train_target+1) )
filename = directory+'/RF_regress' + '_' +str(yr) +'.pkl'
joblib.dump(RF_reg, filename, compress=9)
        ### VALIDATE MODEL on held out year to calibrate cutoff threshold based on False Negative Rate
predictions0 = getattr(RF_reg, 'predict')(test_data)
# rescales to between 0 and 1 in order to use in precision_recall_curve()
predictionsX0= predictions0-predictions0.min()
predictionsX0= predictionsX0/(predictions0.max()-predictions0.min())
precisionV, recallV, threshV = precision_recall_curve(test_target>=236, predictionsX0)
        threshV = np.exp(threshV*(predictions0.max()-predictions0.min())+predictions0.min()) # map back from [0,1] to original scaling
RFthresh = calibrateThreshold(test_target, predictions0, 2.0) # FNR of 2%
threshIdx = (np.abs(threshV-RFthresh)).argmin()
RF_rec = recallV[threshIdx]
RF_prec = precisionV[threshIdx]
RFthreshAlt = calibrateThreshold(test_target, predictions0, 5.0) # FNR of 5%
threshIdx = (np.abs(threshV-RFthreshAlt)).argmin()
RF_recAlt = recallV[threshIdx]
RF_precAlt = precisionV[threshIdx]
# REPORT Results
print(' RF ensemble {0} model: thresh for 2% FNR = {1}, recall= {2}, precision = {3}'\
.format(yr,np.int(RFthresh),np.int(RF_rec*100+.4),np.int(RF_prec*100+.4) ))
print(' RF ensemble {0} model: thresh for 5% FNR = {1}, recall= {2}, precision = {3}'\
.format(yr,np.int(RFthreshAlt),np.int(RF_recAlt*100+.4),np.int(RF_precAlt*100+.4) ))
if args.verbose>=3:
print('\t runtime of building and testing RF model was {0} minutes'.format(np.round((time.time() - startTime)/60) ))
### TRAIN Gradient Boosting Regression model and save as pickle file
startTime = time.time()
gbm_reg.fit(train_data, np.log(train_target+1))
filename = directory+'/GBM_regress' + '_' + str(yr) +'.pkl'
joblib.dump(gbm_reg, filename, compress=9)
        ### VALIDATE MODEL on held out year to calibrate cutoff threshold based on False Negative Rate
predictions0 = getattr(gbm_reg, 'predict')(test_data)
# rescales to between 0 and 1 in order to use in precision_recall_curve()
predictionsX0= predictions0-predictions0.min()
predictionsX0= predictionsX0/(predictions0.max()-predictions0.min())
precisionV, recallV, threshV = precision_recall_curve(test_target>=236, predictionsX0)
        threshV = np.exp(threshV*(predictions0.max()-predictions0.min())+predictions0.min()) # map back from [0,1] to original scaling
GBMthresh = calibrateThreshold(test_target, predictions0, 2.0) # FNR of 2%
threshIdx = (np.abs(threshV-GBMthresh)).argmin()
GBM_rec = recallV[threshIdx]
GBM_prec = precisionV[threshIdx]
GBMthreshAlt = calibrateThreshold(test_target, predictions0, 5.0) # FNR of 5%
threshIdx = (np.abs(threshV-GBMthreshAlt)).argmin()
GBM_recAlt = recallV[threshIdx]
GBM_precAlt = precisionV[threshIdx]
# REPORT Results
print(' GBM ensemble {0} model: thresh for 2% FNR = {1}, recall= {2}, precision = {3}'\
.format(yr,np.int(GBMthresh),np.int(GBM_rec*100+.4),np.int(GBM_prec*100+.4) ))
print(' GBM ensemble {0} model: thresh for 5% FNR = {1}, recall= {2}, precision = {3}'\
.format(yr,np.int(GBMthreshAlt),np.int(GBM_recAlt*100+.4),np.int(GBM_precAlt*100+.4) ))
if args.verbose>=3:
print('\t runtime of building and testing GBM model was {0} minutes'.format(np.round((time.time() - startTime)/60)))
# SAVE the precision, recall, and tuned thresholds
d = { 'RF_precision2p':RF_prec, 'RF_recall2p':RF_rec, 'RF_thresh2p': RFthresh,
'RF_precision5p':RF_precAlt, 'RF_recall5p':RF_recAlt, 'RF_thresh5p': RFthreshAlt,
'GBM_precision2p':GBM_prec, 'GBM_recall2p':GBM_rec, 'GBM_thresh2p': GBMthresh,
'GBM_precision5p':GBM_precAlt, 'GBM_recall5p':GBM_recAlt, 'GBM_thresh5p': GBMthreshAlt
}
d = pd.Series(d, index = [ 'RF_precision2p', 'RF_recall2p', 'RF_thresh2p',
'RF_precision5p', 'RF_recall5p', 'RF_thresh5p',
'GBM_precision2p', 'GBM_recall2p', 'GBM_thresh2p',
'GBM_precision5p', 'GBM_recall5p', 'GBM_thresh5p'])
dataSeries = dataSeries + [ d ]
colIndexes = colIndexes + [yr]
summaryFrame = pd.DataFrame( dataSeries , index = colIndexes)
summaryFileName = directory+'/ValidationReport2.csv'
summaryFrame.to_csv(summaryFileName)
##########################################################################
### Test models on 2015 data
##########################################################################
print('\nTesting ensemble of models on 2015 data\n')
results = test_meta_info.copy()
results['expected'] = results['Escherichia.coli']>=235
results['predictedEPA'] = results['Drek_Prediction']>=235
RF_cols = []
GBM_cols = []
RF_bool_cols2p = []
RF_bool_cols5p = []
GBM_bool_cols2p = []
GBM_bool_cols5p = []
for yr in range(2006, 2015):
filename = directory+'/GBM_regress' + '_' + str(yr) +'.pkl'
gbmmodel = joblib.load(filename)
pred_col_name = 'GBM_' +str(yr)+ '_pred'
GBM_cols = GBM_cols + [pred_col_name]
results[pred_col_name] = np.exp(getattr(gbmmodel, 'predict')(test_processed))
results[pred_col_name+'_bool_2p'] = results[pred_col_name] > summaryFrame.ix[yr,'GBM_thresh2p']
results[pred_col_name+'_bool_5p'] = results[pred_col_name] > summaryFrame.ix[yr,'GBM_thresh5p']
GBM_bool_cols2p = GBM_bool_cols2p + [pred_col_name+'_bool_2p']
GBM_bool_cols5p = GBM_bool_cols5p + [pred_col_name+'_bool_5p']
for yr in range(2006, 2015):
filename = directory+'/RF_regress' + '_' +str(yr) +'.pkl'
RFmodel = joblib.load(filename)
pred_col_name = 'RF_' +str(yr)+ '_pred'
results[pred_col_name] = np.exp(getattr(RFmodel, 'predict')(test_processed))
RF_cols = RF_cols + [pred_col_name]
results[pred_col_name+'_bool_2p'] = results[pred_col_name] > summaryFrame.ix[yr,'RF_thresh2p']
results[pred_col_name+'_bool_5p'] = results[pred_col_name] > summaryFrame.ix[yr,'RF_thresh5p']
RF_bool_cols2p = RF_bool_cols2p + [pred_col_name+'_bool_2p']
RF_bool_cols5p = RF_bool_cols5p + [pred_col_name+'_bool_5p']
results['mean_GBM'] = results[GBM_cols].mean(1)
results['max_GBM'] = results[GBM_cols].max(1)
results['min_GBM'] = results[GBM_cols].min(1)
results['mean_RF'] = results[RF_cols].mean(1)
results['max_RF'] = results[RF_cols].max(1)
results['min_RF'] = results[RF_cols].min(1)
# The above results could be interesting to drill down into to see how the
# different models are biased, and how much variance in the predictions.
# For now, the method of final prediction is to predict Ecoli_High == True
# IF ((any GBM predicts true) AND (any RF predicts true)) OR (EPA predicts true)
results['predict_RF2p'] = results[RF_bool_cols2p].sum(1) > 1
results['predict_GBM2p'] = results[GBM_bool_cols2p].sum(1) > 1
results['predict_Combo2p'] = (((results['predict_RF2p'])&(results['predict_GBM2p']))|(results['predictedEPA']) )
results['predict_RF5p'] = results[RF_bool_cols5p].sum(1) > 1
results['predict_GBM5p'] = results[GBM_bool_cols5p].sum(1) > 1
results['predict_Combo5p'] = (((results['predict_RF5p'])&(results['predict_GBM5p']))|(results['predictedEPA']) )
results.to_csv(directory+'/results_RF_GBM.csv', index=False)
# Look at performance of GMB ensemble at 5% FNR alone
prec, rec = display_predictions_by_beach(results, 'predict_GBM5p')
print('GBM ensemble model at 5% FNR: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
# Look at performance of RF ensemble at 5% FNR alone
prec, rec = display_predictions_by_beach(results, 'predict_RF5p')
print('RF ensemble model at 5% FNR: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
prec, rec = display_predictions_by_beach(results, 'predict_Combo5p')
print('Combo ensemble model variant at 5% FNR with AND: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
# Try out some variants of putting models together
results['predict_Combo5p'] = (((results['predict_RF5p'])|(results['predict_GBM5p']))|(results['predictedEPA']) )
prec, rec = display_predictions_by_beach(results, 'predict_Combo5p')
print('Combo ensemble model variant at 5% FNR with OR: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
prec, rec = display_predictions_by_beach(results, 'predict_Combo2p')
print('Combo ensemble model variant at 2% FNR with AND: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
# Try out some variants of putting models together
results['predict_Combo2p'] = (((results['predict_RF2p'])|(results['predict_GBM2p']))|(results['predictedEPA']) )
prec, rec = display_predictions_by_beach(results, 'predict_Combo2p')
print('Combo ensemble model variant at 2% FNR with OR: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
# Try out some variants of putting models together
    results['predict_RF'] = results['mean_RF'] > summaryFrame.RF_thresh5p.min()
    results['predict_GBM'] = results['mean_GBM'] > summaryFrame.GBM_thresh5p.min()
results['predict_Combo'] = (((results['predict_RF'])&(results['predict_GBM']))|(results['predictedEPA']) )
prec, rec = display_predictions_by_beach(results, 'predict_Combo')
print('Combo ensemble model variant with one threshold: recall= {0}, precision = {1}\n'.format(np.int(rec*100),np.int(prec*100)))
|
[
"sklearn.externals.joblib.dump",
"numpy.abs",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.ensemble.GradientBoostingRegressor",
"time.strftime",
"numpy.exp",
"read_data.read_data",
"pandas.DataFrame",
"read_data.date_lookup",
"os.path.exists",
"sklearn.preprocessing.LabelEncoder",
"numpy.int",
"numpy.linspace",
"sklearn.ensemble.RandomForestRegressor",
"sklearn.metrics.precision_recall_curve",
"pandas.Series",
"sklearn.externals.joblib.load",
"os.makedirs",
"numpy.log",
"read_data.add_column_prior_data",
"time.time",
"numpy.array"
] |
[((10726, 10750), 'numpy.linspace', 'np.linspace', (['cut', '(50)', '(10)'], {}), '(cut, 50, 10)\n', (10737, 10750), True, 'import numpy as np\n'), ((11068, 11098), 'numpy.linspace', 'np.linspace', (['cut', '(cut / 2)', '(100)'], {}), '(cut, cut / 2, 100)\n', (11079, 11098), True, 'import numpy as np\n'), ((12418, 12476), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process beach data."""'}), "(description='Process beach data.')\n", (12441, 12476), False, 'import argparse\n'), ((16215, 16359), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(500)', 'max_depth': '(10)', 'max_features': '(0.8)', 'min_samples_split': '(10)', 'min_samples_leaf': '(4)', 'oob_score': '(True)', 'n_jobs': '(-1)'}), '(n_estimators=500, max_depth=10, max_features=0.8,\n min_samples_split=10, min_samples_leaf=4, oob_score=True, n_jobs=-1)\n', (16236, 16359), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((16607, 16780), 'sklearn.ensemble.GradientBoostingRegressor', 'GradientBoostingRegressor', ([], {'loss': '"""quantile"""', 'learning_rate': '(0.025)', 'n_estimators': '(1500)', 'subsample': '(0.8)', 'min_samples_split': '(10)', 'min_samples_leaf': '(4)', 'max_depth': '(10)', 'alpha': '(0.85)'}), "(loss='quantile', learning_rate=0.025,\n n_estimators=1500, subsample=0.8, min_samples_split=10,\n min_samples_leaf=4, max_depth=10, alpha=0.85)\n", (16632, 16780), False, 'from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor\n'), ((23401, 23443), 'pandas.DataFrame', 'pd.DataFrame', (['dataSeries'], {'index': 'colIndexes'}), '(dataSeries, index=colIndexes)\n', (23413, 23443), True, 'import pandas as pd\n'), ((2635, 2677), 'numpy.linspace', 'np.linspace', (['(-19)', '(4)'], {'num': '(6)', 'dtype': 'np.int64'}), '(-19, 4, num=6, dtype=np.int64)\n', (2646, 2677), True, 'import numpy as np\n'), ((2713, 2755), 'numpy.linspace', 'np.linspace', (['(-19)', '(4)'], {'num': '(6)', 'dtype': 'np.int64'}), '(-19, 4, num=6, dtype=np.int64)\n', (2724, 2755), True, 'import numpy as np\n'), ((2806, 2848), 'numpy.linspace', 'np.linspace', (['(-19)', '(4)'], {'num': '(6)', 'dtype': 'np.int64'}), '(-19, 4, num=6, dtype=np.int64)\n', (2817, 2848), True, 'import numpy as np\n'), ((4628, 4751), 'read_data.add_column_prior_data', 'rd.add_column_prior_data', (['df', 'var', 'historical_columns[var]'], {'beach_col_name': '"""Client.ID"""', 'timestamp_col_name': '"""Full_date"""'}), "(df, var, historical_columns[var], beach_col_name=\n 'Client.ID', timestamp_col_name='Full_date')\n", (4652, 4751), True, 'import read_data as rd\n'), ((7838, 7859), 'numpy.array', 'np.array', (['(years != yr)'], {}), '(years != yr)\n', (7846, 7859), True, 'import numpy as np\n'), ((7877, 7898), 'numpy.array', 'np.array', (['(years == yr)'], {}), '(years == yr)\n', (7885, 7898), True, 'import numpy as np\n'), ((10684, 10703), 'numpy.exp', 'np.exp', (['predictions'], {}), '(predictions)\n', (10690, 10703), True, 'import numpy as np\n'), ((13427, 13452), 'time.strftime', 'time.strftime', (['"""%d_%m_%Y"""'], {}), "('%d_%m_%Y')\n", (13440, 13452), False, 'import time\n'), ((13514, 13539), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (13528, 13539), False, 'import os\n'), ((13550, 13572), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (13561, 13572), False, 'import os\n'), ((13774, 13845), 'pandas.read_csv', 'pd.read_csv', (['args.input_data'], {'parse_dates': '"""Full_date"""', 'low_memory': '(False)'}), "(args.input_data, parse_dates='Full_date', low_memory=False)\n", (13785, 13845), True, 'import pandas as pd\n'), ((13873, 13904), 'read_data.date_lookup', 'rd.date_lookup', (["df['Full_date']"], {}), "(df['Full_date'])\n", (13887, 13904), True, 'import read_data as rd\n'), ((14029, 14124), 'read_data.read_data', 'rd.read_data', ([], {'read_weather_station': '(False)', 'read_water_sensor': '(False)', 'add_each_beach_data': '(True)'}), '(read_weather_station=False, read_water_sensor=False,\n add_each_beach_data=True)\n', (14041, 14124), True, 'import read_data as rd\n'), ((14557, 14582), 'pandas.read_csv', 'pd.read_csv', (['datafilename'], {}), '(datafilename)\n', (14568, 14582), True, 'import pandas as pd\n'), ((14604, 14658), 'pandas.read_csv', 'pd.read_csv', (['metadatafilename'], {'parse_dates': '"""Full_date"""'}), "(metadatafilename, parse_dates='Full_date')\n", (14615, 14658), True, 'import pandas as pd\n'), ((14700, 14738), 'read_data.date_lookup', 'rd.date_lookup', (["meta_info['Full_date']"], {}), "(meta_info['Full_date'])\n", (14714, 14738), True, 'import read_data as rd\n'), ((17789, 17815), 'numpy.array', 'np.array', (['(timestamps != yr)'], {}), '(timestamps != yr)\n', (17797, 17815), True, 'import numpy as np\n'), ((18608, 18619), 'time.time', 'time.time', ([], {}), '()\n', (18617, 18619), False, 'import time\n'), ((18800, 18841), 'sklearn.externals.joblib.dump', 'joblib.dump', (['RF_reg', 'filename'], {'compress': '(9)'}), '(RF_reg, filename, compress=9)\n', (18811, 18841), False, 'from sklearn.externals import joblib\n'), ((19269, 19326), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['(test_target >= 236)', 'predictionsX0'], {}), '(test_target >= 236, predictionsX0)\n', (19291, 19326), False, 'from sklearn.metrics import precision_recall_curve\n'), ((20599, 20610), 'time.time', 'time.time', ([], {}), '()\n', (20608, 20610), False, 'import time\n'), ((20747, 20789), 'sklearn.externals.joblib.dump', 'joblib.dump', (['gbm_reg', 'filename'], {'compress': '(9)'}), '(gbm_reg, filename, compress=9)\n', (20758, 20789), False, 'from sklearn.externals import joblib\n'), ((21218, 21275), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['(test_target >= 236)', 'predictionsX0'], {}), '(test_target >= 236, predictionsX0)\n', (21240, 21275), False, 'from sklearn.metrics import precision_recall_curve\n'), ((22970, 23200), 'pandas.Series', 'pd.Series', (['d'], {'index': "['RF_precision2p', 'RF_recall2p', 'RF_thresh2p', 'RF_precision5p',\n 'RF_recall5p', 'RF_thresh5p', 'GBM_precision2p', 'GBM_recall2p',\n 'GBM_thresh2p', 'GBM_precision5p', 'GBM_recall5p', 'GBM_thresh5p']"}), "(d, index=['RF_precision2p', 'RF_recall2p', 'RF_thresh2p',\n 'RF_precision5p', 'RF_recall5p', 'RF_thresh5p', 'GBM_precision2p',\n 'GBM_recall2p', 'GBM_thresh2p', 'GBM_precision5p', 'GBM_recall5p',\n 'GBM_thresh5p'])\n", (22979, 23200), True, 'import pandas as pd\n'), ((24294, 24315), 'sklearn.externals.joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (24305, 24315), False, 'from sklearn.externals import joblib\n'), ((24979, 25000), 'sklearn.externals.joblib.load', 'joblib.load', (['filename'], {}), '(filename)\n', (24990, 25000), False, 'from sklearn.externals import joblib\n'), ((18699, 18723), 'numpy.log', 'np.log', (['(train_target + 1)'], {}), '(train_target + 1)\n', (18705, 18723), True, 'import numpy as np\n'), ((20644, 20668), 'numpy.log', 'np.log', (['(train_target + 1)'], {}), '(train_target + 1)\n', (20650, 20668), True, 'import numpy as np\n'), ((27020, 27037), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (27026, 27037), True, 'import numpy as np\n'), ((27036, 27054), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (27042, 27054), True, 'import numpy as np\n'), ((27270, 27287), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (27276, 27287), True, 'import numpy as np\n'), ((27286, 27304), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (27292, 27304), True, 'import numpy as np\n'), ((27491, 27508), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (27497, 27508), True, 'import numpy as np\n'), ((27507, 27525), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (27513, 27525), True, 'import numpy as np\n'), ((27885, 27902), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (27891, 27902), True, 'import numpy as np\n'), ((27901, 27919), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (27907, 27919), True, 'import numpy as np\n'), ((28098, 28115), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (28104, 28115), True, 'import numpy as np\n'), ((28114, 28132), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (28120, 28132), True, 'import numpy as np\n'), ((28492, 28509), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (28498, 28509), True, 'import numpy as np\n'), ((28508, 28526), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (28514, 28526), True, 'import numpy as np\n'), ((29051, 29068), 'numpy.int', 'np.int', (['(rec * 100)'], {}), '(rec * 100)\n', (29057, 29068), True, 'import numpy as np\n'), ((29067, 29085), 'numpy.int', 'np.int', (['(prec * 100)'], {}), '(prec * 100)\n', (29073, 29085), True, 'import numpy as np\n'), ((6935, 6971), 'sklearn.preprocessing.LabelEncoder', 'sklearn.preprocessing.LabelEncoder', ([], {}), '()\n', (6969, 6971), False, 'import sklearn\n'), ((19566, 19592), 'numpy.abs', 'np.abs', (['(threshV - RFthresh)'], {}), '(threshV - RFthresh)\n', (19572, 19592), True, 'import numpy as np\n'), ((19789, 19818), 'numpy.abs', 'np.abs', (['(threshV - RFthreshAlt)'], {}), '(threshV - RFthreshAlt)\n', (19795, 19818), True, 'import numpy as np\n'), ((20070, 20086), 'numpy.int', 'np.int', (['RFthresh'], {}), '(RFthresh)\n', (20076, 20086), True, 'import numpy as np\n'), ((20087, 20113), 'numpy.int', 'np.int', (['(RF_rec * 100 + 0.4)'], {}), '(RF_rec * 100 + 0.4)\n', (20093, 20113), True, 'import numpy as np\n'), ((20109, 20136), 'numpy.int', 'np.int', (['(RF_prec * 100 + 0.4)'], {}), '(RF_prec * 100 + 0.4)\n', (20115, 20136), True, 'import numpy as np\n'), ((20259, 20278), 'numpy.int', 'np.int', (['RFthreshAlt'], {}), '(RFthreshAlt)\n', (20265, 20278), True, 'import numpy as np\n'), ((20279, 20308), 'numpy.int', 'np.int', (['(RF_recAlt * 100 + 0.4)'], {}), '(RF_recAlt * 100 + 0.4)\n', (20285, 20308), True, 'import numpy as np\n'), ((20304, 20334), 'numpy.int', 'np.int', (['(RF_precAlt * 100 + 0.4)'], {}), '(RF_precAlt * 100 + 0.4)\n', (20310, 20334), True, 'import numpy as np\n'), ((21515, 21542), 'numpy.abs', 'np.abs', (['(threshV - GBMthresh)'], {}), '(threshV - GBMthresh)\n', (21521, 21542), True, 'import numpy as np\n'), ((21742, 21772), 'numpy.abs', 'np.abs', (['(threshV - GBMthreshAlt)'], {}), '(threshV - GBMthreshAlt)\n', (21748, 21772), True, 'import numpy as np\n'), ((22023, 22040), 'numpy.int', 'np.int', (['GBMthresh'], {}), '(GBMthresh)\n', (22029, 22040), True, 'import numpy as np\n'), ((22041, 22068), 'numpy.int', 'np.int', (['(GBM_rec * 100 + 0.4)'], {}), '(GBM_rec * 100 + 0.4)\n', (22047, 22068), True, 'import numpy as np\n'), ((22064, 22092), 'numpy.int', 'np.int', (['(GBM_prec * 100 + 0.4)'], {}), '(GBM_prec * 100 + 0.4)\n', (22070, 22092), True, 'import numpy as np\n'), ((22216, 22236), 'numpy.int', 'np.int', (['GBMthreshAlt'], {}), '(GBMthreshAlt)\n', (22222, 22236), True, 'import numpy as np\n'), ((22237, 22267), 'numpy.int', 'np.int', (['(GBM_recAlt * 100 + 0.4)'], {}), '(GBM_recAlt * 100 + 0.4)\n', (22243, 22267), True, 'import numpy as np\n'), ((22263, 22294), 'numpy.int', 'np.int', (['(GBM_precAlt * 100 + 0.4)'], {}), '(GBM_precAlt * 100 + 0.4)\n', (22269, 22294), True, 'import numpy as np\n'), ((10804, 10823), 'numpy.exp', 'np.exp', (['predictions'], {}), '(predictions)\n', (10810, 10823), True, 'import numpy as np\n'), ((11151, 11170), 'numpy.exp', 'np.exp', (['predictions'], {}), '(predictions)\n', (11157, 11170), True, 'import numpy as np\n'), ((10958, 10978), 'numpy.exp', 'np.exp', (['predictions0'], {}), '(predictions0)\n', (10964, 10978), True, 'import numpy as np\n'), ((20465, 20476), 'time.time', 'time.time', ([], {}), '()\n', (20474, 20476), False, 'import time\n'), ((22422, 22433), 'time.time', 'time.time', ([], {}), '()\n', (22431, 22433), False, 'import time\n')]
|
# This function is copied from https://github.com/Rubikplayer/flame-fitting
'''
Copyright 2015 <NAME>, <NAME> and the Max Planck Gesellschaft. All rights reserved.
This software is provided for research purposes only.
By using this software you agree to the terms of the SMPL Model license here http://smpl.is.tue.mpg.de/license
More information about SMPL is available here: http://smpl.is.tue.mpg.de
For comments or questions, please email us at: <EMAIL>
About this file:
================
This module defines the mapping of joint-angles to pose-blendshapes.
Modules included:
- posemap:
computes the joint-to-pose blend shape mapping given a mapping type as input
'''
import chumpy as ch
import numpy as np
import cv2
class Rodrigues(ch.Ch):
dterms = 'rt'
def compute_r(self):
return cv2.Rodrigues(self.rt.r)[0]
def compute_dr_wrt(self, wrt):
if wrt is self.rt:
return cv2.Rodrigues(self.rt.r)[1].T
def lrotmin(p):
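    # Flatten (R(theta_j) - I) for every joint except the global root rotation;
    # this is the standard SMPL pose-blendshape feature vector.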
if isinstance(p, np.ndarray):
p = p.ravel()[3:]
return np.concatenate(
[(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel() for pp in p.reshape((-1, 3))]).ravel()
if p.ndim != 2 or p.shape[1] != 3:
p = p.reshape((-1, 3))
p = p[1:]
return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel() for pp in p]).ravel()
def posemap(s):
if s == 'lrotmin':
return lrotmin
else:
raise Exception('Unknown posemapping: %s' % (str(s),))
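# Minimal usage sketch (hypothetical values; assumes a 24-joint SMPL pose vector):
#   pose = np.zeros(24 * 3)
#   features = posemap('lrotmin')(pose)  # -> (23 * 9,) = 207 pose-blendshape features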
|
[
"chumpy.eye",
"cv2.Rodrigues",
"numpy.array",
"numpy.eye"
] |
[((832, 856), 'cv2.Rodrigues', 'cv2.Rodrigues', (['self.rt.r'], {}), '(self.rt.r)\n', (845, 856), False, 'import cv2\n'), ((946, 970), 'cv2.Rodrigues', 'cv2.Rodrigues', (['self.rt.r'], {}), '(self.rt.r)\n', (959, 970), False, 'import cv2\n'), ((1329, 1338), 'chumpy.eye', 'ch.eye', (['(3)'], {}), '(3)\n', (1335, 1338), True, 'import chumpy as ch\n'), ((1139, 1148), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (1145, 1148), True, 'import numpy as np\n'), ((1120, 1132), 'numpy.array', 'np.array', (['pp'], {}), '(pp)\n', (1128, 1132), True, 'import numpy as np\n')]
|
import os
from pathlib import Path
import pandas as pd
import numpy as np
import math
from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow
def see_maps_location(lat, lon):
print(f'https://www.google.com.au/maps/search/{lat},{lon}')
def get_complete_station_years(path):
"""
Figure out which stations have complete histories
"""
station_years = pd.DataFrame()
years = os.listdir(path/'raw')
for y in years:
this_station_year = pd.DataFrame.from_dict({
'id':[s[:-4] for s in os.listdir(path/'raw'/f'{y}')],
'year':y
})
station_years = pd.concat([station_years, this_station_year])
files_per_station = station_years['id'].value_counts()
stations_with_complete_history = files_per_station==len(station_years['year'].unique())
is_complete_station_year = station_years['id'].isin(files_per_station[stations_with_complete_history].index)
complete_station_years = station_years[is_complete_station_year].sort_values(['id','year'])
complete_station_years.reset_index(inplace=True, drop=True)
stations = complete_station_years['id'].unique()
return stations, complete_station_years
def process_station_data(df):
"""
Map the raw data from weather obs csv file to numeric columns in DataFrame
"""
df.columns = map(str.lower, df.columns)
timef = ['station','date','report_type']
# parse out information from each of the relevant columns
# data dictionary can be found at https://www.ncei.noaa.gov/data/global-hourly/doc/isd-format-document.pdf
wnd, ceil, vis, tmp = split_wnd(df), split_ceil(df), split_vis(df), split_tmp(df)
wndf, ceilf, visf, tmpf = ['wnd_speed'], ['ceil','ceil_height'], ['vis_distance'], ['tmp']
rain = split_liquid_precip(df)
snow = split_snow(df)
df['total_precip'] = rain['liquid_precip_depth_dimension'] + snow['snow_equivalent_water_depth_dimension']
slim = pd.concat([
df[timef],
tmp['tmp'],
rain['liquid_precip_depth_dimension'], snow['snow_equivalent_water_depth_dimension'], df['total_precip'],
wnd[wndf], ceil[ceilf], vis[visf],
] , axis=1)
# remove "Airways special report" records, 'SY-SA' records
slim = slim[slim['report_type'] != 'SAOSP']
# remove duplicated records by time
slim = slim[~slim.date.duplicated()]
slim.drop(['report_type'], axis=1, inplace=True)
metadata = df[['station','latitude','longitude','elevation','name']].head(1)
return metadata, slim
def get_all_station_data(path, station, years):
"""
Sift through all the years with this station included, read the data, clean it
"""
station_dfs = list()
for year in years:
this_year = pd.read_csv(
path/'raw'/f'{year}'/f'{station}.csv',
encoding='utf-8',
parse_dates=['DATE'],
low_memory=False,
dtype={'STATION': 'object', 'LATITUDE': np.float32,'LONGITUDE': np.float32,
'ELEVATION': np.float32, 'NAME': str, 'REPORT_TYPE':str,
'TMP': str,
},
)
# don't use this station if any of the years have less than two observations per day
if this_year.shape[0] < 365 * 2:
metadata, _ = process_station_data(this_year)
else:
metadata, cleaned_data = process_station_data(this_year)
cleaned_data['year'] = year
station_dfs.append(cleaned_data)
if len(station_dfs) > 0:
station_data = pd.concat(station_dfs)
# time series interpolation only works with datetime index
station_data.set_index('date', inplace=True, drop=False)
station_data = interpolate_measurements(station_data)
station_data.station = station
station_data.reset_index(inplace=True, drop=True)
else:
        # filter out stations with less reliable data
station_data = None
return metadata, station_data
def interpolate_measurements(station_data):
"""
Create a baseline frequency of measurements, fill in the gaps
"""
base = pd.DataFrame(
index = pd.date_range(
start=str(min(station_data.date).year), end=str(max(station_data.date).year+1),
freq='H', closed='left'
)
)
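    # Left-join the observations onto this hourly skeleton so every hour of every
    # year has a row that the time-based interpolation below can fill.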
df = pd.merge(base, station_data, how='left', left_index=True, right_index=True)
df['date'] = df.index.values
df['tmp'] = df['tmp'].interpolate(method='time', limit_direction='both')
# avoid warning about Nan mean operation
if (df['vis_distance'].isnull().sum() == df['vis_distance'].shape[0]):
        df['vis_distance'] = df['vis_distance'].fillna(0)
else:
df['vis_distance'] = df['vis_distance'].fillna(df['vis_distance'].median())
df['wnd_speed'] = df['wnd_speed'].interpolate(method='time', limit_direction='both')
df['ceil'] = df['ceil'].fillna(0)
df['ceil_height'] = df['ceil_height'].fillna(0)
df['liquid_precip_depth_dimension'] = df['liquid_precip_depth_dimension'].fillna(0)
df['snow_equivalent_water_depth_dimension'] = df['snow_equivalent_water_depth_dimension'].fillna(0)
df['total_precip'] = df['total_precip'].fillna(0)
return df
def collect_data_from_csvs(PATH, sample_size=None, shuffle=True):
stations, station_years = get_complete_station_years(Path(PATH))
    if shuffle: np.random.shuffle(stations)
    if sample_size is not None and sample_size < len(stations):
        g = max(1, int(sample_size/10))
        station_iterator = stations[0:int(sample_size)]
    else:
        g = 100
        station_iterator = stations
c=0
dfs = list()
metas = list()
print(f'Iterating through {len(station_iterator)} station file sets')
for station in station_iterator:
years = station_years['year'][station_years['id']==station]
metadata, station_data = get_all_station_data(Path(PATH), station, years)
if station_data is None:
pass
else:
c+=1
if c % g == 0:
print(f'{c} - '+metadata.to_csv(None, header=False, index=False)[:-1])
dfs.append(station_data)
metas.append(metadata)
metadata = pd.concat(metas)
df = pd.concat(dfs)
df = df.drop(['year'],axis=1)
df.station = df.station.astype('category')
metadata.station = metadata.station.astype('category')
# get rid of stations with missing info (already having tried to interpolate)
notnull_counts = df.groupby('station').apply(lambda c: c.notnull().sum())
legit_stations = notnull_counts[(notnull_counts.apply(min, axis=1) == notnull_counts.apply(max).max())].index
df = df[df.station.apply(lambda s: s in legit_stations)]
metadata = metadata[metadata.station.apply(lambda s: s in legit_stations)]
df.sort_values(['station','date'], inplace=True)
df.reset_index(drop=True, inplace=True)
metadata.sort_values(['station'], inplace=True)
metadata.reset_index(drop=True, inplace=True)
return df, metadata
def get_city_data(PATH, metadata, pop_threshold=1e6):
print('Getting populous cities...')
raw_cities = pd.read_csv(PATH,
low_memory=False,
encoding='utf-8',
dtype={
'Country':'category',
'City': 'object',
'AccentCity': 'object',
'Region': 'category',
'Population': 'float32',
'Latitude': 'float32',
'Longitude': 'float32',
})
pop = raw_cities[raw_cities.Population > pop_threshold].copy()
pop.sort_values('Population', ascending=False, inplace=True)
cities = pop[~pop[['Latitude','Longitude']].duplicated()]
cities.reset_index(drop=True, inplace=True)
clos = cities.apply(find_closest_station, metadata=metadata, axis=1).apply(pd.Series)
clos.columns=['station','closest_station_distance_km']
mrgd = pd.merge(cities, clos, left_index=True, right_index=True, how='left')
return mrgd.copy()
def distance(origin, destination):
"""
Haversince distance from https://gist.github.com/rochacbruno/2883505
Returns distance in kilometers
"""
lat1, lon1 = origin
lat2, lon2 = destination
radius = 6371 # km radius of Earth
dlat = math.radians(lat2-lat1)
dlon = math.radians(lon2-lon1)
a = math.sin(dlat/2) * math.sin(dlat/2) + math.cos(math.radians(lat1)) \
* math.cos(math.radians(lat2)) * math.sin(dlon/2) * math.sin(dlon/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = radius * c
return d
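# Example (approximate): distance((41.88, -87.63), (40.71, -74.01)) -> ~1145 km
# (Chicago to New York city centers).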
def find_distance(m, coords):
return distance((m['latitude'], m['longitude']), coords)
def find_closest_station(p, metadata):
coords = (p.Latitude, p.Longitude)
d = metadata.apply(find_distance, axis=1, coords=coords)
return metadata.loc[d.idxmin()].station, min(d)
if __name__ == '__main__':
    PATH = '/home/ubuntu/climate-classification/data'
SAMPLE_SIZE = 4000
df, metadata = collect_data_from_csvs(PATH, sample_size=SAMPLE_SIZE, shuffle=True)
cities = get_city_data('./data/worldcitiespop.csv', metadata)
closest_cities = cities.groupby('station').apply(lambda d: d.closest_station_distance_km.idxmin())
ma = cities.loc[closest_cities]
slim = df[df.station.apply(lambda s: s in ma.station.values)].copy()
# need to reset categories so .groupby().apply() doesn't pick up the old ones
for d in (slim, ma, cities):
d['station'] = d['station'].astype(str).astype('category')
d.reset_index(drop=True, inplace=True)
print('Saving...')
slim.to_feather(f'{PATH}/df')
ma.to_feather(f'{PATH}/metadata')
cities.to_feather(f'{PATH}/cities')
print('Finished')
|
[
"pandas.DataFrame",
"os.listdir",
"parsing.split_wnd",
"math.sqrt",
"pandas.read_csv",
"math.radians",
"pandas.merge",
"parsing.split_vis",
"parsing.split_tmp",
"parsing.split_liquid_precip",
"math.sin",
"parsing.split_ceil",
"pathlib.Path",
"parsing.split_snow",
"pandas.concat",
"numpy.random.shuffle"
] |
[((414, 428), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (426, 428), True, 'import pandas as pd\n'), ((441, 465), 'os.listdir', 'os.listdir', (["(path / 'raw')"], {}), "(path / 'raw')\n", (451, 465), False, 'import os\n'), ((1817, 1840), 'parsing.split_liquid_precip', 'split_liquid_precip', (['df'], {}), '(df)\n', (1836, 1840), False, 'from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow\n'), ((1852, 1866), 'parsing.split_snow', 'split_snow', (['df'], {}), '(df)\n', (1862, 1866), False, 'from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow\n'), ((1994, 2186), 'pandas.concat', 'pd.concat', (["[df[timef], tmp['tmp'], rain['liquid_precip_depth_dimension'], snow[\n 'snow_equivalent_water_depth_dimension'], df['total_precip'], wnd[wndf],\n ceil[ceilf], vis[visf]]"], {'axis': '(1)'}), "([df[timef], tmp['tmp'], rain['liquid_precip_depth_dimension'],\n snow['snow_equivalent_water_depth_dimension'], df['total_precip'], wnd[\n wndf], ceil[ceilf], vis[visf]], axis=1)\n", (2003, 2186), True, 'import pandas as pd\n'), ((4375, 4450), 'pandas.merge', 'pd.merge', (['base', 'station_data'], {'how': '"""left"""', 'left_index': '(True)', 'right_index': '(True)'}), "(base, station_data, how='left', left_index=True, right_index=True)\n", (4383, 4450), True, 'import pandas as pd\n'), ((6295, 6311), 'pandas.concat', 'pd.concat', (['metas'], {}), '(metas)\n', (6304, 6311), True, 'import pandas as pd\n'), ((6321, 6335), 'pandas.concat', 'pd.concat', (['dfs'], {}), '(dfs)\n', (6330, 6335), True, 'import pandas as pd\n'), ((7237, 7469), 'pandas.read_csv', 'pd.read_csv', (['PATH'], {'low_memory': '(False)', 'encoding': '"""utf-8"""', 'dtype': "{'Country': 'category', 'City': 'object', 'AccentCity': 'object', 'Region':\n 'category', 'Population': 'float32', 'Latitude': 'float32', 'Longitude':\n 'float32'}"}), "(PATH, low_memory=False, encoding='utf-8', dtype={'Country':\n 'category', 'City': 'object', 'AccentCity': 'object', 'Region':\n 'category', 'Population': 'float32', 'Latitude': 'float32', 'Longitude':\n 'float32'})\n", (7248, 7469), True, 'import pandas as pd\n'), ((8068, 8137), 'pandas.merge', 'pd.merge', (['cities', 'clos'], {'left_index': '(True)', 'right_index': '(True)', 'how': '"""left"""'}), "(cities, clos, left_index=True, right_index=True, how='left')\n", (8076, 8137), True, 'import pandas as pd\n'), ((8431, 8456), 'math.radians', 'math.radians', (['(lat2 - lat1)'], {}), '(lat2 - lat1)\n', (8443, 8456), False, 'import math\n'), ((8466, 8491), 'math.radians', 'math.radians', (['(lon2 - lon1)'], {}), '(lon2 - lon1)\n', (8478, 8491), False, 'import math\n'), ((660, 705), 'pandas.concat', 'pd.concat', (['[station_years, this_station_year]'], {}), '([station_years, this_station_year])\n', (669, 705), True, 'import pandas as pd\n'), ((1651, 1664), 'parsing.split_wnd', 'split_wnd', (['df'], {}), '(df)\n', (1660, 1664), False, 'from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow\n'), ((1666, 1680), 'parsing.split_ceil', 'split_ceil', (['df'], {}), '(df)\n', (1676, 1680), False, 'from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow\n'), ((1682, 1695), 'parsing.split_vis', 'split_vis', (['df'], {}), '(df)\n', (1691, 1695), False, 'from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow\n'), ((1697, 1710), 'parsing.split_tmp', 'split_tmp', (['df'], {}), '(df)\n', (1706, 1710), False, 'from parsing import split_tmp, split_wnd, split_ceil, split_vis, split_liquid_precip, split_snow\n'), ((2792, 3067), 'pandas.read_csv', 'pd.read_csv', (["(path / 'raw' / f'{year}' / f'{station}.csv')"], {'encoding': '"""utf-8"""', 'parse_dates': "['DATE']", 'low_memory': '(False)', 'dtype': "{'STATION': 'object', 'LATITUDE': np.float32, 'LONGITUDE': np.float32,\n 'ELEVATION': np.float32, 'NAME': str, 'REPORT_TYPE': str, 'TMP': str}"}), "(path / 'raw' / f'{year}' / f'{station}.csv', encoding='utf-8',\n parse_dates=['DATE'], low_memory=False, dtype={'STATION': 'object',\n 'LATITUDE': np.float32, 'LONGITUDE': np.float32, 'ELEVATION': np.\n float32, 'NAME': str, 'REPORT_TYPE': str, 'TMP': str})\n", (2803, 3067), True, 'import pandas as pd\n'), ((3602, 3624), 'pandas.concat', 'pd.concat', (['station_dfs'], {}), '(station_dfs)\n', (3611, 3624), True, 'import pandas as pd\n'), ((5396, 5406), 'pathlib.Path', 'Path', (['PATH'], {}), '(PATH)\n', (5400, 5406), False, 'from pathlib import Path\n'), ((5424, 5451), 'numpy.random.shuffle', 'np.random.shuffle', (['stations'], {}), '(stations)\n', (5441, 5451), True, 'import numpy as np\n'), ((5967, 5977), 'pathlib.Path', 'Path', (['PATH'], {}), '(PATH)\n', (5971, 5977), False, 'from pathlib import Path\n'), ((8498, 8516), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (8506, 8516), False, 'import math\n'), ((8517, 8535), 'math.sin', 'math.sin', (['(dlat / 2)'], {}), '(dlat / 2)\n', (8525, 8535), False, 'import math\n'), ((8627, 8645), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (8635, 8645), False, 'import math\n'), ((8667, 8679), 'math.sqrt', 'math.sqrt', (['a'], {}), '(a)\n', (8676, 8679), False, 'import math\n'), ((8681, 8697), 'math.sqrt', 'math.sqrt', (['(1 - a)'], {}), '(1 - a)\n', (8690, 8697), False, 'import math\n'), ((8608, 8626), 'math.sin', 'math.sin', (['(dlon / 2)'], {}), '(dlon / 2)\n', (8616, 8626), False, 'import math\n'), ((572, 605), 'os.listdir', 'os.listdir', (["(path / 'raw' / f'{y}')"], {}), "(path / 'raw' / f'{y}')\n", (582, 605), False, 'import os\n'), ((8545, 8563), 'math.radians', 'math.radians', (['lat1'], {}), '(lat1)\n', (8557, 8563), False, 'import math\n'), ((8586, 8604), 'math.radians', 'math.radians', (['lat2'], {}), '(lat2)\n', (8598, 8604), False, 'import math\n')]
|
import torch
import numpy as np
import pandas as pd
import os
from RBM import RBM
from load_dataset import MNIST
import cv2
from PIL import Image
from matplotlib import pyplot as plt
def image_beautifier(names, final_name):
image_names = sorted(names)
	images = [Image.open(x) for x in image_names]
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
new_im.save(final_name)
img = cv2.imread(final_name)
img = cv2.resize(img, (img.shape[1]//2, img.shape[0]//2))
cv2.imwrite(final_name, img)
def gen_displayable_images():
suffix = '_image.jpg'
for n in range(10):
prefix = './images_RBM/digitwise/'+str(n)+'_'
names = ['original', 'hidden', 'reconstructed']
names = [prefix+name+suffix for name in names]
image_beautifier(names, './images_RBM/'+str(n)+'.jpg')
if __name__ == '__main__':
mnist = MNIST()
train_x, train_y, test_x, test_y = mnist.load_dataset()
vn = train_x.shape[1]
hn = 2500
rbm = RBM(vn, hn)
rbm.load_rbm('mnist_trained_rbm.pt')
for n in range(10):
x = test_x[np.where(test_y==n)[0][0]]
x = x.unsqueeze(0)
hidden_image = []
gen_image = []
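		# Run k alternating Gibbs steps (v -> h -> v) and average the sampled
		# activations to smooth out the sampling noise.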
for k in range(rbm.k):
_, hk = rbm.sample_h(x)
_, vk = rbm.sample_v(hk)
gen_image.append(vk.numpy())
hidden_image.append(hk.numpy())
hidden_image = np.array(hidden_image)
hidden_image = np.mean(hidden_image, axis=0)
gen_image = np.array(gen_image)
gen_image = np.mean(gen_image, axis=0)
image = x.numpy()
image = mnist.inv_transform_normalizer(image)[0]
hidden_image = (hidden_image*255)[0]
gen_image = mnist.inv_transform_normalizer(gen_image)[0]
image = np.reshape(image, (28, 28))
hidden_image = np.reshape(hidden_image, (50, 50))
gen_image = np.reshape(gen_image, (28, 28))
image = image.astype(np.int)
hidden_image = hidden_image.astype(np.int)
gen_image = gen_image.astype(np.int)
print(image.shape, hidden_image.shape, gen_image.shape)
prefix = './images_RBM/digitwise/'+str(n)+'_'
suffix = '_image.jpg'
plt.cla()
plt.imshow(image, cmap="gray")
plt.title('original image')
plt.savefig(prefix+'original'+suffix)
plt.cla()
plt.imshow(hidden_image, cmap="gray")
plt.title('hidden image')
plt.savefig(prefix+'hidden'+suffix)
plt.cla()
plt.imshow(gen_image, cmap="gray")
plt.title('reconstructed image')
plt.savefig(prefix+'reconstructed'+suffix)
gen_displayable_images()
|
[
"matplotlib.pyplot.title",
"PIL.Image.new",
"matplotlib.pyplot.savefig",
"load_dataset.MNIST",
"RBM.RBM",
"cv2.imwrite",
"matplotlib.pyplot.imshow",
"PIL.Image.open",
"cv2.imread",
"numpy.mean",
"numpy.array",
"numpy.reshape",
"matplotlib.pyplot.cla",
"numpy.where",
"cv2.resize"
] |
[((410, 453), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(total_width, max_height)'], {}), "('RGB', (total_width, max_height))\n", (419, 453), False, 'from PIL import Image\n'), ((579, 601), 'cv2.imread', 'cv2.imread', (['final_name'], {}), '(final_name)\n', (589, 601), False, 'import cv2\n'), ((609, 664), 'cv2.resize', 'cv2.resize', (['img', '(img.shape[1] // 2, img.shape[0] // 2)'], {}), '(img, (img.shape[1] // 2, img.shape[0] // 2))\n', (619, 664), False, 'import cv2\n'), ((662, 690), 'cv2.imwrite', 'cv2.imwrite', (['final_name', 'img'], {}), '(final_name, img)\n', (673, 690), False, 'import cv2\n'), ((1007, 1014), 'load_dataset.MNIST', 'MNIST', ([], {}), '()\n', (1012, 1014), False, 'from load_dataset import MNIST\n'), ((1114, 1125), 'RBM.RBM', 'RBM', (['vn', 'hn'], {}), '(vn, hn)\n', (1117, 1125), False, 'from RBM import RBM\n'), ((266, 279), 'PIL.Image.open', 'Image.open', (['x'], {}), '(x)\n', (276, 279), False, 'from PIL import Image\n'), ((1449, 1471), 'numpy.array', 'np.array', (['hidden_image'], {}), '(hidden_image)\n', (1457, 1471), True, 'import numpy as np\n'), ((1489, 1518), 'numpy.mean', 'np.mean', (['hidden_image'], {'axis': '(0)'}), '(hidden_image, axis=0)\n', (1496, 1518), True, 'import numpy as np\n'), ((1533, 1552), 'numpy.array', 'np.array', (['gen_image'], {}), '(gen_image)\n', (1541, 1552), True, 'import numpy as np\n'), ((1567, 1593), 'numpy.mean', 'np.mean', (['gen_image'], {'axis': '(0)'}), '(gen_image, axis=0)\n', (1574, 1593), True, 'import numpy as np\n'), ((1775, 1802), 'numpy.reshape', 'np.reshape', (['image', '(28, 28)'], {}), '(image, (28, 28))\n', (1785, 1802), True, 'import numpy as np\n'), ((1820, 1854), 'numpy.reshape', 'np.reshape', (['hidden_image', '(50, 50)'], {}), '(hidden_image, (50, 50))\n', (1830, 1854), True, 'import numpy as np\n'), ((1869, 1900), 'numpy.reshape', 'np.reshape', (['gen_image', '(28, 28)'], {}), '(gen_image, (28, 28))\n', (1879, 1900), True, 'import numpy as np\n'), ((2156, 2165), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2163, 2165), True, 'from matplotlib import pyplot as plt\n'), ((2168, 2198), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""gray"""'}), "(image, cmap='gray')\n", (2178, 2198), True, 'from matplotlib import pyplot as plt\n'), ((2201, 2228), 'matplotlib.pyplot.title', 'plt.title', (['"""original image"""'], {}), "('original image')\n", (2210, 2228), True, 'from matplotlib import pyplot as plt\n'), ((2231, 2272), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + 'original' + suffix)"], {}), "(prefix + 'original' + suffix)\n", (2242, 2272), True, 'from matplotlib import pyplot as plt\n'), ((2272, 2281), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2279, 2281), True, 'from matplotlib import pyplot as plt\n'), ((2284, 2321), 'matplotlib.pyplot.imshow', 'plt.imshow', (['hidden_image'], {'cmap': '"""gray"""'}), "(hidden_image, cmap='gray')\n", (2294, 2321), True, 'from matplotlib import pyplot as plt\n'), ((2324, 2349), 'matplotlib.pyplot.title', 'plt.title', (['"""hidden image"""'], {}), "('hidden image')\n", (2333, 2349), True, 'from matplotlib import pyplot as plt\n'), ((2352, 2391), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + 'hidden' + suffix)"], {}), "(prefix + 'hidden' + suffix)\n", (2363, 2391), True, 'from matplotlib import pyplot as plt\n'), ((2391, 2400), 'matplotlib.pyplot.cla', 'plt.cla', ([], {}), '()\n', (2398, 2400), True, 'from matplotlib import pyplot as plt\n'), ((2403, 2437), 'matplotlib.pyplot.imshow', 'plt.imshow', (['gen_image'], {'cmap': '"""gray"""'}), "(gen_image, cmap='gray')\n", (2413, 2437), True, 'from matplotlib import pyplot as plt\n'), ((2440, 2472), 'matplotlib.pyplot.title', 'plt.title', (['"""reconstructed image"""'], {}), "('reconstructed image')\n", (2449, 2472), True, 'from matplotlib import pyplot as plt\n'), ((2475, 2521), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(prefix + 'reconstructed' + suffix)"], {}), "(prefix + 'reconstructed' + suffix)\n", (2486, 2521), True, 'from matplotlib import pyplot as plt\n'), ((1200, 1221), 'numpy.where', 'np.where', (['(test_y == n)'], {}), '(test_y == n)\n', (1208, 1221), True, 'import numpy as np\n')]
|
from setuptools import setup, find_packages
from setuptools.extension import Extension
from Cython.Build import cythonize
import numpy.distutils.misc_util
import argparse
import sys, os
import numpy as np
print(sys.argv)
parser = argparse.ArgumentParser(description='Build Cython Extension for CPU')
parser.add_argument('-n', dest="n", default=0, help="The device id")
args, unknown = parser.parse_known_args()
dev = args.n
sys.argv = ['cpu_setup.py', 'build_ext', '--inplace']
# Check that Cython is installed
try:
    from Cython.Distutils import build_ext
except ImportError:
    print("You don't seem to have Cython installed")
    sys.exit(1)
KOKKOS_DIR=os.environ["KOKKOS_DIR"]
os.environ["CC"] = "gcc -fopenmp"
os.environ["CXX"] = "g++ -fopenmp"
#include directories
inc_dirs = numpy.distutils.misc_util.get_numpy_include_dirs()
inc_dirs = inc_dirs + [KOKKOS_DIR]
inc_dirs = inc_dirs + [np.get_include()]
inc_dirs = inc_dirs + [KOKKOS_DIR+'/lib/include']
inc_dirs = inc_dirs + [KOKKOS_DIR+'/cpu_build/lib/include/']
inc_dirs = inc_dirs + [KOKKOS_DIR+'/cpu_build/core/']
# hmlp library directory
lib_dirs = [KOKKOS_DIR]
lib_dirs = lib_dirs + [KOKKOS_DIR+'/cpu_build/lib/lib64']
lib_dirs = lib_dirs + [KOKKOS_DIR+'/cpu_build/lib/lib']
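# Recursively walk dir and collect dotted module names for every .pyx file found.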
def scandir(dir, files=None):
    # avoid a shared mutable default argument across calls
    if files is None:
        files = []
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".pyx"):
            files.append(path.replace(os.path.sep, ".")[:-4])
        elif os.path.isdir(path):
            scandir(path, files)
    return files
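# Build a C++ setuptools Extension for one dotted module name, linked against kokkoscore.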
def makeExtension(extName):
extPath = extName.replace(".", os.path.sep)+".pyx"
return Extension(
extName,
[extPath],
include_dirs = inc_dirs,
language='c++',
libraries = ['kokkoscore'],
library_dirs = lib_dirs,
runtime_library_dirs = lib_dirs,
extra_compile_args=["-std=c++11","-O3", "-Wno-sign-compare", "-w"],
extra_link_args=["-lkokkoscore", "-Wl,--no-as-needed", "-ldl", "-lpthread"]
)
extNames = scandir("kokkos/cpu")
print(extNames)
extensions = [makeExtension(name) for name in extNames]
print(extensions)
setup(
name="kokkos_cpu",
packages=["kokkos_cpu"],
ext_modules=extensions,
package_data={
'':['*.pxd']
},
zip_safe=False,
include_package_data=True,
cmdclass = {'build_ext': build_ext}
)
|
[
"setuptools.setup",
"argparse.ArgumentParser",
"os.path.isdir",
"setuptools.extension.Extension",
"os.path.isfile",
"numpy.get_include",
"os.path.join",
"os.listdir",
"sys.exit"
] |
[((231, 300), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Build Cython Extension for CPU"""'}), "(description='Build Cython Extension for CPU')\n", (254, 300), False, 'import argparse\n'), ((2139, 2328), 'setuptools.setup', 'setup', ([], {'name': '"""kokkos_cpu"""', 'packages': "['kokkos_cpu']", 'ext_modules': 'extensions', 'package_data': "{'': ['*.pxd']}", 'zip_safe': '(False)', 'include_package_data': '(True)', 'cmdclass': "{'build_ext': build_ext}"}), "(name='kokkos_cpu', packages=['kokkos_cpu'], ext_modules=extensions,\n package_data={'': ['*.pxd']}, zip_safe=False, include_package_data=True,\n cmdclass={'build_ext': build_ext})\n", (2144, 2328), False, 'from setuptools import setup, find_packages\n'), ((1277, 1292), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (1287, 1292), False, 'import sys, os\n'), ((1633, 1944), 'setuptools.extension.Extension', 'Extension', (['extName', '[extPath]'], {'include_dirs': 'inc_dirs', 'language': '"""c++"""', 'libraries': "['kokkoscore']", 'library_dirs': 'lib_dirs', 'runtime_library_dirs': 'lib_dirs', 'extra_compile_args': "['-std=c++11', '-O3', '-Wno-sign-compare', '-w']", 'extra_link_args': "['-lkokkoscore', '-Wl,--no-as-needed', '-ldl', '-lpthread']"}), "(extName, [extPath], include_dirs=inc_dirs, language='c++',\n libraries=['kokkoscore'], library_dirs=lib_dirs, runtime_library_dirs=\n lib_dirs, extra_compile_args=['-std=c++11', '-O3', '-Wno-sign-compare',\n '-w'], extra_link_args=['-lkokkoscore', '-Wl,--no-as-needed', '-ldl',\n '-lpthread'])\n", (1642, 1944), False, 'from setuptools.extension import Extension\n'), ((624, 635), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (632, 635), False, 'import sys, os\n'), ((884, 900), 'numpy.get_include', 'np.get_include', ([], {}), '()\n', (898, 900), True, 'import numpy as np\n'), ((1309, 1332), 'os.path.join', 'os.path.join', (['dir', 'file'], {}), '(dir, file)\n', (1321, 1332), False, 'import sys, os\n'), ((1344, 1364), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1358, 1364), False, 'import sys, os\n'), ((1467, 1486), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (1480, 1486), False, 'import sys, os\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 11:52:12 2019
@author: z5095790
"""
import numpy as np
import copy
import pickle
import os
from keras.models import load_model
class Node:
"""Binary tree with Ture and False Branches"""
def __init__(self, col=-1, value = None, parentID = None, ID = None, branch=None, results=None, numSamples=0, memory=None, leaf =0):
self.col = col
self.value = value
self.parentID = parentID
self.ID = ID
self.branch = branch
        self.results = results  # hidden-unit activation results recorded at this node
        self.numSamples = numSamples  # number of training samples flowing through this leaf
self.memory = memory # store samples fall into that leaf
self.leaf = leaf # FALSE for nodes, TRUE for leaves
def load_weight_bias_path(path):
DNNModel = load_model(path)
weight1 = DNNModel.layers[0].get_weights()[0]
    bias1 = DNNModel.layers[0].get_weights()[1]
    weight2 = DNNModel.layers[2].get_weights()[0]
    bias2 = DNNModel.layers[2].get_weights()[1]
    weight3 = DNNModel.layers[4].get_weights()[0]
    bias3 = DNNModel.layers[4].get_weights()[1]
    weight = [weight1, weight2, weight3]
    bias = [bias1, bias2, bias3]
    return (weight, bias)
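# Count the leaves of a tree (list of Nodes); optionally also return the leaf IDs.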
def countLeaves(tree,showID = False):
count = 0
leafID_list = []
for i in range(0,len(tree)):
if tree[i].leaf == 1:
count += 1
leafID_list.append(tree[i].ID)
if showID:
return (count,leafID_list)
return count
def activationByLayer(hidden_nodes,activations):
    activations_by_layer = []
    startNode = 0
    for i in range(0, len(hidden_nodes)):
        layer_activation = []
        num_node_layer = hidden_nodes[i]
        for j in range(0, num_node_layer):
            layer_activation.append(activations[startNode + j])
        activations_by_layer.append(layer_activation)
        startNode = startNode + num_node_layer
    return activations_by_layer
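# Compose the per-layer weights into direct input-to-layer linear maps, zeroing the
# outgoing weights of hidden units that are inactive in the given activation pattern.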
def transformWeight(hidden_nodes,activations,weight,bias):
weight_input_to_layers = copy.deepcopy(weight)
bias_input_to_layers = copy.deepcopy(bias)
weight_layer_activated = copy.deepcopy(weight)
for i in range(0,len(activations)):
for j in range(0,hidden_nodes[i]):
if activations[i][j] == 0:
weight_layer_activated[i+1][j,:] = 0
weight_input_to_layers[i+1] = np.matmul(weight_input_to_layers[i],weight_layer_activated[i+1])
bias_input_to_layers[i+1] = np.matmul(bias_input_to_layers[i],weight_layer_activated[i+1]) + bias_input_to_layers[i+1]
return (weight_input_to_layers,bias_input_to_layers)
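# Translate each leaf of the tree into readable rules: one linear inequality over the
# raw inputs per hidden-unit activation, plus the output-layer decision rule.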
def extractRules(tree,hidden_nodes,num_input,num_output,weight,bias):
leaves_list = []
rule_list = []
rule_list_txt = []
num_leaves, leaves_list = countLeaves(tree, showID = True)
num_hidden_layers = len(hidden_nodes)
# create a list of names for input and output vectors
input_name_array = []
for i in range(0,num_input):
input_name_array.append('X_'+str(i))
output_name_array = []
for i in range(0,num_output):
output_name_array.append('Y_'+str(i))
# generate rules for each leaf
num_constraints = np.zeros([num_leaves,1])
for i in range(0,num_leaves):
leafResult = tree[leaves_list[i]].results
leafResultByLayer = activationByLayer(hidden_nodes,leafResult)
weight_input_to_layers, bias_input_to_layers = transformWeight(hidden_nodes,leafResultByLayer,weight,bias)
# rules for activating hidden layers
rule_txt = 'IF:\n\n'
rule = np.zeros([tree[leaves_list[i]].col+1,num_input+2])
startCol = 0
for j in range(0,num_hidden_layers):
for m in range(0,hidden_nodes[j]):
if startCol == tree[leaves_list[i]].col:
break
else:
for k in range(0,num_input):
if k == 0:
rule_txt = rule_txt + '(' + str(weight_input_to_layers[j][k,m]) + input_name_array[k] + ')'
else:
rule_txt = rule_txt + ' + (' + str(weight_input_to_layers[j][k,m]) + input_name_array[k] + ')'
rule[startCol,k] = weight_input_to_layers[j][k,m]
if leafResultByLayer[j][m] == 1:
rule_txt = rule_txt + ' > ' + str(-bias_input_to_layers[j][m]) + "\n"
rule[startCol,-1] = 1
else:
rule_txt = rule_txt + ' <= ' + str(-bias_input_to_layers[j][m]) + "\n"
rule[startCol,-1] = -1
rule[startCol,num_input] = bias_input_to_layers[j][m]
startCol += 1
rule_txt = rule_txt + "THEN hidden layer " + str(j) + " activation is: " + str(leafResultByLayer[j]) + "\n\n"
# rules for decision at output
for m in range(0,num_output):
if num_output == 1:
rule_txt = rule_txt + 'IF:\n'
else:
                result = '\t\t' + output_name_array[m] + ' = softmax('
for k in range(0,num_input):
if k == 0:
rule_txt = rule_txt + '(' + str(weight_input_to_layers[-1][k,m]) + input_name_array[k] + ')'
else:
rule_txt = rule_txt + ' + (' + str(weight_input_to_layers[-1][k,m]) + input_name_array[k] + ')'
rule[-1,k] = weight_input_to_layers[-1][k,m]
rule_txt = rule_txt + ' + (' + str(bias_input_to_layers[-1][m]) + ') > ' + str(0) + "\n"
rule_txt = rule_txt + 'THEN: class = 1, OTHERWISE class = 0.'
rule[-1,num_input] = bias_input_to_layers[-1][m]
rule_list_txt.append(rule_txt)
rule_list.append(rule)
num_constraints[i] = len(rule)-1
return (rule_list_txt, rule_list, num_constraints)
if __name__ == '__main__':
hidden_nodes = [5,5]
num_input = 2
num_output = 4
Tree_Directory = "./NumericalData/wall-following-2/Saved_Trees/"
Model_Directory = "./NumericalData/wall-following-2/Model/"
listdir_PrunedTrees = os.listdir(Tree_Directory)
listdir_PrunedTrees = listdir_PrunedTrees[0:100]
listdir_DNNmodel = os.listdir(Model_Directory)
#load tree, weight, bias
total_num_constraints = None
for i in range(0, 10):
weight, bias = load_weight_bias_path(Model_Directory + listdir_DNNmodel[i])
with open(Tree_Directory + listdir_PrunedTrees[i], 'rb') as f:
tree = pickle.load(f)
rule_list_txt, rule_list, num_constraints = extractRules(tree,hidden_nodes,num_input,num_output,weight,bias)
if total_num_constraints is None:
total_num_constraints = num_constraints
else:
total_num_constraints = np.vstack([total_num_constraints,num_constraints])
print("Tree %d extracted." %i)
|
[
"keras.models.load_model",
"copy.deepcopy",
"numpy.zeros",
"pickle.load",
"numpy.matmul",
"os.listdir",
"numpy.vstack"
] |
[((852, 868), 'keras.models.load_model', 'load_model', (['path'], {}), '(path)\n', (862, 868), False, 'from keras.models import load_model\n'), ((2177, 2198), 'copy.deepcopy', 'copy.deepcopy', (['weight'], {}), '(weight)\n', (2190, 2198), False, 'import copy\n'), ((2227, 2246), 'copy.deepcopy', 'copy.deepcopy', (['bias'], {}), '(bias)\n', (2240, 2246), False, 'import copy\n'), ((2277, 2298), 'copy.deepcopy', 'copy.deepcopy', (['weight'], {}), '(weight)\n', (2290, 2298), False, 'import copy\n'), ((3421, 3446), 'numpy.zeros', 'np.zeros', (['[num_leaves, 1]'], {}), '([num_leaves, 1])\n', (3429, 3446), True, 'import numpy as np\n'), ((6675, 6701), 'os.listdir', 'os.listdir', (['Tree_Directory'], {}), '(Tree_Directory)\n', (6685, 6701), False, 'import os\n'), ((6780, 6807), 'os.listdir', 'os.listdir', (['Model_Directory'], {}), '(Model_Directory)\n', (6790, 6807), False, 'import os\n'), ((2529, 2596), 'numpy.matmul', 'np.matmul', (['weight_input_to_layers[i]', 'weight_layer_activated[i + 1]'], {}), '(weight_input_to_layers[i], weight_layer_activated[i + 1])\n', (2538, 2596), True, 'import numpy as np\n'), ((3832, 3887), 'numpy.zeros', 'np.zeros', (['[tree[leaves_list[i]].col + 1, num_input + 2]'], {}), '([tree[leaves_list[i]].col + 1, num_input + 2])\n', (3840, 3887), True, 'import numpy as np\n'), ((2631, 2696), 'numpy.matmul', 'np.matmul', (['bias_input_to_layers[i]', 'weight_layer_activated[i + 1]'], {}), '(bias_input_to_layers[i], weight_layer_activated[i + 1])\n', (2640, 2696), True, 'import numpy as np\n'), ((7079, 7093), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7090, 7093), False, 'import pickle\n'), ((7374, 7425), 'numpy.vstack', 'np.vstack', (['[total_num_constraints, num_constraints]'], {}), '([total_num_constraints, num_constraints])\n', (7383, 7425), True, 'import numpy as np\n')]
|
import numpy as np
from finitewave.core.fibrosis import FibrosisPattern
class ScarGauss2DPattern(FibrosisPattern):
def __init__(self, mean, std, corr, size):
self.mean = mean
self.std = std
self.corr = corr
self.size = size
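    # Sample sites from a correlated 2D Gaussian and mark the hit cells on the mesh as fibrotic (value 2).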
def generate(self, size, mesh=None):
if mesh is None:
mesh = np.zeros(size)
covs = [[self.std[0]**2, self.std[0]*self.std[1]*self.corr],
[self.std[0]*self.std[1]*self.corr, self.std[1]**2]]
        nrm = np.random.multivariate_normal(self.mean, covs, self.size).T  # covs is the local covariance matrix built above
mesh[nrm[0].astype(int), nrm[1].astype(int)] = 2
return mesh
|
[
"numpy.zeros",
"numpy.random.multivariate_normal"
] |
[((349, 363), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (357, 363), True, 'import numpy as np\n'), ((517, 579), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mean', 'self.covs', 'self.size'], {}), '(self.mean, self.covs, self.size)\n', (546, 579), True, 'import numpy as np\n')]
|
"""
@Time: 2020/8/17 18:08
@Author: Zhirui (<NAME>)
@E-mail: <EMAIL>
@Program:
"""
import os
import random
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
import tensorflow as tf
from tensorflow.keras import layers, optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from utils.logger import logger
from data_process import get_apptointment_info, get_treat_info
from wtp.duration.predict_lgb_model import NUM_FEATURES, CATE_FEATURES
from wtp.duration.config_duration import DT_MODEL_DIR
def one_hot_encoding(processed_data):
cate_onehot_data = pd.DataFrame({})
update_cate_features = []
for feature in CATE_FEATURES:
tmp = pd.get_dummies(processed_data[[feature]], prefix=f"{feature}_")
update_cate_features.extend(tmp.columns)
cate_onehot_data = pd.concat([cate_onehot_data, tmp], axis=1)
cate_onehot_data['AppointmentSerNum'] = processed_data['AppointmentSerNum']
cate_onehot_data = cate_onehot_data.groupby(by='AppointmentSerNum').sum()
return cate_onehot_data, update_cate_features
def fill_num(processed_data):
imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
imp_mean.fit(processed_data[NUM_FEATURES])
processed_data.loc[:, NUM_FEATURES] = imp_mean.transform(processed_data[NUM_FEATURES])
return processed_data
def split_feature_label(all_data, update_cate_features):
patients_lst = []
train_samples_lst = []
label_samples_lst = []
for pat, sample in all_data.groupby('PatientSerNum'):
sample = sample[NUM_FEATURES + update_cate_features]
label_samples_lst.append(sample.iloc[-1, 1])
sample.iloc[-1, 1] = 0
patients_lst.append(pat)
train_samples_lst.append(sample.values)
return patients_lst, train_samples_lst, label_samples_lst
def process_sequence_data(processed_data):
logger.debug(f'Fill zero in nan!')
processed_data = fill_num(processed_data)
logger.debug(f'Process numerical features!')
num_features_single_value = ['age', 'Scheduled_duration', 'Actual_duration']
num_data_single_value = processed_data[num_features_single_value + ['AppointmentSerNum']]
num_data_single_value = num_data_single_value.groupby(by='AppointmentSerNum').mean()
num_data_single_value = num_data_single_value.reset_index(drop=False)
num_features_multiple_value = ['ImagesTaken', 'MU', 'MUCoeff', 'TreatmentTime']
num_data_multiple_value = processed_data[num_features_multiple_value + ['AppointmentSerNum']]
num_data_multiple_value = num_data_multiple_value.groupby(by='AppointmentSerNum').sum()
num_data_multiple_value = num_data_multiple_value.reset_index(drop=False)
num_data = pd.merge(num_data_single_value, num_data_multiple_value, on='AppointmentSerNum', how='inner')
logger.debug(f'Encode categorical features!')
cate_onehot_data, update_cate_features = one_hot_encoding(processed_data)
feature_data = pd.merge(num_data, cate_onehot_data, on='AppointmentSerNum', how='inner')
logger.debug(f'Add appointment information!')
information_features = ['PatientSerNum', 'AppointmentSerNum',
'ScheduledStartTime', 'ScheduledEndTime', 'ActualStartDate', 'ActualEndDate']# FractionNumber
information_data = processed_data[information_features]
information_data = information_data.drop_duplicates().reset_index(drop=True)
all_data = pd.merge(feature_data, information_data, on='AppointmentSerNum', how='inner')
logger.debug(f'Split features and labels!')
all_data = all_data.sort_values(by=['PatientSerNum', 'ScheduledStartTime']).reset_index(drop=True)
patients_lst, train_samples_lst, label_samples_lst = split_feature_label(all_data, update_cate_features)
return patients_lst, train_samples_lst, label_samples_lst
def sequence_model():
model = Sequential()
model.add(
layers.LSTM(128,
batch_input_shape=(None, None, 209),
dropout=0.1,
recurrent_dropout=0.5,
name='input'
)
)
# model_sequence.add(layers.LSTM(
# output_dim = 32,
# ))
    # stateful = True: the state of this batch is carried over into the next batch's training
model.add(layers.Dense(32))
model.add(layers.Dense(1))
return model
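# Infinite batch generator for fit_generator: pads each batch to a common length and
# reshuffles the data (with a fixed seed) once it has been exhausted.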
def generate_data(x_set, y_set, batch_size):
i = 0
while True:
feature_samples = []
label_samples = []
for b in range(batch_size):
if i == len(x_set):
i = 0
random.seed(1)
random.shuffle(x_set)
random.seed(1)
random.shuffle(y_set)
feature_samples.append(x_set[i])
label_samples.append(y_set[i])
i = i + 1
feature_samples = tf.keras.preprocessing.sequence.pad_sequences(np.array(feature_samples), padding="pre")
yield feature_samples, np.array(label_samples)
# yield ({'input': train_samples}, {'output': batch_samples})
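# Shuffle the samples, then split them 90% train / 5% validation / 5% test.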
def split_train_test(train_samples_lst, label_samples_lst, seed=1):
random.seed(seed)
random.shuffle(train_samples_lst)
random.seed(seed)
random.shuffle(label_samples_lst)
data_length = len(train_samples_lst)
train_set = train_samples_lst[: int(data_length * 0.9)]
label_train_set = label_samples_lst[: int(data_length * 0.9)]
val_set = train_samples_lst[int(data_length * 0.9): int(data_length * 0.95)]
label_val_set = label_samples_lst[int(data_length * 0.9): int(data_length * 0.95)]
test_set = train_samples_lst[int(data_length * 0.95):]
label_test_set = label_samples_lst[int(data_length * 0.95):]
return train_set, label_train_set, val_set, label_val_set, test_set, label_test_set
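# Train one LSTM with early stopping and best-model checkpointing, then report MAE/MAPE on the held-out test set.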
def train_and_test(train_set, label_train_set, val_set, label_val_set, test_set, label_test_set, seed, model_name):
logger.debug(f'Start training model for {seed}!')
model_sequence = sequence_model()
opt = optimizers.Adam(lr=0.001)
model_sequence.compile(
optimizer=opt,
loss='mae',
metrics=['mean_absolute_percentage_error', 'mae']
)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
min_delta=1e5,
patience=5,
verbose=1,
restore_best_weights=True)
checkpoint_path_industry = os.path.join(DT_MODEL_DIR, f"{model_name}.h5")
cp_callback_model = tf.keras.callbacks.ModelCheckpoint(checkpoint_path_industry,
monitor='val_loss',
verbose=0,
save_best_only=True,
save_weights_only=False,
mode='min',
period=1)
random.seed(seed)
random.shuffle(train_set)
random.seed(seed)
random.shuffle(label_train_set)
model_sequence.fit_generator(generate_data(train_set, label_train_set, 32),
steps_per_epoch=len(train_set) // 32,
epochs=100,
callbacks=[early_stopping, cp_callback_model],
# batch_size=256,
validation_data=generate_data(val_set, label_val_set, 32),
validation_steps=len(val_set) // 32)
logger.debug(f'Start testing model!')
padding_test_set = tf.keras.preprocessing.sequence.pad_sequences(np.array(test_set), padding="pre")
y_pred = model_sequence.predict(padding_test_set)
residual = np.array(label_test_set) - y_pred.reshape(-1, )
logger.debug(f"MAE: {np.mean(np.abs(residual))}")
logger.debug(f"MAPE: {100. * np.mean(np.abs(residual / np.array(label_test_set)))}")
return y_pred
if __name__ == '__main__':
processed_appointment_data = get_apptointment_info()
processed_treatment_data = get_treat_info()
processed_data = pd.merge(processed_appointment_data, processed_treatment_data,
on=['PatientSerNum', 'date'], how='inner')
_, train_samples_lst, label_samples_lst = process_sequence_data(processed_data)
train_set, label_train_set, val_set, label_val_set, test_set, label_test_set = \
split_train_test(train_samples_lst, label_samples_lst)
pred_y_ensemble = []
for seed in range(10):
pred_y = train_and_test(train_set, label_train_set, val_set, label_val_set, test_set, label_test_set,
                                seed=seed, model_name=f'sequence_model_{seed}')  # train_and_test appends '.h5' when building the checkpoint path
pred_y_ensemble.append(pred_y.reshape(-1, ))
|
[
"data_process.get_treat_info",
"numpy.abs",
"tensorflow.keras.layers.Dense",
"random.shuffle",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Sequential",
"os.path.join",
"tensorflow.keras.callbacks.EarlyStopping",
"pandas.DataFrame",
"sklearn.impute.SimpleImputer",
"pandas.merge",
"random.seed",
"tensorflow.keras.optimizers.Adam",
"pandas.concat",
"utils.logger.logger.debug",
"pandas.get_dummies",
"data_process.get_apptointment_info",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.keras.layers.LSTM",
"numpy.array"
] |
[((356, 418), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (390, 418), True, 'import tensorflow as tf\n'), ((703, 719), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (715, 719), True, 'import pandas as pd\n'), ((1237, 1290), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""'}), "(missing_values=np.nan, strategy='mean')\n", (1250, 1290), False, 'from sklearn.impute import SimpleImputer\n'), ((1986, 2020), 'utils.logger.logger.debug', 'logger.debug', (['f"""Fill zero in nan!"""'], {}), "(f'Fill zero in nan!')\n", (1998, 2020), False, 'from utils.logger import logger\n'), ((2072, 2116), 'utils.logger.logger.debug', 'logger.debug', (['f"""Process numerical features!"""'], {}), "(f'Process numerical features!')\n", (2084, 2116), False, 'from utils.logger import logger\n'), ((2822, 2920), 'pandas.merge', 'pd.merge', (['num_data_single_value', 'num_data_multiple_value'], {'on': '"""AppointmentSerNum"""', 'how': '"""inner"""'}), "(num_data_single_value, num_data_multiple_value, on=\n 'AppointmentSerNum', how='inner')\n", (2830, 2920), True, 'import pandas as pd\n'), ((2921, 2966), 'utils.logger.logger.debug', 'logger.debug', (['f"""Encode categorical features!"""'], {}), "(f'Encode categorical features!')\n", (2933, 2966), False, 'from utils.logger import logger\n'), ((3064, 3137), 'pandas.merge', 'pd.merge', (['num_data', 'cate_onehot_data'], {'on': '"""AppointmentSerNum"""', 'how': '"""inner"""'}), "(num_data, cate_onehot_data, on='AppointmentSerNum', how='inner')\n", (3072, 3137), True, 'import pandas as pd\n'), ((3143, 3188), 'utils.logger.logger.debug', 'logger.debug', (['f"""Add appointment information!"""'], {}), "(f'Add appointment information!')\n", (3155, 3188), False, 'from utils.logger import logger\n'), ((3533, 3610), 'pandas.merge', 'pd.merge', (['feature_data', 'information_data'], {'on': '"""AppointmentSerNum"""', 'how': '"""inner"""'}), "(feature_data, information_data, on='AppointmentSerNum', how='inner')\n", (3541, 3610), True, 'import pandas as pd\n'), ((3616, 3659), 'utils.logger.logger.debug', 'logger.debug', (['f"""Split features and labels!"""'], {}), "(f'Split features and labels!')\n", (3628, 3659), False, 'from utils.logger import logger\n'), ((3971, 3983), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3981, 3983), False, 'from tensorflow.keras.models import Sequential\n'), ((5211, 5228), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5222, 5228), False, 'import random\n'), ((5233, 5266), 'random.shuffle', 'random.shuffle', (['train_samples_lst'], {}), '(train_samples_lst)\n', (5247, 5266), False, 'import random\n'), ((5271, 5288), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5282, 5288), False, 'import random\n'), ((5293, 5326), 'random.shuffle', 'random.shuffle', (['label_samples_lst'], {}), '(label_samples_lst)\n', (5307, 5326), False, 'import random\n'), ((5997, 6046), 'utils.logger.logger.debug', 'logger.debug', (['f"""Start training model for {seed}!"""'], {}), "(f'Start training model for {seed}!')\n", (6009, 6046), False, 'from utils.logger import logger\n'), ((6095, 6120), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6110, 6120), False, 'from tensorflow.keras import layers, optimizers\n'), ((6278, 6404), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(100000.0)', 'patience': '(5)', 'verbose': '(1)', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=100000.0,\n patience=5, verbose=1, restore_best_weights=True)\n", (6310, 6404), True, 'import tensorflow as tf\n'), ((6643, 6689), 'os.path.join', 'os.path.join', (['DT_MODEL_DIR', 'f"""{model_name}.h5"""'], {}), "(DT_MODEL_DIR, f'{model_name}.h5')\n", (6655, 6689), False, 'import os\n'), ((6714, 6882), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['checkpoint_path_industry'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(checkpoint_path_industry, monitor=\n 'val_loss', verbose=0, save_best_only=True, save_weights_only=False,\n mode='min', period=1)\n", (6748, 6882), True, 'import tensorflow as tf\n'), ((7233, 7250), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7244, 7250), False, 'import random\n'), ((7255, 7280), 'random.shuffle', 'random.shuffle', (['train_set'], {}), '(train_set)\n', (7269, 7280), False, 'import random\n'), ((7285, 7302), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7296, 7302), False, 'import random\n'), ((7307, 7338), 'random.shuffle', 'random.shuffle', (['label_train_set'], {}), '(label_train_set)\n', (7321, 7338), False, 'import random\n'), ((7833, 7870), 'utils.logger.logger.debug', 'logger.debug', (['f"""Start testing model!"""'], {}), "(f'Start testing model!')\n", (7845, 7870), False, 'from utils.logger import logger\n'), ((8315, 8338), 'data_process.get_apptointment_info', 'get_apptointment_info', ([], {}), '()\n', (8336, 8338), False, 'from data_process import get_apptointment_info, get_treat_info\n'), ((8370, 8386), 'data_process.get_treat_info', 'get_treat_info', ([], {}), '()\n', (8384, 8386), False, 'from data_process import get_apptointment_info, get_treat_info\n'), ((8408, 8518), 'pandas.merge', 'pd.merge', (['processed_appointment_data', 'processed_treatment_data'], {'on': "['PatientSerNum', 'date']", 'how': '"""inner"""'}), "(processed_appointment_data, processed_treatment_data, on=[\n 'PatientSerNum', 'date'], how='inner')\n", (8416, 8518), True, 'import pandas as pd\n'), ((798, 861), 'pandas.get_dummies', 'pd.get_dummies', (['processed_data[[feature]]'], {'prefix': 'f"""{feature}_"""'}), "(processed_data[[feature]], prefix=f'{feature}_')\n", (812, 861), True, 'import pandas as pd\n'), ((938, 980), 'pandas.concat', 'pd.concat', (['[cate_onehot_data, tmp]'], {'axis': '(1)'}), '([cate_onehot_data, tmp], axis=1)\n', (947, 980), True, 'import pandas as pd\n'), ((4007, 4114), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['(128)'], {'batch_input_shape': '(None, None, 209)', 'dropout': '(0.1)', 'recurrent_dropout': '(0.5)', 'name': '"""input"""'}), "(128, batch_input_shape=(None, None, 209), dropout=0.1,\n recurrent_dropout=0.5, name='input')\n", (4018, 4114), False, 'from tensorflow.keras import layers, optimizers\n'), ((4361, 4377), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {}), '(32)\n', (4373, 4377), False, 'from tensorflow.keras import layers, optimizers\n'), ((4393, 4408), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4405, 4408), False, 'from tensorflow.keras import layers, optimizers\n'), ((7940, 7958), 'numpy.array', 'np.array', (['test_set'], {}), '(test_set)\n', (7948, 7958), True, 'import numpy as np\n'), ((8044, 8068), 'numpy.array', 'np.array', (['label_test_set'], {}), '(label_test_set)\n', (8052, 8068), True, 'import numpy as np\n'), ((4969, 4994), 'numpy.array', 'np.array', (['feature_samples'], {}), '(feature_samples)\n', (4977, 4994), True, 'import numpy as np\n'), ((4662, 4676), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4673, 4676), False, 'import random\n'), ((4693, 4714), 'random.shuffle', 'random.shuffle', (['x_set'], {}), '(x_set)\n', (4707, 4714), False, 'import random\n'), ((4731, 4745), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4742, 4745), False, 'import random\n'), ((4762, 4783), 'random.shuffle', 'random.shuffle', (['y_set'], {}), '(y_set)\n', (4776, 4783), False, 'import random\n'), ((5043, 5066), 'numpy.array', 'np.array', (['label_samples'], {}), '(label_samples)\n', (5051, 5066), True, 'import numpy as np\n'), ((8125, 8141), 'numpy.abs', 'np.abs', (['residual'], {}), '(residual)\n', (8131, 8141), True, 'import numpy as np\n'), ((8205, 8229), 'numpy.array', 'np.array', (['label_test_set'], {}), '(label_test_set)\n', (8213, 8229), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 12:10:41 2019
@author: reiters
"""
import numpy as np
#t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_541_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t3=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_507_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t4=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_511_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_crack2_evaluated/best/noiseBig_epoch_512_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_crack2_evaluated/best/noiseBig_epoch_529_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_sand2_evaluated/best/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_sand2_evaluated/best/noiseBig_epoch_529_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
img1Ratio=np.linspace(0,1,11) # for curtain-rocks
#img1Ratio=np.linspace(0.2,0.35,11) # for curtain-crack
#img1Ratio=np.linspace(0.4,.7,11) # for curtain-sand
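# Linearly interpolate between the two noise tensors: ratio*t1 + (1 - ratio)*t2.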
intNoise=[]
for x in range(len(img1Ratio)):
    intNoise.append(img1Ratio[x]*t1 + (1 - img1Ratio[x])*t2)
#np.save('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_crack2_evaluated/best/noiseImage1',intNoise)
#np.save('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_sand2_evaluated/best/noiseImage1',intNoise)
np.save('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseImage1',intNoise)
|
[
"numpy.load",
"numpy.save",
"numpy.linspace"
] |
[((689, 867), 'numpy.load', 'np.load', (['"""/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy"""', 'None', '"""allow_pickle"""', '(True)'], {}), "(\n '/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy'\n , None, 'allow_pickle', True)\n", (696, 867), True, 'import numpy as np\n'), ((858, 1036), 'numpy.load', 'np.load', (['"""/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy"""', 'None', '"""allow_pickle"""', '(True)'], {}), "(\n '/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy'\n , None, 'allow_pickle', True)\n", (865, 1036), True, 'import numpy as np\n'), ((1734, 1755), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (1745, 1755), True, 'import numpy as np\n'), ((2232, 2356), 'numpy.save', 'np.save', (['"""/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseImage1"""', 'intNoise'], {}), "(\n '/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseImage1'\n , intNoise)\n", (2239, 2356), True, 'import numpy as np\n')]
|
import abc
import csv
import uuid
import json
import os
import numpy as np
import requests
import joblib
import pandas
from scipy.sparse import csr_matrix
from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel
from tworaven_solver import Dataset
from collections import defaultdict
from sklearn import model_selection
from tworaven_solver.model import BaseModelWrapper
class Model(object):
def __init__(self, model, system, predictors, targets, model_id=None, search_id=None, train_specification=None, task=None):
if model_id is None:
db_model = StatisticalModel.objects.create()
model_id = 'oss-' + str(db_model.model_id)
self.model = model
self.system = system
self.model_id = model_id
self.search_id = search_id
self.predictors = predictors
self.targets = targets
# which dataset model is currently trained on
self.train_specification = train_specification
self.task = task
@abc.abstractmethod
def describe(self):
pass
@abc.abstractmethod
def score(self, specification):
pass
@abc.abstractmethod
def produce(self, specification):
pass
@abc.abstractmethod
def save(self):
pass
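    # Rebuild the appropriate Model subclass from disk, dispatching on the 'system' field of the saved metadata.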
@staticmethod
def load(model_id):
model_folder_path = os.path.join(SAVED_MODELS_PATH, model_id)
metadata_path = os.path.join(model_folder_path, 'metadata.json')
if not os.path.exists(metadata_path):
raise FileNotFoundError
with open(metadata_path, 'r') as metadata_file:
metadata = json.load(metadata_file)
if metadata['system'] in ['auto_sklearn', 'tpot', 'mlbox', 'mljar-supervised']:
preprocess = None
if os.path.exists(os.path.join(model_folder_path, 'preprocess.joblib')):
preprocess = joblib.load(os.path.join(model_folder_path, 'preprocess.joblib'))
return ModelSklearn(
model=joblib.load(os.path.join(model_folder_path, 'model.joblib')),
predictors=metadata['predictors'],
targets=metadata['targets'],
system=metadata['system'],
model_id=model_id,
search_id=metadata['search_id'],
train_specification=metadata['train_specification'],
preprocess=preprocess,
task=metadata['task'])
if metadata['system'] == 'ludwig':
from ludwig.api import LudwigModel
return ModelLudwig(
model=LudwigModel.load(model_folder_path),
predictors=metadata['predictors'],
targets=metadata['targets'],
model_id=model_id,
search_id=metadata['search_id'],
task=metadata['task'])
if metadata['system'] == 'h2o':
import h2o
h2o.init()
return ModelH2O(
model=h2o.load_model(os.path.join(model_folder_path, metadata['model_filename'])),
model_id=model_id,
predictors=metadata['predictors'],
targets=metadata['targets'],
search_id=metadata['search_id'],
train_specification=metadata['train_specification'],
task=metadata['task'])
if metadata['system'] == 'TwoRavens':
return ModelTwoRavens(
model=BaseModelWrapper.load(model_folder_path, metadata),
system='TwoRavens',
predictors=metadata['predictors'],
targets=metadata['targets'],
model_id=metadata['model_id'],
search_id=metadata['search_id'],
task=metadata['task']
)
raise ValueError(f'System type "{metadata["system"]}" is not recognized.')
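    # Yield (train, test) dataframe pairs according to the evaluation configuration:
    # k-fold (optionally stratified) or a single holdout split.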
def make_splits(self, configuration, data):
if configuration['method'] == 'K_FOLD':
split_arguments = {
'n_splits': configuration.get('folds', 10),
'shuffle': configuration.get('shuffle', False),
'random_state': configuration.get('randomSeed')
}
if configuration['stratified']:
return ((data.iloc[train_indices], data.iloc[test_indices]) for train_indices, test_indices in
model_selection.StratifiedKFold(**split_arguments).split(data, data[self.targets[0]]))
else:
return ((data.iloc[train_indices], data.iloc[test_indices]) for train_indices, test_indices in
model_selection.KFold(**split_arguments).split(data))
elif configuration['method'] == 'HOLDOUT':
try:
return [model_selection.train_test_split(
data,
test_size=float(configuration.get('trainTestRatio', 0.35)),
stratify=data[self.targets[0]] if configuration.get('stratified') else None,
random_state=configuration.get('randomSeed'))]
except (TypeError, ValueError):
try:
return [model_selection.train_test_split(
data,
test_size=float(configuration.get('trainTestRatio', 0.35)),
stratify=None,
random_state=configuration.get('randomSeed'))]
except (TypeError, ValueError):
return [model_selection.train_test_split(
data,
random_state=configuration.get('randomSeed'))]
else:
raise ValueError(f'Invalid evaluation method: {configuration.method}')
class ModelSklearn(Model):
def __init__(self, model, system, predictors, targets, model_id=None, search_id=None, preprocess=None, train_specification=None, task=None):
super().__init__(model, system, predictors, targets, model_id, search_id, train_specification, task)
# categorical one hot encoding
self.preprocess = preprocess
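    # Apply the stored preprocessor and coerce the result into the format each backend
    # expects (dense pandas frame for mlbox, sanitized column names for mljar-supervised).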
def make_stimulus(self, data):
stimulus = data[self.predictors]
if self.preprocess:
stimulus = self.preprocess.transform(stimulus)
if self.system == 'mlbox':
# must have a dense pandas array
if issubclass(type(stimulus), csr_matrix):
stimulus = stimulus.toarray()
stimulus = pandas.DataFrame(stimulus)
if self.system == 'mljar-supervised':
# must have a pandas array with formatted column names (so they don't get modified by the solver)
stimulus = pandas.DataFrame(stimulus)
stimulus.columns = [str(i).strip() for i in stimulus.columns]
return stimulus
def describe(self):
model_name = self.model.__class__.__name__
description = str(self.model)
if self.system == 'mljar-supervised':
model_name = self.model.get_name()
if self.system == 'mlbox':
model_name = self.model.get_estimator().__class__.__name__
description = str(self.model.get_estimator())
return {
"model": model_name,
"description": description,
"model_id": self.model_id,
"search_id": self.search_id,
"system": self.system
}
def score(self, specification):
dataframe = Dataset(specification['input']).get_dataframe()[self.predictors + self.targets].dropna()
dataframe.reset_index(drop=True, inplace=True)
configuration = specification['configuration']
splits = self.make_splits(configuration, dataframe)
split_scores = defaultdict(list)
split_weights = defaultdict(list)
for train_split, test_split in splits:
self.fit(self.make_stimulus(train_split), train_split[self.targets[0]])
actual = np.array(test_split[self.targets[0]])
predicted = self.model.predict(self.make_stimulus(test_split))
if 'CLASSIFICATION' in self.task:
actual = actual.astype(int)
if self.system == 'mljar-supervised':
predicted = pandas.DataFrame((predicted.idxmax(axis=1) == 'p_1').astype(int))
predicted.columns = [self.targets[0]]
for metric in specification['performanceMetrics']:
try:
split_scores[json.dumps(metric)].append(get_metric(metric)(actual, predicted))
split_weights[json.dumps(metric)].append(test_split.size)
except Exception:
pass
scores = []
for metric in split_scores:
if split_scores[metric]:
scores.append({
'value': np.average(split_scores[metric], weights=split_weights[metric]),
'metric': json.loads(metric),
'target': self.targets[0]
})
return {
'search_id': self.search_id,
'model_id': self.model_id,
'scores': scores,
'system': self.system
}
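    # Refit only if the training specification differs from the one the model was last fitted on.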
def fit(self, stimulus, target, specification=None):
# check if model has already been trained for the same dataset
specification_str = json.dumps(specification) if specification else None
if self.train_specification and self.train_specification == specification_str:
return
self.train_specification = specification_str
if self.system == 'auto_sklearn':
self.model.refit(stimulus, target)
elif self.system == 'mljar-supervised':
self.model.train({"train": {
"X": stimulus, 'y': target
}})
else:
self.model.fit(stimulus, target)
self.save()
def produce(self, specification):
configuration = specification.get('configuration', {})
predict_type = configuration.get('predict_type', 'RAW')
# REFIT
dataframe_train = Dataset(specification['train']).get_dataframe().dropna()
stimulus = self.make_stimulus(dataframe_train[self.predictors])
self.fit(stimulus, dataframe_train[self.targets[0]], specification['train'])
# PRODUCE
dataframe = Dataset(specification['input']).get_dataframe().dropna()
dataframe.reset_index(drop=True, inplace=True)
stimulus = self.make_stimulus(dataframe[self.predictors])
output_directory_path = specification['output']['resource_uri'].replace('file://', '')
output_path = '/' + os.path.join(
*output_directory_path.split('/'),
str(uuid.uuid4()) + '.csv')
if self.system == 'mljar-supervised':
predictions = self.model.predict(stimulus)
if predict_type == 'RAW':
predictions = pandas.DataFrame((predictions.idxmax(axis=1) == 'p_1').astype(int))
predictions.columns = [self.targets[0]]
else:
if predict_type == 'RAW':
predictions = self.model.predict(stimulus)
if len(predictions.shape) > 1:
predictions = np.argmax(predictions, axis=-1)
predictions = pandas.DataFrame(predictions, columns=[self.targets[0]]).astype(int)
else:
predictions = self.model.predict_proba(stimulus)
# TODO: standardize probability column names
predictions = pandas.DataFrame(predictions, columns=[f'p_{i}' for i in range(predictions.shape[1])])
predictions.reset_index(drop=True, inplace=True)
predictions.insert(0, 'd3mIndex', dataframe['d3mIndex'])
if not os.path.exists(output_directory_path):
os.makedirs(output_directory_path)
cwd = os.getcwd()
try:
os.chdir('/')
predictions.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
finally:
os.chdir(cwd)
return {
'produce': {
'input': specification['input'],
'configuration': configuration,
'data_pointer': output_path
},
'search_id': self.search_id,
'model_id': self.model_id,
'system': self.system
}
def save(self):
model_folder_path = os.path.join(SAVED_MODELS_PATH, self.model_id)
metadata_path = os.path.join(model_folder_path, 'metadata.json')
if not os.path.exists(metadata_path):
os.makedirs(model_folder_path)
with open(metadata_path, 'w') as metadata_file:
json.dump({
'system': str(self.system),
'model_id': str(self.model_id),
'predictors': self.predictors,
'targets': self.targets,
'train_specification': self.train_specification,
'search_id': self.search_id,
'task': self.task
}, metadata_file)
joblib.dump(self.model, os.path.join(model_folder_path, 'model.joblib'))
if self.preprocess:
joblib.dump(self.preprocess, os.path.join(model_folder_path, 'preprocess.joblib'))
class ModelCaret(Model):
def __init__(self, model, predictors, targets, model_id=None, search_id=None):
super().__init__(model, 'caret', predictors, targets, model_id, search_id)
def describe(self):
response = requests.post(
R_SERVICE + 'caretDescribe.app',
json={'model_id': self.model_id}).json()
if not response['success']:
raise ValueError(response['message'])
return response['data']
def score(self, specification):
response = requests.post(
R_SERVICE + 'caretScore.app',
json={
'model_id': self.model_id,
'specification': specification
}).json()
if not response['success']:
raise ValueError(response['message'])
return response['data']
def produce(self, specification):
response = requests.post(
R_SERVICE + 'caretProduce.app',
json={
'model_id': self.model_id,
'specification': specification
}).json()
if not response['success']:
raise ValueError(response['message'])
return response['data']
def save(self):
# ignore, model is only present in remote caret.app
raise ValueError('Caret model is not saveable in Python.')
class ModelH2O(Model):
def __init__(self, model, predictors, targets, model_id=None, search_id=None, train_specification=None, task=None):
super().__init__(model, 'h2o', predictors, targets, model_id, search_id, train_specification, task=task)
def describe(self):
return {
"model": f'{self.model.algo}-{self.model.type}',
"description": f'{self.model.algo}-{self.model.type}',
"model_id": self.model_id,
'search_id': self.search_id,
"system": self.system,
}
def fit(self, data, specification=None):
# check if model has already been trained for the same dataset
specification_str = json.dumps(specification) if specification else None
if self.train_specification and self.train_specification == specification_str:
return
self.train_specification = specification_str
self.model.train(y=self.targets[0], x=self.predictors, training_frame=data)
self.save()
def score(self, specification):
import h2o
configuration = specification['configuration']
resource_uri = Dataset(specification['input']).get_resource_uri()
data = h2o.import_file(resource_uri)
y = self.targets[0]
if 'CLASSIFICATION' in self.task:
if data.types[y] == u'real':
data[y] = data[y].ascharacter()
data[y] = data[y].asfactor()
results = pandas.DataFrame({
'predict': self.model.predict(data).as_data_frame()['predict'],
'actual': data[y].as_data_frame()[y]
}).dropna()
if 'CLASSIFICATION' in self.task:
if data.types[y] == u'real':
data[y] = data[y].ascharacter()
results['actual'] = results['actual'].astype(int)
scores = []
for metric_schema in specification['performanceMetrics']:
try:
scores.append({
'value': get_metric(metric_schema)(
results['actual'],
results['predict']),
'metric': metric_schema,
'target': y
})
except ValueError as err:
print(f'Could not evaluate metric: {str(metric_schema)}')
print(err)
# if configuration.get('stratified'):
# # how does h2o know which column to stratify for? weirdness here
# folds = data.stratified_kfold_column(n_folds=configuration['folds'])
# else:
# folds = data.kfold_column(n_folds=configuration['folds'])
#
# split_scores = defaultdict(list)
# split_weights = defaultdict(list)
# for split_id in range(configuration['folds']):
# train, test = data[folds != split_id], data[folds == split_id]
# self.fit(train)
# results = pandas.DataFrame({
# 'predict': self.model.predict(test).as_data_frame()['predict'],
# 'actual': test[self.targets[0]].as_data_frame()[self.targets[0]]
# }).dropna()
#
# if 'CLASSIFICATION' in self.task:
# results['actual'] = results['actual'].astype(int)
#
# for metric_schema in specification['performanceMetrics']:
# try:
# split_scores[json.dumps(metric_schema)].append(get_metric(metric_schema)(
# results['actual'],
# results['predict']))
# split_weights[json.dumps(metric_schema)].append(results.size)
# except ValueError as err:
# print(f'Could not evaluate metric: {str(metric_schema)}')
# print(err)
# for metric in split_scores:
# scores.append({
# 'value': np.average(split_scores[metric], weights=split_weights[metric]),
# 'metric': json.loads(metric),
# 'target': self.targets[0]
# })
return {
'search_id': self.search_id,
'model_id': self.model_id,
'scores': scores,
"system": self.system
}
def produce(self, specification):
import h2o
configuration = specification.get('configuration', {})
predict_type = configuration.get('predict_type', 'RAW')
train = h2o.import_file(Dataset(specification['train']).get_resource_uri())
y = self.targets[0]
if 'CLASSIFICATION' in self.task:
if train.types[y] == u'real':
train[y] = train[y].ascharacter()
train[self.targets[0]] = train[self.targets[0]].asfactor()
self.fit(train, specification['train'])
test_dataset = Dataset(specification['input'])
data = h2o.import_file(test_dataset.get_resource_uri())
if 'CLASSIFICATION' in self.task:
if data.types[y] == u'real':
data[y] = data[y].ascharacter()
data[y] = data[y].asfactor()
# retry once
try:
predictions = self.model.predict(data).as_data_frame()
        except Exception:
predictions = self.model.predict(data).as_data_frame()
if predict_type == 'RAW':
if 'CLASSIFICATION' in self.task:
if data.types[y] == u'real':
data[y] = data[y].ascharacter()
predictions = predictions[['predict']]
predictions.columns = [y]
else:
# TODO: standardize probability column names
            predictions.drop(columns='predict', inplace=True)
predictions['d3mIndex'] = test_dataset.get_dataframe()['d3mIndex']
output_directory_path = specification['output']['resource_uri'].replace('file://', '')
output_path = '/' + os.path.join(
*output_directory_path.split('/'),
str(uuid.uuid4()) + '.csv')
if not os.path.exists(output_directory_path):
os.makedirs(output_directory_path)
cwd = os.getcwd()
try:
os.chdir('/')
predictions.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
finally:
os.chdir(cwd)
return {
'produce': {
'input': specification['input'],
'configuration': configuration,
'data_pointer': output_path
},
'search_id': self.search_id,
'model_id': self.model_id,
"system": self.system
}
def save(self):
import h2o
model_folder_path = os.path.join(SAVED_MODELS_PATH, self.model_id)
metadata_path = os.path.join(model_folder_path, 'metadata.json')
if not os.path.exists(metadata_path):
os.makedirs(model_folder_path)
model_path = h2o.save_model(self.model, path=model_folder_path, force=True)
with open(metadata_path, 'w') as metadata_file:
json.dump({
'system': self.system,
'model_id': self.model_id,
'search_id': self.search_id,
'model_filename': os.path.basename(model_path),
'predictors': self.predictors,
'targets': self.targets,
'train_specification': self.train_specification,
'task': self.task
}, metadata_file)
class ModelLudwig(Model):
def __init__(self, model, predictors, targets, model_id=None, search_id=None, train_specification=None, task=None):
super().__init__(model, 'ludwig', predictors, targets, model_id, search_id, train_specification, task)
def describe(self):
return {
# TODO: extract more relevant description of model algorithm
"model": 'multilayer feedforward network',
"description": str(self.model),
"model_id": self.model_id,
"search_id": self.search_id,
"system": self.system
}
def score(self, specification):
# TODO: refitting -> respect configuration
configuration = specification['configuration']
dataframe = Dataset(specification['input']).get_dataframe()
target = self.targets[0]
if self.task == 'CLASSIFICATION':
dataframe[target] = dataframe[target].astype(str)
predicted = self.model.predict(dataframe)
scores = []
for metric in specification['performanceMetrics']:
scores.append({
'value': get_metric(metric)(dataframe[target], predicted[f'{target}_predictions']),
'metric': metric,
'target': target
})
return {
'search_id': self.search_id,
'model_id': self.model_id,
'scores': scores,
'system': self.system
}
def produce(self, specification):
configuration = specification.get('configuration', {})
predict_type = configuration.get('predict_type', 'RAW')
dataset = Dataset(specification['input'])
dataframe = dataset.get_dataframe()
predictions = self.model.predict(dataframe)
if predict_type == 'RAW':
predictions = predictions[[f'{self.targets[0]}_predictions']]
predictions.columns = [self.targets[0]]
if predict_type == 'PROBABILITIES':
predictions = predictions[[i for i in predictions.columns.values if i.startswith(f'{self.targets}_probabilities_')]]
predictions.insert(0, 'd3mIndex', dataframe['d3mIndex'])
output_directory_path = specification['output']['resource_uri'].replace('file://', '')
output_path = '/' + os.path.join(
*output_directory_path.split('/'),
str(uuid.uuid4()) + '.csv')
if not os.path.exists(output_directory_path):
os.makedirs(output_directory_path)
cwd = os.getcwd()
try:
os.chdir('/')
predictions.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
finally:
os.chdir(cwd)
return {
'produce': {
'input': specification['input'],
'configuration': configuration,
'data_pointer': output_path
},
'search_id': self.search_id,
'model_id': self.model_id,
"system": self.system
}
def save(self):
model_folder_path = os.path.join(SAVED_MODELS_PATH, self.model_id)
metadata_path = os.path.join(model_folder_path, 'metadata.json')
if not os.path.exists(metadata_path):
os.makedirs(model_folder_path)
self.model.save(model_folder_path)
with open(metadata_path, 'w') as metadata_file:
json.dump({
'system': self.system,
'model_id': self.model_id,
'search_id': self.search_id,
'model_filename': model_folder_path,
'predictors': self.predictors,
'targets': self.targets,
'task': self.task
}, metadata_file)
class ModelTwoRavens(Model):
def describe(self):
description = self.model.describe() or {}
# print(description)
return {
"model": self.model.pipeline_specification['model']['strategy'].lower(),
"description": str(self.model.model),
**description,
"pipeline_specification": self.model.pipeline_specification,
"problem_specification": self.model.problem_specification,
"model_id": self.model_id,
"search_id": self.search_id,
"system": self.system
}
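    # Score on the given split; ROC-family metrics are computed from predicted probabilities instead of hard labels.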
def score(self, score_specification):
# Looks like this function will only be called when encounter a test split
dataframe = Dataset(score_specification['input']).get_dataframe()
prob_flag = False
for eachMetric in score_specification['performanceMetrics']:
if eachMetric.get('metric', '').startswith('ROC'):
prob_flag = True
if self.task == "FORECASTING":
# For score computation, we only take the given "forecastingHorizon" into account
forecast_length = self.model.problem_specification.get('forecastingHorizon', {"value": 10})
forecast_length = forecast_length.get('value', 10)
predicted = self.model.forecast(dataframe, forecast_length, forecast_mode='test')
elif self.task in ['CLASSIFICATION', 'REGRESSION']:
# TODO: respect configuration on holdout vs cross-validation, do refitting, etc.
if self.task == 'CLASSIFICATION':
for target in self.targets:
dataframe[target] = dataframe[target].astype(str)
predicted = self.model.predict(dataframe)
if prob_flag:
# Compute score if it's a classification problem
predicted_prob = self.model.predict_proba(dataframe)
if len(predicted_prob.shape) > 1 and predicted_prob.shape[1] == 2:
# Binary Classification, keep the probability of greater class only
predicted_prob = predicted_prob[:, [1]].ravel()
if self.task == 'CLASSIFICATION':
for target in self.targets:
predicted[target] = predicted[target].astype(str)
else:
raise NotImplementedError
scores = []
for target in self.targets:
results = pandas.DataFrame({'actual': dataframe[target], 'predicted': predicted[target]})
results.dropna(inplace=True)
for eachMetric in score_specification['performanceMetrics']:
try:
if eachMetric.get('metric', '').startswith('ROC'):
tmp_value = get_metric(eachMetric, self.model.problem_specification)(results['actual'], predicted_prob)
else:
tmp_value = get_metric(eachMetric, self.model.problem_specification)(results['actual'], results['predicted'])
scores.append({
'value': tmp_value,
'metric': eachMetric,
'target': target
})
except ValueError:
pass
return {
'search_id': self.search_id,
'model_id': self.model_id,
'scores': scores,
'system': self.system
}
def fit(self, dataframe=None, data_specification=None):
self.model.refit(
dataframe=dataframe,
data_specification=data_specification)
def produce(self, produce_specification):
# Looks like produce_specification contains input.name -- [train|test|all]
configuration = produce_specification.get('configuration', {})
predict_type = configuration.get('predict_type', 'RAW')
dataframe = Dataset(produce_specification['input']).get_dataframe()
data_type = produce_specification['input'].get('name', 'test')
if self.task in ['REGRESSION', 'CLASSIFICATION']:
dataframe_train = Dataset(produce_specification['train']).get_dataframe().dropna()
# self.fit(dataframe=dataframe_train, data_specification=produce_specification['train'])
if predict_type == 'RAW':
if "FORECASTING" == self.task:
predicted = self.model.forecast(dataframe, len(dataframe.index), data_type)
else:
# This branch seems will never be reached
predicted = self.model.predict(dataframe)
else:
predicted = self.model.predict_proba(dataframe)
# TODO: standardize probability column names
predicted = pandas.DataFrame(predicted, columns=[f'p_{i}' for i in range(predicted.shape[1])])
output_directory_path = produce_specification['output']['resource_uri'].replace('file://', '')
output_path = '/' + os.path.join(
*output_directory_path.split('/'),
str(uuid.uuid4()) + '.csv')
if 'd3mIndex' not in predicted.columns.values:
predicted.insert(0, 'd3mIndex', dataframe['d3mIndex'])
if not os.path.exists(output_directory_path):
os.makedirs(output_directory_path)
cwd = os.getcwd()
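        # to_csv is run from the filesystem root, presumably so the absolute
        # output_path resolves regardless of the server's working directory;
        # the original cwd is restored in the finally block.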
try:
os.chdir('/')
predicted.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
finally:
os.chdir(cwd)
return {
'produce': {
'input': produce_specification['input'],
'configuration': configuration,
'data_pointer': output_path
},
'search_id': self.search_id,
'model_id': self.model_id,
'system': self.system
}
def save(self):
model_folder_dir = os.path.join(SAVED_MODELS_PATH, self.model_id)
metadata_path = os.path.join(model_folder_dir, 'metadata.json')
os.makedirs(model_folder_dir, exist_ok=True)
with open(metadata_path, 'w') as metadata_file:
json.dump({
'system': str(self.system),
'model_id': str(self.model_id),
'predictors': self.predictors,
'targets': self.targets,
'search_id': self.search_id,
'task': self.task
}, metadata_file)
self.model.save(model_folder_dir)
|
[
"numpy.argmax",
"json.dumps",
"collections.defaultdict",
"tworaven_solver.model.BaseModelWrapper.load",
"h2o.import_file",
"requests.post",
"os.path.join",
"os.chdir",
"pandas.DataFrame",
"h2o.save_model",
"json.loads",
"h2o.init",
"os.path.exists",
"tworaven_apps.solver_interfaces.models.StatisticalModel.objects.create",
"json.dump",
"numpy.average",
"os.path.basename",
"ludwig.api.LudwigModel.load",
"json.load",
"uuid.uuid4",
"os.makedirs",
"os.getcwd",
"sklearn.model_selection.KFold",
"tworaven_solver.Dataset",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"tworaven_apps.solver_interfaces.models.get_metric"
] |
[((1390, 1431), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'model_id'], {}), '(SAVED_MODELS_PATH, model_id)\n', (1402, 1431), False, 'import os\n'), ((1456, 1504), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (1468, 1504), False, 'import os\n'), ((7754, 7771), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7765, 7771), False, 'from collections import defaultdict\n'), ((7796, 7813), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7807, 7813), False, 'from collections import defaultdict\n'), ((11870, 11881), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11879, 11881), False, 'import os\n'), ((12423, 12469), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (12435, 12469), False, 'import os\n'), ((12494, 12542), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (12506, 12542), False, 'import os\n'), ((15839, 15868), 'h2o.import_file', 'h2o.import_file', (['resource_uri'], {}), '(resource_uri)\n', (15854, 15868), False, 'import h2o\n'), ((19430, 19461), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (19437, 19461), False, 'from tworaven_solver import Dataset\n'), ((20716, 20727), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20725, 20727), False, 'import os\n'), ((21288, 21334), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (21300, 21334), False, 'import os\n'), ((21359, 21407), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (21371, 21407), False, 'import os\n'), ((21520, 21582), 'h2o.save_model', 'h2o.save_model', (['self.model'], {'path': 'model_folder_path', 'force': '(True)'}), '(self.model, path=model_folder_path, force=True)\n', (21534, 21582), False, 'import h2o\n'), ((23716, 23747), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (23723, 23747), False, 'from tworaven_solver import Dataset\n'), ((24587, 24598), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (24596, 24598), False, 'import os\n'), ((25140, 25186), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (25152, 25186), False, 'import os\n'), ((25211, 25259), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (25223, 25259), False, 'import os\n'), ((31089, 31100), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (31098, 31100), False, 'import os\n'), ((31647, 31693), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (31659, 31693), False, 'import os\n'), ((31718, 31765), 'os.path.join', 'os.path.join', (['model_folder_dir', '"""metadata.json"""'], {}), "(model_folder_dir, 'metadata.json')\n", (31730, 31765), False, 'import os\n'), ((31775, 31819), 'os.makedirs', 'os.makedirs', (['model_folder_dir'], {'exist_ok': '(True)'}), '(model_folder_dir, exist_ok=True)\n', (31786, 31819), False, 'import os\n'), ((632, 665), 'tworaven_apps.solver_interfaces.models.StatisticalModel.objects.create', 'StatisticalModel.objects.create', ([], {}), '()\n', (663, 665), False, 'from 
tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((1521, 1550), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (1535, 1550), False, 'import os\n'), ((1668, 1692), 'json.load', 'json.load', (['metadata_file'], {}), '(metadata_file)\n', (1677, 1692), False, 'import json\n'), ((2958, 2968), 'h2o.init', 'h2o.init', ([], {}), '()\n', (2966, 2968), False, 'import h2o\n'), ((6494, 6520), 'pandas.DataFrame', 'pandas.DataFrame', (['stimulus'], {}), '(stimulus)\n', (6510, 6520), False, 'import pandas\n'), ((6701, 6727), 'pandas.DataFrame', 'pandas.DataFrame', (['stimulus'], {}), '(stimulus)\n', (6717, 6727), False, 'import pandas\n'), ((7967, 8004), 'numpy.array', 'np.array', (['test_split[self.targets[0]]'], {}), '(test_split[self.targets[0]])\n', (7975, 8004), True, 'import numpy as np\n'), ((9355, 9380), 'json.dumps', 'json.dumps', (['specification'], {}), '(specification)\n', (9365, 9380), False, 'import json\n'), ((11769, 11806), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (11783, 11806), False, 'import os\n'), ((11820, 11854), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (11831, 11854), False, 'import os\n'), ((11907, 11920), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (11915, 11920), False, 'import os\n'), ((12037, 12050), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (12045, 12050), False, 'import os\n'), ((12559, 12588), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (12573, 12588), False, 'import os\n'), ((12602, 12632), 'os.makedirs', 'os.makedirs', (['model_folder_path'], {}), '(model_folder_path)\n', (12613, 12632), False, 'import os\n'), ((13101, 13148), 'os.path.join', 'os.path.join', (['model_folder_path', '"""model.joblib"""'], {}), "(model_folder_path, 'model.joblib')\n", (13113, 13148), False, 'import os\n'), ((15322, 15347), 'json.dumps', 'json.dumps', (['specification'], {}), '(specification)\n', (15332, 15347), False, 'import json\n'), ((20615, 20652), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (20629, 20652), False, 'import os\n'), ((20666, 20700), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (20677, 20700), False, 'import os\n'), ((20753, 20766), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (20761, 20766), False, 'import os\n'), ((20883, 20896), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (20891, 20896), False, 'import os\n'), ((21424, 21453), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (21438, 21453), False, 'import os\n'), ((21467, 21497), 'os.makedirs', 'os.makedirs', (['model_folder_path'], {}), '(model_folder_path)\n', (21478, 21497), False, 'import os\n'), ((24486, 24523), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (24500, 24523), False, 'import os\n'), ((24537, 24571), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (24548, 24571), False, 'import os\n'), ((24624, 24637), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (24632, 24637), False, 'import os\n'), ((24754, 24767), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (24762, 24767), False, 'import os\n'), ((25276, 25305), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (25290, 25305), 
False, 'import os\n'), ((25319, 25349), 'os.makedirs', 'os.makedirs', (['model_folder_path'], {}), '(model_folder_path)\n', (25330, 25349), False, 'import os\n'), ((25463, 25689), 'json.dump', 'json.dump', (["{'system': self.system, 'model_id': self.model_id, 'search_id': self.\n search_id, 'model_filename': model_folder_path, 'predictors': self.\n predictors, 'targets': self.targets, 'task': self.task}", 'metadata_file'], {}), "({'system': self.system, 'model_id': self.model_id, 'search_id':\n self.search_id, 'model_filename': model_folder_path, 'predictors': self\n .predictors, 'targets': self.targets, 'task': self.task}, metadata_file)\n", (25472, 25689), False, 'import json\n'), ((28237, 28316), 'pandas.DataFrame', 'pandas.DataFrame', (["{'actual': dataframe[target], 'predicted': predicted[target]}"], {}), "({'actual': dataframe[target], 'predicted': predicted[target]})\n", (28253, 28316), False, 'import pandas\n'), ((30988, 31025), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (31002, 31025), False, 'import os\n'), ((31039, 31073), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (31050, 31073), False, 'import os\n'), ((31126, 31139), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (31134, 31139), False, 'import os\n'), ((31254, 31267), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (31262, 31267), False, 'import os\n'), ((1843, 1895), 'os.path.join', 'os.path.join', (['model_folder_path', '"""preprocess.joblib"""'], {}), "(model_folder_path, 'preprocess.joblib')\n", (1855, 1895), False, 'import os\n'), ((13220, 13272), 'os.path.join', 'os.path.join', (['model_folder_path', '"""preprocess.joblib"""'], {}), "(model_folder_path, 'preprocess.joblib')\n", (13232, 13272), False, 'import os\n'), ((13511, 13596), 'requests.post', 'requests.post', (["(R_SERVICE + 'caretDescribe.app')"], {'json': "{'model_id': self.model_id}"}), "(R_SERVICE + 'caretDescribe.app', json={'model_id': self.model_id}\n )\n", (13524, 13596), False, 'import requests\n'), ((13800, 13913), 'requests.post', 'requests.post', (["(R_SERVICE + 'caretScore.app')"], {'json': "{'model_id': self.model_id, 'specification': specification}"}), "(R_SERVICE + 'caretScore.app', json={'model_id': self.model_id,\n 'specification': specification})\n", (13813, 13913), False, 'import requests\n'), ((14166, 14282), 'requests.post', 'requests.post', (["(R_SERVICE + 'caretProduce.app')"], {'json': "{'model_id': self.model_id, 'specification': specification}"}), "(R_SERVICE + 'caretProduce.app', json={'model_id': self.\n model_id, 'specification': specification})\n", (14179, 14282), False, 'import requests\n'), ((15773, 15804), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (15780, 15804), False, 'from tworaven_solver import Dataset\n'), ((22832, 22863), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (22839, 22863), False, 'from tworaven_solver import Dataset\n'), ((26534, 26571), 'tworaven_solver.Dataset', 'Dataset', (["score_specification['input']"], {}), "(score_specification['input'])\n", (26541, 26571), False, 'from tworaven_solver import Dataset\n'), ((29692, 29731), 'tworaven_solver.Dataset', 'Dataset', (["produce_specification['input']"], {}), "(produce_specification['input'])\n", (29699, 29731), False, 'from tworaven_solver import Dataset\n'), ((1939, 1991), 'os.path.join', 'os.path.join', (['model_folder_path', 
'"""preprocess.joblib"""'], {}), "(model_folder_path, 'preprocess.joblib')\n", (1951, 1991), False, 'import os\n'), ((2626, 2661), 'ludwig.api.LudwigModel.load', 'LudwigModel.load', (['model_folder_path'], {}), '(model_folder_path)\n', (2642, 2661), False, 'from ludwig.api import LudwigModel\n'), ((3489, 3539), 'tworaven_solver.model.BaseModelWrapper.load', 'BaseModelWrapper.load', (['model_folder_path', 'metadata'], {}), '(model_folder_path, metadata)\n', (3510, 3539), False, 'from tworaven_solver.model import BaseModelWrapper\n'), ((11238, 11269), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (11247, 11269), True, 'import numpy as np\n'), ((19072, 19103), 'tworaven_solver.Dataset', 'Dataset', (["specification['train']"], {}), "(specification['train'])\n", (19079, 19103), False, 'from tworaven_solver import Dataset\n'), ((21824, 21852), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (21840, 21852), False, 'import os\n'), ((2061, 2108), 'os.path.join', 'os.path.join', (['model_folder_path', '"""model.joblib"""'], {}), "(model_folder_path, 'model.joblib')\n", (2073, 2108), False, 'import os\n'), ((3035, 3094), 'os.path.join', 'os.path.join', (['model_folder_path', "metadata['model_filename']"], {}), "(model_folder_path, metadata['model_filename'])\n", (3047, 3094), False, 'import os\n'), ((8846, 8909), 'numpy.average', 'np.average', (['split_scores[metric]'], {'weights': 'split_weights[metric]'}), '(split_scores[metric], weights=split_weights[metric])\n', (8856, 8909), True, 'import numpy as np\n'), ((8941, 8959), 'json.loads', 'json.loads', (['metric'], {}), '(metric)\n', (8951, 8959), False, 'import json\n'), ((10094, 10125), 'tworaven_solver.Dataset', 'Dataset', (["specification['train']"], {}), "(specification['train'])\n", (10101, 10125), False, 'from tworaven_solver import Dataset\n'), ((10348, 10379), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (10355, 10379), False, 'from tworaven_solver import Dataset\n'), ((10727, 10739), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10737, 10739), False, 'import uuid\n'), ((11300, 11356), 'pandas.DataFrame', 'pandas.DataFrame', (['predictions'], {'columns': '[self.targets[0]]'}), '(predictions, columns=[self.targets[0]])\n', (11316, 11356), False, 'import pandas\n'), ((20575, 20587), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (20585, 20587), False, 'import uuid\n'), ((23202, 23220), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['metric'], {}), '(metric)\n', (23212, 23220), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((24446, 24458), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (24456, 24458), False, 'import uuid\n'), ((30825, 30837), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (30835, 30837), False, 'import uuid\n'), ((7470, 7501), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (7477, 7501), False, 'from tworaven_solver import Dataset\n'), ((8515, 8533), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['metric'], {}), '(metric)\n', (8525, 8533), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((16611, 16636), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['metric_schema'], {}), '(metric_schema)\n', (16621, 16636), False, 'from 
tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((28560, 28616), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['eachMetric', 'self.model.problem_specification'], {}), '(eachMetric, self.model.problem_specification)\n', (28570, 28616), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((28714, 28770), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['eachMetric', 'self.model.problem_specification'], {}), '(eachMetric, self.model.problem_specification)\n', (28724, 28770), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((29908, 29947), 'tworaven_solver.Dataset', 'Dataset', (["produce_specification['train']"], {}), "(produce_specification['train'])\n", (29915, 29947), False, 'from tworaven_solver import Dataset\n'), ((4416, 4466), 'sklearn.model_selection.StratifiedKFold', 'model_selection.StratifiedKFold', ([], {}), '(**split_arguments)\n', (4447, 4466), False, 'from sklearn import model_selection\n'), ((4656, 4696), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {}), '(**split_arguments)\n', (4677, 4696), False, 'from sklearn import model_selection\n'), ((8488, 8506), 'json.dumps', 'json.dumps', (['metric'], {}), '(metric)\n', (8498, 8506), False, 'import json\n'), ((8588, 8606), 'json.dumps', 'json.dumps', (['metric'], {}), '(metric)\n', (8598, 8606), False, 'import json\n')]
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face detection loss."""
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.nn.loss.loss import _Loss
from mindspore.nn import Dense, Cell
from mindspore import Tensor
from mindspore.common import dtype as mstype
class PtLinspace(Cell):
def __init__(self):
super(PtLinspace, self).__init__()
self.TupleToArray = P.TupleToArray()
    def construct(self, start, end, steps):
        lin_x = ()
        # integer step: `range` rejects floats, and callers pass a span that
        # divides evenly into `steps`
        step = (end - start + 1) // steps
        for i in range(start, end + 1, step):
            lin_x += (i,)
        lin_x = self.TupleToArray(lin_x)
        return lin_x
class MSELoss(_Loss):
def __init__(self):
super(MSELoss, self).__init__()
self.sum = P.Sum()
self.mean = P.ReduceMean(keepdims=False)
self.pow = P.Pow()
self.sqrt = P.Sqrt()
def construct(self, nembeddings1, nembeddings2):
dist = nembeddings1 - nembeddings2
dist_pow = self.pow(dist, 2.0)
dist_sum = self.sum(dist_pow, 1)
dist_sqrt = self.sqrt(dist_sum)
loss = self.mean(dist_sqrt, 0)
return loss
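# A NumPy reading of the loss above (illustrative, not part of the original
# module): for embedding batches e1, e2 it is equivalent to
#     np.mean(np.sqrt(np.sum((e1 - e2) ** 2, axis=1)))
# i.e. the mean Euclidean distance between paired embeddings.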
class YoloLoss(Cell):
""" Computes yolo loss from darknet network output and target annotation.
Args:
num_classes (int): number of categories
anchors (list): 2D list representing anchor boxes
coord_scale (float): weight of bounding box coordinates
no_object_scale (float): weight of regions without target boxes
object_scale (float): weight of regions with target boxes
class_scale (float): weight of categorical predictions
thresh (float): minimum iou between a predicted box and ground truth for them to be considered matching
seen (int): How many images the network has already been trained on.
"""
def __init__(self, num_classes, anchors, anchors_mask, reduction=32, seen=0, coord_scale=1.0, no_object_scale=1.0,
object_scale=1.0, class_scale=1.0, thresh=0.5, head_idx=0.0):
super(YoloLoss, self).__init__()
self.num_classes = num_classes
self.num_anchors = len(anchors_mask)
        self.anchor_step = len(anchors[0])  # number of values per anchor (w, h)
        self.anchors = np.array(anchors, dtype=np.float32) / reduction  # scale anchors down to feature-map units
self.tensor_anchors = Tensor(self.anchors, mstype.float32)
self.anchors_mask = anchors_mask
anchors_w = []
anchors_h = []
for i in range(len(anchors_mask)):
anchors_w.append(self.anchors[self.anchors_mask[i]][0])
anchors_h.append(self.anchors[self.anchors_mask[i]][1])
self.anchors_w = Tensor(np.array(anchors_w).reshape(len(self.anchors_mask), 1))
self.anchors_h = Tensor(np.array(anchors_h).reshape(len(self.anchors_mask), 1))
self.reduction = reduction
self.seen = seen
self.head_idx = head_idx
self.zero = Tensor(0)
self.coord_scale = coord_scale
self.no_object_scale = no_object_scale
self.object_scale = object_scale
self.class_scale = class_scale
self.thresh = thresh
self.info = {'avg_iou': 0, 'class': 0, 'obj': 0, 'no_obj': 0,
'recall50': 0, 'recall75': 0, 'obj_cur': 0, 'obj_all': 0,
'coord_xy': 0, 'coord_wh': 0}
self.Shape = P.Shape()
self.Reshape = P.Reshape()
self.Sigmoid = P.Sigmoid()
self.ZerosLike = P.ZerosLike()
self.ScatterNd = P.ScatterNd()
self.ScatterNdUpdate = P.ScatterNdUpdate()
self.concat0 = P.Concat(0)
self.concat0_2 = P.Concat(0)
self.concat0_3 = P.Concat(0)
self.concat0_4 = P.Concat(0)
self.concat1 = P.Concat(1)
self.concat1_2 = P.Concat(1)
self.concat1_3 = P.Concat(1)
self.concat1_4 = P.Concat(1)
self.concat2 = P.Concat(2)
self.concat2_2 = P.Concat(2)
self.concat2_3 = P.Concat(2)
self.concat2_4 = P.Concat(2)
self.Tile = P.Tile()
self.Transpose = P.Transpose()
self.TupleToArray = P.TupleToArray()
self.ScalarToArray = P.ScalarToArray()
self.Cast = P.Cast()
self.Exp = P.Exp()
self.Sum = P.ReduceSum()
self.Log = P.Log()
self.TensorAdd = P.TensorAdd()
self.RealDiv = P.RealDiv()
self.Div = P.Div()
self.SmoothL1Loss = P.SmoothL1Loss()
self.Sub = P.Sub()
self.Greater = P.Greater()
self.GreaterEqual = P.GreaterEqual()
self.Minimum = P.Minimum()
self.Maximum = P.Maximum()
self.Less = P.Less()
self.OnesLike = P.OnesLike()
self.Fill = P.Fill()
self.Equal = P.Equal()
self.BCE = P.SigmoidCrossEntropyWithLogits()
self.CE = P.SoftmaxCrossEntropyWithLogits()
self.DType = P.DType()
self.PtLinspace = PtLinspace()
self.OneHot = nn.OneHot(-1, self.num_classes, 1.0, 0.0)
self.Squeeze2 = P.Squeeze(2)
self.ArgMax = P.Argmax()
self.ArgMaxWithValue1 = P.ArgMaxWithValue(1)
self.ReduceSum = P.ReduceSum()
self.Log = P.Log()
self.GatherNd = P.GatherNd()
self.Abs = P.Abs()
self.Select = P.Select()
self.IOU = P.IOU()
def construct(self, output, coord_mask, conf_pos_mask, conf_neg_mask, cls_mask, t_coord, t_conf, t_cls, gt_list):
"""
Compute Yolo loss.
"""
output_d = self.Shape(output)
num_batch = output_d[0]
num_anchors = self.num_anchors
num_classes = self.num_classes
        num_channels = output_d[1] // num_anchors  # integer division: Reshape needs int dims
height = output_d[2]
width = output_d[3]
output = self.Reshape(output, (num_batch, num_anchors, num_channels, height * width))
coord_01 = output[:, :, :2] # tx,ty
coord_23 = output[:, :, 2:4] # tw,th
coord = self.concat2((coord_01, coord_23))
conf = self.Squeeze2(output[:, :, 4:5, :])
cls = output[:, :, 5:]
cls = self.Reshape(cls, (num_batch*num_anchors, num_classes, height*width))
perm = (0, 2, 1)
cls = self.Transpose(cls, perm)
cls_shp = self.Shape(cls)
        cls = self.Reshape(cls, (cls_shp[0] * cls_shp[1] * cls_shp[2] // num_classes, num_classes))
lin_x = self.PtLinspace(0, width - 1, width)
lin_x = self.Tile(lin_x, (height, ))
lin_x = self.Cast(lin_x, mstype.float32)
lin_y = self.PtLinspace(0, height - 1, height)
lin_y = self.Reshape(lin_y, (height, 1))
lin_y = self.Tile(lin_y, (1, width))
lin_y = self.Reshape(lin_y, (self.Shape(lin_y)[0] * self.Shape(lin_y)[1], ))
lin_y = self.Cast(lin_y, mstype.float32)
anchor_w = self.anchors_w
anchor_h = self.anchors_h
anchor_w = self.Cast(anchor_w, mstype.float32)
anchor_h = self.Cast(anchor_h, mstype.float32)
coord_x = self.Sigmoid(coord[:, :, 0:1, :])
pred_boxes_0 = self.Squeeze2(coord_x) + lin_x
shape_pb0 = self.Shape(pred_boxes_0)
pred_boxes_0 = self.Reshape(pred_boxes_0, (shape_pb0[0] * shape_pb0[1] * shape_pb0[2], 1))
coord_y = self.Sigmoid(coord[:, :, 1:2, :])
pred_boxes_1 = self.Squeeze2(coord_y) + lin_y
shape_pb1 = self.Shape(pred_boxes_1)
pred_boxes_1 = self.Reshape(pred_boxes_1, (shape_pb1[0] * shape_pb1[1] * shape_pb1[2], 1))
pred_boxes_2 = self.Exp(self.Squeeze2(coord[:, :, 2:3, :])) * anchor_w
shape_pb2 = self.Shape(pred_boxes_2)
pred_boxes_2 = self.Reshape(pred_boxes_2, (shape_pb2[0] * shape_pb2[1] * shape_pb2[2], 1))
pred_boxes_3 = self.Exp(self.Squeeze2(coord[:, :, 3:4, :])) * anchor_h
shape_pb3 = self.Shape(pred_boxes_3)
pred_boxes_3 = self.Reshape(pred_boxes_3, (shape_pb3[0] * shape_pb3[1] * shape_pb3[2], 1))
pred_boxes_x1 = pred_boxes_0 - pred_boxes_2 / 2
pred_boxes_y1 = pred_boxes_1 - pred_boxes_3 / 2
pred_boxes_x2 = pred_boxes_0 + pred_boxes_2 / 2
pred_boxes_y2 = pred_boxes_1 + pred_boxes_3 / 2
pred_boxes_points = self.concat1_4((pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2))
total_anchors = num_anchors * height * width
mask_concat = None
conf_neg_mask_zero = self.ZerosLike(conf_neg_mask)
pred_boxes_points = pred_boxes_points * 64
gt_list = gt_list * 64
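        # For each image in the batch, predicted boxes whose IOU with any
        # ground-truth box exceeds `thresh` are zeroed out of the no-object
        # mask below, so near-correct detections are not penalised as false
        # positives.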
for b in range(num_batch):
cur_pred_boxes = pred_boxes_points[b * total_anchors:(b + 1) * total_anchors]
iou_gt_pred = self.IOU(self.Cast(cur_pred_boxes, mstype.float16), self.Cast(gt_list[b], mstype.float16))
mask = self.Cast((iou_gt_pred > self.thresh), mstype.float16)
mask = self.ReduceSum(mask, 0)
mask = mask > 0
shape_neg = self.Shape(conf_neg_mask[0])
mask = self.Reshape(mask, (1, shape_neg[0], shape_neg[1]))
if b == 0:
mask_concat = mask
else:
mask_concat = self.concat0_2((mask_concat, mask))
conf_neg_mask = self.Select(mask_concat, conf_neg_mask_zero, conf_neg_mask)
coord_mask = self.Tile(coord_mask, (1, 1, 4, 1))
coord_mask = coord_mask[:, :, :2]
coord_center = coord[:, :, :2]
t_coord_center = t_coord[:, :, :2]
coord_wh = coord[:, :, 2:]
t_coord_wh = t_coord[:, :, 2:]
one_hot_label = None
shape_cls_mask = None
if num_classes > 1:
shape_t_cls = self.Shape(t_cls)
t_cls = self.Reshape(t_cls, (shape_t_cls[0] * shape_t_cls[1] * shape_t_cls[2],))
one_hot_label = self.OneHot(self.Cast(t_cls, mstype.int32))
shape_cls_mask = self.Shape(cls_mask)
cls_mask = self.Reshape(cls_mask, (1, shape_cls_mask[0] * shape_cls_mask[1] * shape_cls_mask[2]))
added_scale = 1.0 + self.head_idx * 0.5
loss_coord_center = added_scale * 2.0 * 1.0 * self.coord_scale * self.Sum(
coord_mask * self.BCE(coord_center, t_coord_center), ())
loss_coord_wh = added_scale * 2.0 * 1.5 * self.coord_scale * self.Sum(
coord_mask * self.SmoothL1Loss(coord_wh, t_coord_wh), ())
loss_coord = 1.0 * (loss_coord_center + loss_coord_wh)
loss_conf_pos = added_scale * 2.0 * self.object_scale * self.Sum(conf_pos_mask * self.BCE(conf, t_conf), ())
loss_conf_neg = 1.0 * self.no_object_scale * self.Sum(conf_neg_mask * self.BCE(conf, t_conf), ())
loss_conf = loss_conf_pos + loss_conf_neg
loss_cls = None
if num_classes > 1:
loss_cls = self.class_scale * 1.0 * self.Sum(cls_mask * self.CE(cls, one_hot_label)[0], ())
else:
loss_cls = 0.0
cls = self.Squeeze2(output[:, :, 5:6, :])
loss_cls_pos = added_scale * 2.0 * self.object_scale * self.Sum(conf_pos_mask * self.BCE(cls, t_conf), ())
loss_cls_neg = 1.0 * self.no_object_scale * self.Sum(conf_neg_mask * self.BCE(cls, t_conf), ())
loss_cls = loss_cls_pos + loss_cls_neg
loss_tot = loss_coord + 0.5 * loss_conf + 0.5 * loss_cls
return loss_tot
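# Minimal usage sketch (illustrative; the anchors, masks and targets are
# placeholders, not values from the original training config):
# loss_fn = YoloLoss(num_classes=1,
#                    anchors=[[10, 13], [16, 30], [33, 23]],
#                    anchors_mask=[0, 1, 2], reduction=32)
# loss = loss_fn(output, coord_mask, conf_pos_mask, conf_neg_mask, cls_mask,
#                t_coord, t_conf, t_cls, gt_list)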
|
[
"mindspore.ops.operations.SigmoidCrossEntropyWithLogits",
"mindspore.ops.operations.GatherNd",
"mindspore.Tensor",
"mindspore.ops.operations.Cast",
"mindspore.ops.operations.DType",
"mindspore.ops.operations.IOU",
"mindspore.nn.OneHot",
"mindspore.ops.operations.Fill",
"mindspore.ops.operations.Transpose",
"mindspore.ops.operations.Concat",
"mindspore.ops.operations.Maximum",
"mindspore.ops.operations.ReduceSum",
"mindspore.ops.operations.ReduceMean",
"mindspore.ops.operations.TensorAdd",
"mindspore.ops.operations.Select",
"mindspore.ops.operations.Argmax",
"mindspore.ops.operations.TupleToArray",
"mindspore.ops.operations.Log",
"mindspore.ops.operations.ArgMaxWithValue",
"mindspore.ops.operations.Sum",
"mindspore.ops.operations.Exp",
"mindspore.ops.operations.Reshape",
"mindspore.ops.operations.Squeeze",
"mindspore.ops.operations.ScalarToArray",
"mindspore.ops.operations.Minimum",
"mindspore.ops.operations.Sqrt",
"mindspore.ops.operations.ScatterNdUpdate",
"mindspore.ops.operations.Abs",
"mindspore.ops.operations.Less",
"mindspore.ops.operations.Equal",
"mindspore.ops.operations.ScatterNd",
"mindspore.ops.operations.Greater",
"mindspore.ops.operations.Sigmoid",
"mindspore.ops.operations.OnesLike",
"mindspore.ops.operations.RealDiv",
"mindspore.ops.operations.ZerosLike",
"mindspore.ops.operations.SoftmaxCrossEntropyWithLogits",
"mindspore.ops.operations.Pow",
"mindspore.ops.operations.Shape",
"mindspore.ops.operations.Sub",
"numpy.array",
"mindspore.ops.operations.GreaterEqual",
"mindspore.ops.operations.Tile",
"mindspore.ops.operations.SmoothL1Loss",
"mindspore.ops.operations.Div"
] |
[((1055, 1071), 'mindspore.ops.operations.TupleToArray', 'P.TupleToArray', ([], {}), '()\n', (1069, 1071), True, 'from mindspore.ops import operations as P\n'), ((1419, 1426), 'mindspore.ops.operations.Sum', 'P.Sum', ([], {}), '()\n', (1424, 1426), True, 'from mindspore.ops import operations as P\n'), ((1447, 1475), 'mindspore.ops.operations.ReduceMean', 'P.ReduceMean', ([], {'keepdims': '(False)'}), '(keepdims=False)\n', (1459, 1475), True, 'from mindspore.ops import operations as P\n'), ((1495, 1502), 'mindspore.ops.operations.Pow', 'P.Pow', ([], {}), '()\n', (1500, 1502), True, 'from mindspore.ops import operations as P\n'), ((1523, 1531), 'mindspore.ops.operations.Sqrt', 'P.Sqrt', ([], {}), '()\n', (1529, 1531), True, 'from mindspore.ops import operations as P\n'), ((3026, 3062), 'mindspore.Tensor', 'Tensor', (['self.anchors', 'mstype.float32'], {}), '(self.anchors, mstype.float32)\n', (3032, 3062), False, 'from mindspore import Tensor\n'), ((3619, 3628), 'mindspore.Tensor', 'Tensor', (['(0)'], {}), '(0)\n', (3625, 3628), False, 'from mindspore import Tensor\n'), ((4047, 4056), 'mindspore.ops.operations.Shape', 'P.Shape', ([], {}), '()\n', (4054, 4056), True, 'from mindspore.ops import operations as P\n'), ((4080, 4091), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (4089, 4091), True, 'from mindspore.ops import operations as P\n'), ((4115, 4126), 'mindspore.ops.operations.Sigmoid', 'P.Sigmoid', ([], {}), '()\n', (4124, 4126), True, 'from mindspore.ops import operations as P\n'), ((4152, 4165), 'mindspore.ops.operations.ZerosLike', 'P.ZerosLike', ([], {}), '()\n', (4163, 4165), True, 'from mindspore.ops import operations as P\n'), ((4191, 4204), 'mindspore.ops.operations.ScatterNd', 'P.ScatterNd', ([], {}), '()\n', (4202, 4204), True, 'from mindspore.ops import operations as P\n'), ((4236, 4255), 'mindspore.ops.operations.ScatterNdUpdate', 'P.ScatterNdUpdate', ([], {}), '()\n', (4253, 4255), True, 'from mindspore.ops import operations as P\n'), ((4279, 4290), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4287, 4290), True, 'from mindspore.ops import operations as P\n'), ((4316, 4327), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4324, 4327), True, 'from mindspore.ops import operations as P\n'), ((4353, 4364), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4361, 4364), True, 'from mindspore.ops import operations as P\n'), ((4390, 4401), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4398, 4401), True, 'from mindspore.ops import operations as P\n'), ((4425, 4436), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4433, 4436), True, 'from mindspore.ops import operations as P\n'), ((4462, 4473), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4470, 4473), True, 'from mindspore.ops import operations as P\n'), ((4499, 4510), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4507, 4510), True, 'from mindspore.ops import operations as P\n'), ((4536, 4547), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4544, 4547), True, 'from mindspore.ops import operations as P\n'), ((4571, 4582), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4579, 4582), True, 'from mindspore.ops import operations as P\n'), ((4608, 4619), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4616, 4619), True, 'from mindspore.ops import operations as P\n'), ((4645, 
4656), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4653, 4656), True, 'from mindspore.ops import operations as P\n'), ((4682, 4693), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4690, 4693), True, 'from mindspore.ops import operations as P\n'), ((4715, 4723), 'mindspore.ops.operations.Tile', 'P.Tile', ([], {}), '()\n', (4721, 4723), True, 'from mindspore.ops import operations as P\n'), ((4749, 4762), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (4760, 4762), True, 'from mindspore.ops import operations as P\n'), ((4791, 4807), 'mindspore.ops.operations.TupleToArray', 'P.TupleToArray', ([], {}), '()\n', (4805, 4807), True, 'from mindspore.ops import operations as P\n'), ((4837, 4854), 'mindspore.ops.operations.ScalarToArray', 'P.ScalarToArray', ([], {}), '()\n', (4852, 4854), True, 'from mindspore.ops import operations as P\n'), ((4875, 4883), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (4881, 4883), True, 'from mindspore.ops import operations as P\n'), ((4903, 4910), 'mindspore.ops.operations.Exp', 'P.Exp', ([], {}), '()\n', (4908, 4910), True, 'from mindspore.ops import operations as P\n'), ((4930, 4943), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (4941, 4943), True, 'from mindspore.ops import operations as P\n'), ((4963, 4970), 'mindspore.ops.operations.Log', 'P.Log', ([], {}), '()\n', (4968, 4970), True, 'from mindspore.ops import operations as P\n'), ((4996, 5009), 'mindspore.ops.operations.TensorAdd', 'P.TensorAdd', ([], {}), '()\n', (5007, 5009), True, 'from mindspore.ops import operations as P\n'), ((5033, 5044), 'mindspore.ops.operations.RealDiv', 'P.RealDiv', ([], {}), '()\n', (5042, 5044), True, 'from mindspore.ops import operations as P\n'), ((5064, 5071), 'mindspore.ops.operations.Div', 'P.Div', ([], {}), '()\n', (5069, 5071), True, 'from mindspore.ops import operations as P\n'), ((5100, 5116), 'mindspore.ops.operations.SmoothL1Loss', 'P.SmoothL1Loss', ([], {}), '()\n', (5114, 5116), True, 'from mindspore.ops import operations as P\n'), ((5136, 5143), 'mindspore.ops.operations.Sub', 'P.Sub', ([], {}), '()\n', (5141, 5143), True, 'from mindspore.ops import operations as P\n'), ((5167, 5178), 'mindspore.ops.operations.Greater', 'P.Greater', ([], {}), '()\n', (5176, 5178), True, 'from mindspore.ops import operations as P\n'), ((5207, 5223), 'mindspore.ops.operations.GreaterEqual', 'P.GreaterEqual', ([], {}), '()\n', (5221, 5223), True, 'from mindspore.ops import operations as P\n'), ((5247, 5258), 'mindspore.ops.operations.Minimum', 'P.Minimum', ([], {}), '()\n', (5256, 5258), True, 'from mindspore.ops import operations as P\n'), ((5282, 5293), 'mindspore.ops.operations.Maximum', 'P.Maximum', ([], {}), '()\n', (5291, 5293), True, 'from mindspore.ops import operations as P\n'), ((5314, 5322), 'mindspore.ops.operations.Less', 'P.Less', ([], {}), '()\n', (5320, 5322), True, 'from mindspore.ops import operations as P\n'), ((5347, 5359), 'mindspore.ops.operations.OnesLike', 'P.OnesLike', ([], {}), '()\n', (5357, 5359), True, 'from mindspore.ops import operations as P\n'), ((5380, 5388), 'mindspore.ops.operations.Fill', 'P.Fill', ([], {}), '()\n', (5386, 5388), True, 'from mindspore.ops import operations as P\n'), ((5410, 5419), 'mindspore.ops.operations.Equal', 'P.Equal', ([], {}), '()\n', (5417, 5419), True, 'from mindspore.ops import operations as P\n'), ((5439, 5472), 'mindspore.ops.operations.SigmoidCrossEntropyWithLogits', 'P.SigmoidCrossEntropyWithLogits', 
([], {}), '()\n', (5470, 5472), True, 'from mindspore.ops import operations as P\n'), ((5491, 5524), 'mindspore.ops.operations.SoftmaxCrossEntropyWithLogits', 'P.SoftmaxCrossEntropyWithLogits', ([], {}), '()\n', (5522, 5524), True, 'from mindspore.ops import operations as P\n'), ((5546, 5555), 'mindspore.ops.operations.DType', 'P.DType', ([], {}), '()\n', (5553, 5555), True, 'from mindspore.ops import operations as P\n'), ((5617, 5658), 'mindspore.nn.OneHot', 'nn.OneHot', (['(-1)', 'self.num_classes', '(1.0)', '(0.0)'], {}), '(-1, self.num_classes, 1.0, 0.0)\n', (5626, 5658), True, 'import mindspore.nn as nn\n'), ((5683, 5695), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', (['(2)'], {}), '(2)\n', (5692, 5695), True, 'from mindspore.ops import operations as P\n'), ((5718, 5728), 'mindspore.ops.operations.Argmax', 'P.Argmax', ([], {}), '()\n', (5726, 5728), True, 'from mindspore.ops import operations as P\n'), ((5761, 5781), 'mindspore.ops.operations.ArgMaxWithValue', 'P.ArgMaxWithValue', (['(1)'], {}), '(1)\n', (5778, 5781), True, 'from mindspore.ops import operations as P\n'), ((5807, 5820), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (5818, 5820), True, 'from mindspore.ops import operations as P\n'), ((5840, 5847), 'mindspore.ops.operations.Log', 'P.Log', ([], {}), '()\n', (5845, 5847), True, 'from mindspore.ops import operations as P\n'), ((5872, 5884), 'mindspore.ops.operations.GatherNd', 'P.GatherNd', ([], {}), '()\n', (5882, 5884), True, 'from mindspore.ops import operations as P\n'), ((5904, 5911), 'mindspore.ops.operations.Abs', 'P.Abs', ([], {}), '()\n', (5909, 5911), True, 'from mindspore.ops import operations as P\n'), ((5934, 5944), 'mindspore.ops.operations.Select', 'P.Select', ([], {}), '()\n', (5942, 5944), True, 'from mindspore.ops import operations as P\n'), ((5964, 5971), 'mindspore.ops.operations.IOU', 'P.IOU', ([], {}), '()\n', (5969, 5971), True, 'from mindspore.ops import operations as P\n'), ((2910, 2945), 'numpy.array', 'np.array', (['anchors'], {'dtype': 'np.float32'}), '(anchors, dtype=np.float32)\n', (2918, 2945), True, 'import numpy as np\n'), ((3361, 3380), 'numpy.array', 'np.array', (['anchors_w'], {}), '(anchors_w)\n', (3369, 3380), True, 'import numpy as np\n'), ((3449, 3468), 'numpy.array', 'np.array', (['anchors_h'], {}), '(anchors_h)\n', (3457, 3468), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Basic and Monitor-Curve Exponent Transfer Functions
===================================================
Defines the exponent transfer functions:
- :func:`colour.models.exponent_function_basic`
- :func:`colour.models.exponent_function_monitor_curve`
References
----------
- :cite:`TheAcademyofMotionPictureArtsandSciences2020` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2020). Specification
S-2014-006 - Common LUT Format (CLF) - A Common File Format for Look-Up
Tables. Retrieved June 24, 2020, from http://j.mp/S-2014-006
"""
import numpy as np
from colour.utilities import as_float, as_float_array, suppress_warnings
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['exponent_function_basic', 'exponent_function_monitor_curve']
def exponent_function_basic(x, exponent=1, style='basicFwd'):
"""
Defines the *basic* exponent transfer function.
Parameters
----------
x : numeric or array_like
Data to undergo the basic exponent conversion.
exponent : numeric or array_like, optional
Exponent value used for the conversion.
style : unicode, optional
**{'basicFwd', 'basicRev', 'basicMirrorFwd', 'basicMirrorRev',
'basicPassThruFwd', 'basicPassThruRev'}**,
Defines the behaviour for the transfer function to operate:
- *basicFwd*: *Basic Forward* exponential behaviour where the
definition applies a basic power law using the exponent. Values
less than zero are clamped.
- *basicRev*: *Basic Reverse* exponential behaviour where the
definition applies a basic power law using the exponent. Values
less than zero are clamped.
- *basicMirrorFwd*: *Basic Mirror Forward* exponential behaviour
where the definition applies a basic power law using the exponent
for values greater than or equal to zero and mirrors the function
for values less than zero (i.e. rotationally symmetric
around the origin).
- *basicMirrorRev*: *Basic Mirror Reverse* exponential behaviour
where the definition applies a basic power law using the exponent
for values greater than or equal to zero and mirrors the function
for values less than zero (i.e. rotationally symmetric around the
origin).
- *basicPassThruFwd*: *Basic Pass Forward* exponential behaviour
where the definition applies a basic power law using the exponent
for values greater than or equal to zero and passes values less
than zero unchanged.
- *basicPassThruRev*: *Basic Pass Reverse* exponential behaviour
where the definition applies a basic power law using the exponent
for values greater than or equal to zero and passes values less
than zero unchanged.
Returns
-------
numeric or ndarray
Exponentially converted data.
Raises
------
ValueError
If the *style* is not defined.
Examples
--------
>>> exponent_function_basic(0.18, 2.2) # doctest: +ELLIPSIS
0.0229932...
>>> exponent_function_basic(-0.18, 2.2)
0.0
>>> exponent_function_basic(0.18, 2.2, 'basicRev') # doctest: +ELLIPSIS
0.4586564...
>>> exponent_function_basic(-0.18, 2.2, 'basicRev')
0.0
>>> exponent_function_basic( # doctest: +ELLIPSIS
... 0.18, 2.2, 'basicMirrorFwd')
0.0229932...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... -0.18, 2.2, 'basicMirrorFwd')
-0.0229932...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... 0.18, 2.2, 'basicMirrorRev')
0.4586564...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... -0.18, 2.2, 'basicMirrorRev')
-0.4586564...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... 0.18, 2.2, 'basicPassThruFwd')
0.0229932...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... -0.18, 2.2, 'basicPassThruFwd')
-0.1799999...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... 0.18, 2.2, 'basicPassThruRev')
0.4586564...
>>> exponent_function_basic( # doctest: +ELLIPSIS
... -0.18, 2.2, 'basicPassThruRev')
-0.1799999...
"""
x = as_float_array(x)
exponent = as_float_array(exponent)
def exponent_forward(x):
"""
Returns the input raised to the exponent value.
"""
return x ** exponent
def exponent_reverse(y):
"""
Returns the input raised to the inverse exponent value.
"""
return y ** (1 / exponent)
style = style.lower()
if style == 'basicfwd':
return as_float(np.where(x >= 0, exponent_forward(x), 0))
elif style == 'basicrev':
return as_float(np.where(x >= 0, exponent_reverse(x), 0))
elif style == 'basicmirrorfwd':
return as_float(
np.where(x >= 0, exponent_forward(x), -exponent_forward(-x)))
elif style == 'basicmirrorrev':
return as_float(
np.where(x >= 0, exponent_reverse(x), -exponent_reverse(-x)))
elif style == 'basicpassthrufwd':
return as_float(np.where(x >= 0, exponent_forward(x), x))
elif style == 'basicpassthrurev':
return as_float(np.where(x >= 0, exponent_reverse(x), x))
else:
raise ValueError(
'Undefined style used: "{0}", must be one of the following: '
'"{1}".'.format(
style, ', '.join([
'basicFwd', 'basicRev', 'basicMirrorFwd', 'basicMirrorRev',
'basicPassThruFwd', 'basicPassThruRev'
])))
def exponent_function_monitor_curve(x,
exponent=1,
offset=0,
style='monCurveFwd'):
"""
Defines the *Monitor Curve* exponent transfer function.
Parameters
----------
x : numeric or array_like
Data to undergo the monitor curve exponential conversion.
exponent : numeric or array_like, optional
Exponent value used for the conversion.
offset: numeric or array_like, optional
Offset value used for the conversion.
style : unicode, optional
**{'monCurveFwd', 'monCurveRev', 'monCurveMirrorFwd',
'monCurveMirrorRev'}**,
Defines the behaviour for the transfer function to operate:
- *monCurveFwd*: *Monitor Curve Forward* exponential behaviour
where the definition applies a power law function with a linear
segment near the origin.
- *monCurveRev*: *Monitor Curve Reverse* exponential behaviour
where the definition applies a power law function with a linear
segment near the origin.
- *monCurveMirrorFwd*: *Monitor Curve Mirror Forward* exponential
behaviour where the definition applies a power law function with a
linear segment near the origin and mirrors the function for values
less than zero (i.e. rotationally symmetric around the origin).
- *monCurveMirrorRev*: *Monitor Curve Mirror Reverse* exponential
behaviour where the definition applies a power law function with a
linear segment near the origin and mirrors the function for values
less than zero (i.e. rotationally symmetric around the origin).
Returns
-------
numeric or ndarray
Exponentially converted data.
Raises
------
ValueError
If the *style* is not defined.
Examples
--------
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... 0.18, 2.2, 0.001)
0.0232240...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... -0.18, 2.2, 0.001)
-0.0002054...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... 0.18, 2.2, 0.001, 'monCurveRev')
0.4581151...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... -0.18, 2.2, 0.001, 'monCurveRev')
-157.7302795...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... 0.18, 2.2, 2, 'monCurveMirrorFwd')
0.1679399...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... -0.18, 2.2, 0.001, 'monCurveMirrorFwd')
-0.0232240...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... 0.18, 2.2, 0.001, 'monCurveMirrorRev')
0.4581151...
>>> exponent_function_monitor_curve( # doctest: +ELLIPSIS
... -0.18, 2.2, 0.001, 'monCurveMirrorRev')
-0.4581151...
"""
x = as_float_array(x)
exponent = as_float_array(exponent)
offset = as_float_array(offset)
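    # Reading of the CLF monCurve definition: `s` computed below is the slope
    # of the linear segment near the origin, chosen so the linear and
    # power-law pieces meet at the breakpoint; NaNs (e.g. from a zero offset)
    # fall back to a slope of 1.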
with suppress_warnings(python_warnings=True):
s = as_float_array(((exponent - 1) / offset) * ((exponent * offset) / (
(exponent - 1) * (offset + 1))) ** exponent)
s[np.isnan(s)] = 1
def monitor_curve_forward(x):
"""
Defines the *Monitor Curve Forward* function.
"""
x_break = offset / (exponent - 1)
return np.where(
x >= x_break,
((x + offset) / (1 + offset)) ** exponent,
x * s,
)
def monitor_curve_reverse(y):
"""
Defines the *Monitor Curve Reverse* function.
"""
y_break = ((exponent * offset) / (
(exponent - 1) * (1 + offset))) ** exponent
return np.where(
y >= y_break,
((1 + offset) * (y ** (1 / exponent))) - offset,
y / s,
)
style = style.lower()
if style == 'moncurvefwd':
return as_float(monitor_curve_forward(x))
elif style == 'moncurverev':
return as_float(monitor_curve_reverse(x))
elif style == 'moncurvemirrorfwd':
return as_float(
np.where(
x >= 0,
monitor_curve_forward(x),
-monitor_curve_forward(-x),
))
elif style == 'moncurvemirrorrev':
return as_float(
np.where(
x >= 0,
monitor_curve_reverse(x),
-monitor_curve_reverse(-x),
))
else:
raise ValueError(
'Undefined style used: "{0}", must be one of the following: '
'"{1}".'.format(
style, ', '.join([
'monCurveFwd', 'monCurveRev', 'monCurveMirrorFwd',
'monCurveMirrorRev'
])))
|
[
"colour.utilities.suppress_warnings",
"numpy.where",
"numpy.isnan",
"colour.utilities.as_float_array"
] |
[((4646, 4663), 'colour.utilities.as_float_array', 'as_float_array', (['x'], {}), '(x)\n', (4660, 4663), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((4679, 4703), 'colour.utilities.as_float_array', 'as_float_array', (['exponent'], {}), '(exponent)\n', (4693, 4703), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((8992, 9009), 'colour.utilities.as_float_array', 'as_float_array', (['x'], {}), '(x)\n', (9006, 9009), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9025, 9049), 'colour.utilities.as_float_array', 'as_float_array', (['exponent'], {}), '(exponent)\n', (9039, 9049), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9063, 9085), 'colour.utilities.as_float_array', 'as_float_array', (['offset'], {}), '(offset)\n', (9077, 9085), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9096, 9135), 'colour.utilities.suppress_warnings', 'suppress_warnings', ([], {'python_warnings': '(True)'}), '(python_warnings=True)\n', (9113, 9135), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9149, 9261), 'colour.utilities.as_float_array', 'as_float_array', (['((exponent - 1) / offset * (exponent * offset / ((exponent - 1) * (offset +\n 1))) ** exponent)'], {}), '((exponent - 1) / offset * (exponent * offset / ((exponent - \n 1) * (offset + 1))) ** exponent)\n', (9163, 9261), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9474, 9546), 'numpy.where', 'np.where', (['(x >= x_break)', '(((x + offset) / (1 + offset)) ** exponent)', '(x * s)'], {}), '(x >= x_break, ((x + offset) / (1 + offset)) ** exponent, x * s)\n', (9482, 9546), True, 'import numpy as np\n'), ((9823, 9897), 'numpy.where', 'np.where', (['(y >= y_break)', '((1 + offset) * y ** (1 / exponent) - offset)', '(y / s)'], {}), '(y >= y_break, (1 + offset) * y ** (1 / exponent) - offset, y / s)\n', (9831, 9897), True, 'import numpy as np\n'), ((9285, 9296), 'numpy.isnan', 'np.isnan', (['s'], {}), '(s)\n', (9293, 9296), True, 'import numpy as np\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Last Change: Tue Jul 17 05:00 PM 2007 J
# The code and descriptive text is copyrighted and offered under the terms of
# the BSD License from the authors; see below. However, the actual dataset may
# have a different origin and intellectual property status. See the SOURCE and
# COPYRIGHT variables for this information.
# Copyright (c) 2007 <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the author nor the names of any contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from ..vtypes import integer
from ..utils import standard_properties, standard_classification_loader
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain. """
name = "German Dataset"
short_name = "German Dataset"
url = 'http://www.liacc.up.pt/ML/old/statlog/datasets.html'
SOURCE = """
http://www.liacc.up.pt/ML/old/statlog/datasets.html
Professor Dr. <NAME>
Institut für Statistik und Ökonometrie Universität Hamburg
FB Wirtschaftswissenschaften
Von-Melle-Park 5
2000 Hamburg 13
Two datasets are provided. The original dataset, in the form provided by Prof.
Hofmann, contains categorical/symbolic attributes and is in the file
"german.dat". For algorithms that need numerical attributes, Strathclyde
University produced the file "german.numer". This file has been edited and
several indicator variables added to make it suitable for algorithms which
cannot cope with categorical variables. Several attributes that are ordered
categorical (such as attribute 17) have been coded as integer. This was the
form used by StatLog.
Here (milksets), only the numeric datasets are provided.
"""
notes = """
Number of Instances: 1000. 700 for class 0 (good credit) and 300 for class 1
(bad credit).
Number of Attributes: 24.
label: 0 for good credit, +1 for bad credit
"""
label_names = ['good_credit', 'bad_credit']
missing_values = False
value_types = [
# FIXME
# This is wrong! Not all outputs are integers (some are categorical),
# but the above does not give enough information to know which features are what.
integer('feat{}'.format(i+1)) for i in range(24)
]
@standard_classification_loader(name)
def load(force_contiguous=True):
"""load the german data and returns them.
:returns:
data: dict
Contains the following values:
'data' : the actual data
'label' : label[i] is the label index of data[i]
'class' : class[label[i]] is the label name of data[i]
"""
import numpy
import pickle
import gzip
from os.path import dirname, join
features,labels = pickle.load(gzip.GzipFile(join(dirname(__file__), 'data', 'german.pp.gz')))
featnames = list(features.keys())
featnames.sort()
nfeatures = []
for k in featnames:
nfeatures.append(list(map(float,features[k])))
nfeatures = np.array(nfeatures)
features = nfeatures.T
if force_contiguous:
features = features.copy()
labels = np.array([(lab == '+1') for lab in labels])
    labels = labels.astype(int)  # np.int was removed in NumPy >= 1.24
return features,labels
|
[
"os.path.dirname",
"numpy.array"
] |
[((4312, 4331), 'numpy.array', 'np.array', (['nfeatures'], {}), '(nfeatures)\n', (4320, 4331), True, 'import numpy as np\n'), ((4432, 4475), 'numpy.array', 'np.array', (["[(lab == '+1') for lab in labels]"], {}), "([(lab == '+1') for lab in labels])\n", (4440, 4475), True, 'import numpy as np\n'), ((4094, 4111), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (4101, 4111), False, 'from os.path import dirname, join\n')]
|
from __future__ import print_function, division
import abc
import numpy as np
class StreamProcessor(object):
"""Base class for stream processors"""
def __call__(self, items):
"""Processed the whole stream of items.
Args:
items (Iterable(object)) the stream of items to process.
"""
for item in items:
self.put(item)
@abc.abstractmethod
def put(self, item):
"""The method for processing one item"""
raise NotImplementedError('')
@abc.abstractmethod
def reset(self):
"""Resets the stream processor"""
raise NotImplementedError('')
class ReservoirSampling(StreamProcessor):
"""Maintains a UNIFORM SAMPLE of processed items up to any time t."""
def __init__(self, size, seed=None):
self.size = size
self.t = None
self.reservoir = None
self.seed = seed
np.random.seed(seed)
self.reset()
def put(self, item):
self.t += 1
if len(self.reservoir) < self.size:
self.reservoir.append(item)
else:
replace_probability = self.size / self.t
if np.random.random() < replace_probability:
replace_idx = np.random.randint(0, self.size)
self.reservoir[replace_idx] = item
def reset(self):
self.reservoir = []
self.t = 0
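# Minimal usage sketch (illustrative, not part of the original module):
# sampler = ReservoirSampling(size=10, seed=0)
# sampler(range(1000))   # feeds each item through put()
# sampler.reservoir      # now holds a uniform sample of 10 of the 1000 items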
|
[
"numpy.random.randint",
"numpy.random.random",
"numpy.random.seed"
] |
[((917, 937), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (931, 937), True, 'import numpy as np\n'), ((1171, 1189), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1187, 1189), True, 'import numpy as np\n'), ((1243, 1274), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {}), '(0, self.size)\n', (1260, 1274), True, 'import numpy as np\n')]
|
import numpy as np
from config import FEEDRATE, X_STEP, Y_STEP, HEIGHT, WIDTH
# TO DO:
# * We assume that the head's nozzles extend along the Y direction.
# (This is apparently the case.)
def array_to_gcode(array):
"""Convert numpy array into a sequence of gcodes, saved to file."""
assert isinstance(array, np.ndarray)
height = array.shape[0]
#assert height == HEIGHT
width = array.shape[1]
#assert width == WIDTH
strip_number = int(np.ceil(height/12.0))
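    # 12 here is the assumed nozzle count along Y: the image is rasterised in
    # horizontal strips of 12 rows each (matching the 12-bit firing pattern).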
gcode = ""
x_pos = 0
y_pos = 0
    for strip_idx in range(strip_number):
        x_pos = 0
        for column_idx in range(width):
nozzles_gcode = fire_nozzles(array[12*strip_idx:12*(strip_idx+1),
column_idx])
if nozzles_gcode: # Only print and move if there's any non-white
# pixels
gcode += move(x_pos, y_pos)
gcode += nozzles_gcode
x_pos += X_STEP
y_pos += Y_STEP
return gcode
def move(x_pos, y_pos):
"""Return the G-CODE describing motion to x_pos, y_pos."""
out = ""
out += "G1X"+str(x_pos)+"Y"+str(y_pos)+"F"+str(FEEDRATE)+";\n"
out += "M400;\n"
return out
def fire_nozzles(firing_column):
"""Return the G-CODE describing the printing sequence. If there
is nothing to be printed, return an empty string.
"""
out = ''
if np.all(firing_column == 0):
return out
else:
while np.any(firing_column != 0):
firing_pattern, firing_column = salvo_integer(firing_column)
out += "M700 P0 S"+str(firing_pattern)+";\n"
return out
def salvo_integer(firing_column):
"""Given a column from a numpy array, return the decimal firing pattern
    and a new firing column (with each fired nozzle's count decremented by one).
The decimal firing pattern is a decimal integer which, written in binary,
designates the nozzles that ought to be fired. It is a component of the
firing G-CODE.
"""
pattern = ''
for idx, entry in enumerate(firing_column):
if entry > 0:
pattern += '1'
firing_column[idx] -= 1
else:
pattern += '0'
# Pad with zeroes
pattern += '0'*(12 - len(pattern))
return int(pattern, 2), firing_column
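# Worked example (illustration only; running this file directly still needs
# the local `config` module imported above): a 12-entry column where nozzles
# 0 and 2 each fire once maps to binary '101000000000', i.e. 2560.
if __name__ == "__main__":
    col = np.array([1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    pattern, remaining = salvo_integer(col)
    assert pattern == int('101000000000', 2) == 2560
    assert np.all(remaining == 0)  # fired nozzles were decremented to zero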
|
[
"numpy.any",
"numpy.ceil",
"numpy.all"
] |
[((1442, 1468), 'numpy.all', 'np.all', (['(firing_column == 0)'], {}), '(firing_column == 0)\n', (1448, 1468), True, 'import numpy as np\n'), ((477, 499), 'numpy.ceil', 'np.ceil', (['(height / 12.0)'], {}), '(height / 12.0)\n', (484, 499), True, 'import numpy as np\n'), ((1513, 1539), 'numpy.any', 'np.any', (['(firing_column != 0)'], {}), '(firing_column != 0)\n', (1519, 1539), True, 'import numpy as np\n')]
|
import copy
from typing import List, Dict
import numpy as np
from prettytable import PrettyTable
from ase import Atoms
from dscribe.descriptors import SineMatrix
from dscribe.descriptors import CoulombMatrix
from dscribe.descriptors import ACSF
from dscribe.descriptors import SOAP
from matminer.featurizers.composition import ElementProperty
from matminer.featurizers.site import ChemicalSRO
from matminer.featurizers.site import OPSiteFingerprint
from matminer.featurizers.site import CrystalNNFingerprint
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.core.periodic_table import Element
SUPPORTED_MATMINER_CLASSES = [
ElementProperty,
ChemicalSRO,
OPSiteFingerprint,
CrystalNNFingerprint,
]
SUPPORTED_DSCRIBE_CLASSES = [SineMatrix, CoulombMatrix, ACSF, SOAP]
class FeaturizerError(Exception):
pass
class Featurizer:
def __init__(
self,
        featurizer_class=None,  # validated by the featurizer_class setter
design_space_structures: List[Atoms] = None,
species_list: List[str] = None,
max_size: int = None,
preset: str = None,
kwargs: Dict = None,
):
self._featurizer_class = SineMatrix
self.featurizer_class = featurizer_class
self._preset = None
self.preset = preset
self._kwargs = None
self.kwargs = kwargs
self._max_size = 100
self.max_size = max_size
self._species_list = ["Fe", "Ni", "Pt", "Pd", "Cu", "C", "N", "O", "H"]
self.species_list = species_list
# overrides max_size and species_list if given
self._design_space_structures = None
self.design_space_structures = design_space_structures
def __eq__(self, other: object) -> bool:
if isinstance(other, Featurizer):
for attr in [
"featurizer_class",
"species_list",
"max_size",
"preset",
"kwargs",
]:
if getattr(self, attr) != getattr(other, attr):
return False
return True
return False
def __repr__(self) -> str:
pt = PrettyTable()
pt.field_names = ["", "Featurizer"]
class_name = (
self.featurizer_class.__module__ + "." + self.featurizer_class.__name__
)
pt.add_row(["class", class_name])
pt.add_row(["kwargs", self.kwargs])
pt.add_row(["species list", self.species_list])
pt.add_row(["maximum structure size", self.max_size])
pt.add_row(["preset", self.preset])
pt.add_row(
[
"design space structures provided?",
self.design_space_structures is not None,
]
)
pt.max_width = 70
return str(pt)
def copy(self):
"""
Returns a copy of the featurizer
"""
ds_structs_copy = (
[struct.copy() for struct in self.design_space_structures]
if self.design_space_structures
else None
)
        feat = self.__class__(
            featurizer_class=self.featurizer_class,
            design_space_structures=ds_structs_copy,
            species_list=self.species_list.copy(),
            max_size=self.max_size,
            preset=self.preset,  # propagate preset so the copy compares equal
            kwargs=copy.deepcopy(self.kwargs) if self.kwargs else None,
        )
return feat
@property
def featurizer_class(self):
return self._featurizer_class
@featurizer_class.setter
def featurizer_class(self, featurizer_class):
if (
featurizer_class in SUPPORTED_MATMINER_CLASSES
or featurizer_class in SUPPORTED_DSCRIBE_CLASSES
):
self._featurizer_class = featurizer_class
self._preset = None
self._kwargs = None
else:
msg = f"Featurization class {featurizer_class} is not currently supported."
raise FeaturizerError(msg)
@property
def preset(self):
return self._preset
@preset.setter
def preset(self, preset):
if self.featurizer_class in [CrystalNNFingerprint, ElementProperty]:
self._preset = preset
elif preset is None:
self._preset = preset
else:
msg = f"Presets are not supported for {self.featurizer_class.__module__}"
raise FeaturizerError(msg)
@property
def kwargs(self):
return self._kwargs
@kwargs.setter
def kwargs(self, kwargs):
if kwargs is not None:
self._kwargs = kwargs.copy()
@property
def design_space_structures(self):
return self._design_space_structures
@design_space_structures.setter
def design_space_structures(self, design_space_structures: List[Atoms]):
if design_space_structures is not None:
self._design_space_structures = [
struct.copy() for struct in design_space_structures
]
# analyze new design space
ds_structs = design_space_structures
_species_list = []
for s in ds_structs:
# get all unique species
found_species = np.unique(s.get_chemical_symbols()).tolist()
new_species = [
spec for spec in found_species if spec not in _species_list
]
_species_list.extend(new_species)
# sort species list
sorted_species_list = sorted(
_species_list, key=lambda el: Element(el).mendeleev_no
)
self._max_size = max([len(s) for s in ds_structs])
self._species_list = sorted_species_list
@property
def max_size(self):
return self._max_size
@max_size.setter
def max_size(self, max_size):
if max_size is not None:
self._max_size = max_size
@property
def species_list(self):
return self._species_list
@species_list.setter
def species_list(self, species_list: List[str]):
if species_list is not None:
_species_list = species_list.copy()
# sort species list by mendeleev number
sorted_species_list = sorted(
_species_list, key=lambda el: Element(el).mendeleev_no
)
self._species_list = sorted_species_list
# TODO: "get_featurization_object" -> "get_featurizer"
@property
def featurization_object(self):
return self._get_featurization_object()
def _get_featurization_object(self):
# instantiate featurizer object
if hasattr(self.featurizer_class, "from_preset") and self.preset is not None:
return self.featurizer_class.from_preset(self.preset)
if self.featurizer_class in [SineMatrix, CoulombMatrix]:
return self.featurizer_class(
n_atoms_max=self.max_size, permutation="none", **self.kwargs or {},
)
if self.featurizer_class in [SOAP, ACSF]:
return self.featurizer_class(species=self.species_list, **self.kwargs or {})
return self.featurizer_class(**self.kwargs or {})
def featurize_single(self, structure: Atoms):
"""
Featurize a single structure. Returns a single vector
Parameters
----------
structure:
ase.Atoms object of structure to be featurized
Returns
-------
representation:
Numpy array of feature vector (not flattened)
"""
feat_class = self.featurizer_class
featurization_object = self.featurization_object
# dscribe classes
if feat_class in [SOAP, ACSF]:
adsorbate_indices = np.where(structure.get_tags() <= 0)[0].tolist()
return featurization_object.create(structure, positions=adsorbate_indices,)
if feat_class in [SineMatrix, CoulombMatrix]:
return featurization_object.create(structure).reshape(-1,)
# matminer classes
pym_struct = AseAtomsAdaptor().get_structure(structure)
if feat_class == ElementProperty:
return np.array(featurization_object.featurize(pym_struct.composition))
representation = np.array([])
if feat_class in [CrystalNNFingerprint, OPSiteFingerprint]:
adsorbate_indices = np.where(structure.get_tags() <= 0)[0].tolist()
for idx in adsorbate_indices:
feat = featurization_object.featurize(pym_struct, idx)
representation = np.concatenate((representation, feat))
return representation
if feat_class == ChemicalSRO:
adsorbate_indices = np.where(structure.get_tags() <= 0)[0].tolist()
formatted_list = [[pym_struct, idx] for idx in adsorbate_indices]
featurization_object.fit(formatted_list)
for idx in adsorbate_indices:
feat = featurization_object.featurize(pym_struct, idx)
representation = np.concatenate((representation, feat))
return representation
return None
def featurize_multiple(self, structures: List[Atoms]):
"""
Featurize multiple structures. Returns a matrix where each
row is the flattened feature vector of each system
Parameters
----------
structures:
List of ase.Atoms structures to be featurized
Returns
-------
X:
Numpy array of shape (number of structures, number of features)
"""
first_vec = self.featurize_single(structures[0]).flatten()
num_features = len(first_vec)
# if adsorbate featurization, assumes only 1 adsorbate in design space
# (otherwise would require padding)
X = np.zeros((len(structures), num_features))
X[0, :] = first_vec.copy()
for i in range(1, len(structures)):
X[i, :] = self.featurize_single(structures[i]).flatten()
return X
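# Usage sketch (an assumption, not part of the class): featurize two toy
# molecules with the CoulombMatrix backend; requires ase and dscribe.
if __name__ == "__main__":
    from ase.build import molecule
    feat = Featurizer(featurizer_class=CoulombMatrix, max_size=10)
    X = feat.featurize_multiple([molecule("H2O"), molecule("NH3")])
    print(X.shape)  # (2, 100): max_size ** 2 features per structure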
|
[
"copy.deepcopy",
"pymatgen.io.ase.AseAtomsAdaptor",
"numpy.array",
"prettytable.PrettyTable",
"pymatgen.core.periodic_table.Element",
"numpy.concatenate"
] |
[((2141, 2154), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (2152, 2154), False, 'from prettytable import PrettyTable\n'), ((8201, 8213), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8209, 8213), True, 'import numpy as np\n'), ((8007, 8024), 'pymatgen.io.ase.AseAtomsAdaptor', 'AseAtomsAdaptor', ([], {}), '()\n', (8022, 8024), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((8508, 8546), 'numpy.concatenate', 'np.concatenate', (['(representation, feat)'], {}), '((representation, feat))\n', (8522, 8546), True, 'import numpy as np\n'), ((8976, 9014), 'numpy.concatenate', 'np.concatenate', (['(representation, feat)'], {}), '((representation, feat))\n', (8990, 9014), True, 'import numpy as np\n'), ((3285, 3311), 'copy.deepcopy', 'copy.deepcopy', (['self.kwargs'], {}), '(self.kwargs)\n', (3298, 3311), False, 'import copy\n'), ((5510, 5521), 'pymatgen.core.periodic_table.Element', 'Element', (['el'], {}), '(el)\n', (5517, 5521), False, 'from pymatgen.core.periodic_table import Element\n'), ((6243, 6254), 'pymatgen.core.periodic_table.Element', 'Element', (['el'], {}), '(el)\n', (6250, 6254), False, 'from pymatgen.core.periodic_table import Element\n')]
|
# coding=utf-8
# Copyright 2019 The Weak Disentangle Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Affine modules.
"""
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from weak_disentangle.tensorsketch import utils as tsu
from weak_disentangle.tensorsketch.modules.base import build_with_name_scope
from weak_disentangle.tensorsketch.modules.base import Module
class Affine(Module):
"""Abstract class for modules that apply an affine transformation to input.
Affine includes several special functionalities to ensure that classes that
extend it are amenable to the injection of kernel normalizers (based on the
respects_kernel_norm flag). All classes that extend Affine should adhere to
the following contract: Never access self.orig_kernel directly in forward
call, and parameter initialization/building.
"""
def __init__(self, bias=True, name=None, initializer=None):
super().__init__(name=name)
self.use_bias = bias
self.kernel = None
self.bias = None
self.initializer = initializer
self.kernel_normalizers = OrderedDict()
@property
def normalized_kernel(self):
kernel = self.kernel
for km in self.kernel_normalizers.values():
kernel = km(kernel)
return kernel
@build_with_name_scope
def build_parameters(self, x):
raise NotImplementedError("Implement parameter building for Affine class")
def reset_parameters(self):
if self.initializer is not None:
self.initializer(self.kernel, self.bias)
return
# By default, all affine layers are initialized via
# Unif(-a, a), where a = sqrt(1 / fan_in)
fan_in, _ = tsu.compute_fan(self.kernel)
limit = np.sqrt(1 / fan_in)
self.kernel.assign(tf.random.uniform(self.kernel.shape, -limit, limit))
if self.use_bias:
self.bias.assign(tf.random.uniform(self.bias.shape, -limit, limit))
class Dense(Affine):
"""Applies a dense affine transformation to input.
"""
def __init__(self, out_dims, bias=True, initializer=None, name=None):
super().__init__(bias=bias, initializer=initializer, name=name)
self.out_dims = out_dims
@build_with_name_scope
def build_parameters(self, x):
self.in_dims = int(x.shape[-1])
self.kernel = tf.Variable(tf.random.normal((self.in_dims, self.out_dims)),
trainable=True)
if self.use_bias:
self.bias = tf.Variable(tf.random.normal([self.out_dims]), trainable=True)
self.reset_parameters()
def forward(self, x):
x = tf.matmul(x, self.normalized_kernel)
if self.bias is not None:
x = tf.nn.bias_add(x, self.bias)
return x
def extra_repr(self):
return "({}, bias={})".format(self.out_dims, self.use_bias)
class Conv2d(Affine):
"""Applies 2d convolutional transformation (and bias) to input.
"""
def __init__(self,
out_channels,
kernel_size,
strides,
padding="same",
dilation=1,
bias=True,
initializer=None,
name=None):
super().__init__(bias=bias, initializer=initializer, name=name)
self.out_channels = out_channels
self.kernel_size = kernel_size
self.strides = strides
self.padding = padding
self.dilation = dilation
@build_with_name_scope
def build_parameters(self, x):
self.in_channels = int(x.shape[-1])
self.kernel = tf.Variable(tf.random.normal((self.kernel_size,
self.kernel_size,
self.in_channels,
self.out_channels)),
trainable=True)
if self.use_bias:
self.bias = tf.Variable(tf.random.normal([self.out_channels]),
trainable=True)
self.reset_parameters()
def forward(self, x):
x = tf.nn.conv2d(
        x, filters=self.normalized_kernel,  # TF2 keyword; 'filter' was the TF1 name
strides=self.strides,
padding=self.padding.upper(),
dilations=self.dilation)
if self.use_bias:
x = tf.nn.bias_add(x, self.bias)
return x
def extra_repr(self):
return "({}, {}, {}, {}, bias={})".format(self.out_channels,
self.kernel_size,
self.strides,
self.padding,
self.use_bias)
class ConvTranspose2d(Affine):
"""Applies 2d transposed convolutional transformation (and bias) to input.
"""
def __init__(self,
out_channels,
kernel_size,
strides,
padding="same",
output_padding=None,
dilation=1,
bias=True,
initializer=None,
name=None):
super().__init__(bias=bias, initializer=initializer, name=name)
self.out_channels = out_channels
self.kernel_size = kernel_size
self.strides = strides
self.padding = padding
self.output_padding = output_padding
self.dilation = dilation
@build_with_name_scope
def build_parameters(self, x):
self.in_channels = int(x.shape[-1])
self.kernel = tf.Variable(tf.random.normal((self.kernel_size,
self.kernel_size,
self.out_channels,
self.in_channels)),
trainable=True)
if self.use_bias:
self.bias = tf.Variable(tf.random.normal([self.out_channels]),
trainable=True)
self.reset_parameters()
def forward(self, x):
n, h, w, _ = x.shape
h = tsu.compute_out_dims(h, self.kernel_size,
self.strides,
self.padding,
self.output_padding,
self.dilation)
w = tsu.compute_out_dims(w, self.kernel_size,
self.strides,
self.padding,
self.output_padding,
self.dilation)
output_shape = (n, h, w, self.out_channels)
x = tf.nn.conv2d_transpose(
        x, filters=self.normalized_kernel,  # TF2 keyword; 'filter' was the TF1 name
strides=self.strides,
padding=self.padding.upper(),
output_shape=output_shape,
dilations=self.dilation)
if self.use_bias:
x = tf.nn.bias_add(x, self.bias)
return x
def extra_repr(self):
return "({}, {}, {}, {}, bias={})".format(self.out_channels,
self.kernel_size,
self.strides,
self.padding,
self.use_bias)
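# Usage sketch (an assumption, not part of the package): parameters are built
# lazily from the first input's shape, then forward applies the affine map.
if __name__ == "__main__":
  x = tf.random.normal((8, 16))
  dense = Dense(out_dims=4)
  dense.build_parameters(x)      # creates a (16, 4) kernel and a (4,) bias
  print(dense.forward(x).shape)  # (8, 4)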
|
[
"tensorflow.random.uniform",
"tensorflow.random.normal",
"weak_disentangle.tensorsketch.utils.compute_fan",
"weak_disentangle.tensorsketch.utils.compute_out_dims",
"tensorflow.matmul",
"collections.OrderedDict",
"tensorflow.nn.bias_add",
"numpy.sqrt"
] |
[((1612, 1625), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1623, 1625), False, 'from collections import OrderedDict\n'), ((2172, 2200), 'weak_disentangle.tensorsketch.utils.compute_fan', 'tsu.compute_fan', (['self.kernel'], {}), '(self.kernel)\n', (2187, 2200), True, 'from weak_disentangle.tensorsketch import utils as tsu\n'), ((2213, 2232), 'numpy.sqrt', 'np.sqrt', (['(1 / fan_in)'], {}), '(1 / fan_in)\n', (2220, 2232), True, 'import numpy as np\n'), ((3044, 3080), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.normalized_kernel'], {}), '(x, self.normalized_kernel)\n', (3053, 3080), True, 'import tensorflow as tf\n'), ((6296, 6406), 'weak_disentangle.tensorsketch.utils.compute_out_dims', 'tsu.compute_out_dims', (['h', 'self.kernel_size', 'self.strides', 'self.padding', 'self.output_padding', 'self.dilation'], {}), '(h, self.kernel_size, self.strides, self.padding, self.\n output_padding, self.dilation)\n', (6316, 6406), True, 'from weak_disentangle.tensorsketch import utils as tsu\n'), ((6527, 6637), 'weak_disentangle.tensorsketch.utils.compute_out_dims', 'tsu.compute_out_dims', (['w', 'self.kernel_size', 'self.strides', 'self.padding', 'self.output_padding', 'self.dilation'], {}), '(w, self.kernel_size, self.strides, self.padding, self.\n output_padding, self.dilation)\n', (6547, 6637), True, 'from weak_disentangle.tensorsketch import utils as tsu\n'), ((2256, 2307), 'tensorflow.random.uniform', 'tf.random.uniform', (['self.kernel.shape', '(-limit)', 'limit'], {}), '(self.kernel.shape, -limit, limit)\n', (2273, 2307), True, 'import tensorflow as tf\n'), ((2783, 2830), 'tensorflow.random.normal', 'tf.random.normal', (['(self.in_dims, self.out_dims)'], {}), '((self.in_dims, self.out_dims))\n', (2799, 2830), True, 'import tensorflow as tf\n'), ((3122, 3150), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'self.bias'], {}), '(x, self.bias)\n', (3136, 3150), True, 'import tensorflow as tf\n'), ((3948, 4043), 'tensorflow.random.normal', 'tf.random.normal', (['(self.kernel_size, self.kernel_size, self.in_channels, self.out_channels)'], {}), '((self.kernel_size, self.kernel_size, self.in_channels,\n self.out_channels))\n', (3964, 4043), True, 'import tensorflow as tf\n'), ((4620, 4648), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'self.bias'], {}), '(x, self.bias)\n', (4634, 4648), True, 'import tensorflow as tf\n'), ((5789, 5884), 'tensorflow.random.normal', 'tf.random.normal', (['(self.kernel_size, self.kernel_size, self.out_channels, self.in_channels)'], {}), '((self.kernel_size, self.kernel_size, self.out_channels,\n self.in_channels))\n', (5805, 5884), True, 'import tensorflow as tf\n'), ((7041, 7069), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'self.bias'], {}), '(x, self.bias)\n', (7055, 7069), True, 'import tensorflow as tf\n'), ((2355, 2404), 'tensorflow.random.uniform', 'tf.random.uniform', (['self.bias.shape', '(-limit)', 'limit'], {}), '(self.bias.shape, -limit, limit)\n', (2372, 2404), True, 'import tensorflow as tf\n'), ((2931, 2964), 'tensorflow.random.normal', 'tf.random.normal', (['[self.out_dims]'], {}), '([self.out_dims])\n', (2947, 2964), True, 'import tensorflow as tf\n'), ((4284, 4321), 'tensorflow.random.normal', 'tf.random.normal', (['[self.out_channels]'], {}), '([self.out_channels])\n', (4300, 4321), True, 'import tensorflow as tf\n'), ((6125, 6162), 'tensorflow.random.normal', 'tf.random.normal', (['[self.out_channels]'], {}), '([self.out_channels])\n', (6141, 6162), True, 'import tensorflow as tf\n')]
|
import streamlit as st
from collections import defaultdict
from kafka import KafkaConsumer
from json import loads
import time
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import PIL
from PIL import Image
import streamlit.components.v1 as components
import os
import tweepy
import logging
import sys
from collections import deque
from geopy.geocoders import Nominatim
import threading
import pickle
# Streamlit layout CSS
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
max-width: 100vw;
padding-top: 1rem;
padding-right: 1rem;
padding-left: 1rem;
padding-bottom: 1rem;
}}
.reportview-container .main {{
color: black;
background-color: white;
}}
</style>
""",
unsafe_allow_html=True,
)
# Lambdas and Constants
def normalize(x): return (x - np.mean(x) + np.finfo(x.dtype).eps) / (np.std(x) + np.finfo(x.dtype).eps)
def timestamp_seconds(x): return datetime.fromisoformat(x).timestamp()
wave_dict = defaultdict(list)
pick_dict = defaultdict(list)
event_dict = defaultdict(dict)
EVENT_MIN_GAP = 5
WINDOW_LENGTH = 100
WINDOW_NUMBER = 60
HOP_LENGTH = 10
REFRESH_SEC = 1.0
MAP_WIDTH = 900
MAP_HEIGHT = 650
MAP_ZOOM = 9
BOT_MAGNITUDE_THRESHOLD = 1.5
GEOLOC_TOUT = 5 # in seconds
I_MADE_A_TWEET = False
dt = 0.01
prev_event_bundle = None
prev_event_bundle = (0.0, 0.0, 0.0, 0.0)
CONFIG_PKL = "config_hawaii.pkl"
STATION_CSV = "stations_hawaii.csv"
with open(CONFIG_PKL, "rb") as fp:
CONFIG = pickle.load(fp)
STATIONS = pd.read_csv(STATION_CSV, delimiter="\t")
STATIONS = STATIONS.rename(columns={"station":"id"})
NUM_STATION = len(STATIONS)
consumer = None
# Connection to Kafka
try:
print('Connecting to k8s kafka')
BROKER_URL = 'quakeflow-kafka:9092'
consumer = KafkaConsumer(
bootstrap_servers=[BROKER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
key_deserializer=lambda x: loads(x.decode('utf-8')),
value_deserializer=lambda x: loads(x.decode('utf-8'))
)
print('k8s kafka connection success!')
consumer.subscribe(['waveform_raw', 'phasenet_picks', 'gmma_events'])
except BaseException:
print('k8s Kafka connection error')
try:
print('Connecting to local kafka')
BROKER_URL = 'localhost:9092'
consumer = KafkaConsumer(
bootstrap_servers=[BROKER_URL],
auto_offset_reset='earliest',
enable_auto_commit=True,
key_deserializer=lambda x: loads(x.decode('utf-8')),
value_deserializer=lambda x: loads(x.decode('utf-8'))
)
print('local kafka connection success!')
consumer.subscribe(['waveform_raw', 'phasenet_picks', 'gmma_events'])
except BaseException:
print('local Kafka connection error')
if not consumer:
print('No kafka server found!')
# Setting up Tweepy
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
access_token = os.getenv('ACCESS_TOKEN')
access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')
print(consumer_key)
print(consumer_secret)
print(access_token)
print(access_token_secret)
logger = logging.getLogger()
def create_api():
consumer_key = os.getenv("CONSUMER_KEY")
consumer_secret = os.getenv("CONSUMER_SECRET")
access_token = os.getenv("ACCESS_TOKEN")
access_token_secret = os.getenv("ACCESS_TOKEN_SECRET")
if not consumer_key:
return
try:
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit=True,
wait_on_rate_limit_notify=True)
api.verify_credentials()
logger.info("API created")
return api
except Exception as e:
logger.error("Error creating API", exc_info=True)
return None
api = create_api()
# Functions
def latlon2address(lat, lon, geolocator):
try:
location = geolocator.reverse(f"{lat}, {lon}")
print(location)
return location.address
except BaseException:
return None
geolocator = Nominatim(user_agent="https", timeout=5)
def update_figure_layout(figure):
figure.update_layout(
mapbox_style="white-bg",
mapbox_layers=[
{
"below": 'traces',
"sourcetype": "raster",
"sourceattribution": "United States Geological Survey",
"source": [
"https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}"
]
}
])
figure.update_layout(
showlegend=True,
width=MAP_WIDTH,
height=MAP_HEIGHT,
geo=dict(
landcolor='rgb(217, 217, 217)',
lonaxis=dict(
showgrid=True,
gridwidth=0.05,
range=CONFIG["xlim_degree"],
dtick=5
),
lataxis=dict(
showgrid=True,
gridwidth=0.05,
range=CONFIG["ylim_degree"],
dtick=5
)
),
)
figure.update_layout(margin={"r": 0.5, "t": 0.5, "l": 0, "b": 0})
return figure
def get_plot_picks(message, t0, tn):
t0_idx = 0
t_picks = []
colors = []
for i, x in enumerate(message):
if timestamp_seconds(x["timestamp"]) >= t0:
if t0_idx == 0:
t0_idx = i
if timestamp_seconds(x["timestamp"]) <= tn:
t_picks.append(timestamp_seconds(x["timestamp"]) - t0)
if x["type"] == "p":
colors.append("b")
elif x["type"] == "s":
colors.append("r")
else:
raise("Phase type error!")
else:
return t_picks, colors, t0_idx
return t_picks, colors, t0_idx
def get_plot_events(message, t0, tn):
t0_idx = 0
t_events = []
mag_events = []
loc_events = []
for k, x in message.items():
if timestamp_seconds(x["time"]) >= t0:
# if t0_idx == 0:
# t0_idx = i
if timestamp_seconds(x["time"]) <= tn - 8:
t_events.append(timestamp_seconds(x["time"]) - t0)
mag_events.append(x["magnitude"])
loc_events.append(x["location"])
else:
return t_events, mag_events, loc_events, t0_idx
return t_events, mag_events, loc_events, t0_idx
def update_figure(figure, lat_list, lng_list, z_list, mag_events, t_events):
if(figure is not None):
figure.data = []
figure_df = pd.DataFrame({'lat': lat_list, 'lon': lng_list, 'z': z_list, 'mag': mag_events,
'time': t_events, 'size': [(mag_event**4) / 3.5 for mag_event in mag_events]})
figure = px.scatter_mapbox(
figure_df,
lat="lat",
lon="lon",
hover_data=[
"mag",
"time",
"lat",
"lon"],
size="size",
color_discrete_sequence=["fuchsia"],
zoom=MAP_ZOOM,
height=300)
figure = update_figure_layout(figure)
return figure
def update_figure_with_cols(figure, col1, col2, lat_list, lng_list, z_list, mag_events, t_events):
with col1:
figure = update_figure(figure, lat_list, lng_list, z_list, mag_events, t_events)
return figure
def tweep_update_with_media(api, mag, lng, lat, z, event_time, geolocator):
temp_time = time.time()
# get figure using update_figure
figure = update_figure(None, [lat], [lng], [z], [mag], [event_time])
figure.write_image("twitter_fig.png")
print("Time taken to render: %f" % (time.time() - temp_time))
address = latlon2address(lat, lng, geolocator)
if address is not None:
caption = f"Magnitude {mag} earthquake occurred at address {address} at time {event_time}"
else:
caption = "Magnitude %f earthquake happened at longitude %f degrees, latitude %f degrees at depth %f km at time %s" % (
mag, lng, lat, z, event_time)
try:
api.update_with_media("twitter_fig.png", caption)
print('Update Twitter with media success!', flush=True)
global I_MADE_A_TWEET
I_MADE_A_TWEET = True # Demo purpose, don't want to use up all the Twitter API Quota
print("Time taken to from start to end to fully upload to twitter: %f" % (time.time() - temp_time))
except BaseException:
pass
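# NOTE: lng_from_x, lat_from_y and loc_events_organize are used below but not
# defined in this file; they are assumed to be coordinate helpers (local x/y
# offsets -> longitude/latitude) provided elsewhere in the project.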
def tweepy_status_update(event_dict):
if(len(event_dict) > 0):
event = list(event_dict.values())[-1]
print("tweepy_status_update (event): ", event)
event_time = event['time']
lng = lng_from_x(event['location'][0])
lat = lat_from_y(event['location'][1])
z = event['location'][2]
mag = event['magnitude']
bundle = (lng, lat, z, mag)
global prev_event_bundle
if(bundle != prev_event_bundle):
print("----------New Event----------")
prev_event_bundle = bundle
if mag > BOT_MAGNITUDE_THRESHOLD and api is not None and not I_MADE_A_TWEET:
print("time is %s, current time is %f" % (event_time, time.time()))
print("Try to update status on twitter............")
print("Magnitude %f earthquake happened at longitude %f, latitude %f at depth %f at time %s" % (mag, lng, lat, z, event_time))
upload_thread = threading.Thread(
target=tweep_update_with_media, name="Uploader", args=(
api, mag, lng, lat, z, event_time, geolocator, ))
upload_thread.start()
temp_time = time.time()
# Pure text upload, will be fast
# api.update_status(
# "Magnitude %f earthquake happened at longitude %f, latitude %f at depth %f at time %s" %
# (mag, lng, lat, z, event_time))
print("Time taken for fast alert: %f" % (time.time() - temp_time)) # It took: 0.161690 seconds
def extract_df_from_event_dict(event_dict):
event_dict_values = list(event_dict.values())
event_dict_values.reverse()
lat_values = []
lon_values = []
z_values = []
mag_values = []
time_values = []
for event in event_dict_values:
lon_values.append(lng_from_x(event['location'][0]))
lat_values.append(lat_from_y(event['location'][1]))
z_values.append(event['location'][2])
mag_values.append(event['magnitude'])
time_values.append(event['time'])
event_dict_df = pd.DataFrame({'Magnitude': mag_values, 'Time': time_values, 'Latitude (deg)': lat_values,
'Longitude (deg)': lon_values, 'Depth (km)': z_values})
return event_dict_df
# Page header
image_data = np.asarray(Image.open('quakeflow logo design 2.jpg'))
st.image(image_data, caption=None, width=None, use_column_width=None, clamp=False, channels='RGB', output_format='auto')
st.balloons()
# Streamlit layout
col1, col2 = st.beta_columns([1, 1])
# Initial plotting
with col1:
experimental_df = pd.DataFrame({'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []})
event_df = pd.DataFrame({'Magnitude': [], 'Time': [], 'Latitude (deg)': [], 'Longitude (deg)': [], 'Depth (km)': []})
experimental = px.scatter_mapbox(
experimental_df,
lat="lat",
lon="lon",
hover_data=[
"mag",
"time",
"lat",
"lon"],
color_discrete_sequence=["fuchsia"],
zoom=MAP_ZOOM,
height=300)
experimental = update_figure_layout(experimental)
map_figure_experimental = st.plotly_chart(experimental, width=MAP_WIDTH, height=MAP_HEIGHT)
fig, (ax1) = plt.subplots(1, 1, figsize=(8, 5.8))
x = np.arange(WINDOW_LENGTH * WINDOW_NUMBER // HOP_LENGTH) * (dt * HOP_LENGTH)
ax1.set_ylim(-1, NUM_STATION)
ax1.set_xlim(np.around(x[0]), np.around(x[-1]))
lines = []
for i in range(NUM_STATION):
line, = ax1.plot(x, np.zeros(len(x)) + i, linewidth=0.5)
lines.append(line)
scatters = []
for i in range(NUM_STATION):
scatter = ax1.scatter([-1], [-1], s=300, c="white", marker="|")
scatters.append(scatter)
ax1.scatter([-1], [-1], s=200, c="blue", marker="|", label="P-wave")
ax1.scatter([-1], [-1], s=200, c="red", marker="|", label="S-wave")
ax1.legend(loc="upper left")
ax1.title.set_text("Streaming Seismic Waveforms and Detected P/S Phases")
with col2:
ui_plot = st.pyplot(plt)
catalog_df_visual = st.empty()
prev_time = time.time()
prev_time_bot = time.time()
# Handle messages from Kafka
for i, message in enumerate(consumer):
if message.topic == "waveform_raw":
key = message.key.strip('"')
timestamp = message.value['timestamp']
# print(timestamp)
vec = message.value['vec']
wave_dict[key].append([message.value['timestamp'], message.value['vec']])
wave_dict[key] = wave_dict[key][-WINDOW_NUMBER:]
elif message.topic == "phasenet_picks":
# print("phasenet!")
key = message.key
pick = message.value
pick_dict[key].append(pick)
elif message.topic == "gmma_events":
# print("gmma!")
key = np.round(timestamp_seconds(message.key) / EVENT_MIN_GAP) * EVENT_MIN_GAP
event = message.value
# event_list.extend(event)
# event_dict[key].append(event)
event_dict[key] = event
else:
print(message.topic)
raise("Topic Error!")
# Tweepy timer
if time.time() - prev_time_bot > EVENT_MIN_GAP:
tweepy_status_update(event_dict)
prev_time_bot = time.time()
if time.time() - prev_time > REFRESH_SEC:
prev_time = time.time()
keys = sorted(wave_dict.keys())
print("refreshing...")
min_t = prev_time
max_t = 0
# print("len(pick_dict): ", len(pick_dict))
for j, k in enumerate(keys):
tmp_vec = []
tmp_t = []
for _ in range(WINDOW_NUMBER - len(wave_dict[k])):
tmp_vec.extend([[0] * 3] * WINDOW_LENGTH)
for v in wave_dict[k]:
tmp_vec.extend(v[1])
tmp_t.append(v[0])
lines[j].set_ydata(normalize(np.array(tmp_vec)[::HOP_LENGTH, -1]) / 5 + j)
# print(pick_dict.keys())
# print(k, len(k))
if k in pick_dict:
t0 = timestamp_seconds(max(tmp_t)) - WINDOW_LENGTH * (WINDOW_NUMBER - 1) * dt
tn = timestamp_seconds(max(tmp_t)) + WINDOW_LENGTH * dt
if tn > max_t:
max_t = tn
if t0 < min_t:
min_t = t0
t_picks, colors, t0_idx = get_plot_picks(pick_dict[k], t0, tn)
scatters[j].set_offsets(np.c_[t_picks, np.ones_like(t_picks) * j])
scatters[j].set_color(colors)
if len(event_dict) > 0:
t_events, mag_events, loc_events, t0_idx = get_plot_events(event_dict, min_t, max_t)
if len(t_events) > 0:
loc_events = np.array(loc_events)
# organize data into the correct form
lng_list, lat_list, z_list = loc_events_organize(loc_events)
# update figure
experimental = update_figure_with_cols(experimental, col1, col2, lat_list, lng_list, z_list, mag_events, t_events)
event_df = extract_df_from_event_dict(event_dict)
if len(keys) > 0:
print("plotting...")
with col2:
ui_plot.pyplot(plt)
catalog_df_visual.dataframe(event_df)
with col1:
map_figure_experimental.plotly_chart(experimental, width=MAP_WIDTH, height=MAP_HEIGHT)
if message.topic == "waveform_raw":
time.sleep(REFRESH_SEC / NUM_STATION / 20)
|
[
"streamlit.balloons",
"streamlit.image",
"pandas.read_csv",
"plotly.express.scatter_mapbox",
"collections.defaultdict",
"numpy.around",
"pickle.load",
"numpy.arange",
"numpy.mean",
"pandas.DataFrame",
"datetime.datetime.fromisoformat",
"numpy.std",
"numpy.finfo",
"streamlit.beta_columns",
"matplotlib.pyplot.subplots",
"threading.Thread",
"streamlit.plotly_chart",
"tweepy.API",
"numpy.ones_like",
"time.sleep",
"streamlit.pyplot",
"streamlit.empty",
"tweepy.OAuthHandler",
"os.getenv",
"streamlit.markdown",
"geopy.geocoders.Nominatim",
"time.time",
"PIL.Image.open",
"numpy.array",
"logging.getLogger"
] |
[((541, 908), 'streamlit.markdown', 'st.markdown', (['f"""\n<style>\n .reportview-container .main .block-container{{\n max-width: 100vw;\n padding-top: 1rem;\n padding-right: 1rem;\n padding-left: 1rem;\n padding-bottom: 1rem;\n }}\n .reportview-container .main {{\n color: black;\n background-color: white;\n }}\n</style>\n"""'], {'unsafe_allow_html': '(True)'}), '(\n f"""\n<style>\n .reportview-container .main .block-container{{\n max-width: 100vw;\n padding-top: 1rem;\n padding-right: 1rem;\n padding-left: 1rem;\n padding-bottom: 1rem;\n }}\n .reportview-container .main {{\n color: black;\n background-color: white;\n }}\n</style>\n"""\n , unsafe_allow_html=True)\n', (552, 908), True, 'import streamlit as st\n'), ((1123, 1140), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1134, 1140), False, 'from collections import defaultdict\n'), ((1153, 1170), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1164, 1170), False, 'from collections import defaultdict\n'), ((1184, 1201), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1195, 1201), False, 'from collections import defaultdict\n'), ((1643, 1683), 'pandas.read_csv', 'pd.read_csv', (['STATION_CSV'], {'delimiter': '"""\t"""'}), "(STATION_CSV, delimiter='\\t')\n", (1654, 1683), True, 'import pandas as pd\n'), ((3014, 3039), 'os.getenv', 'os.getenv', (['"""CONSUMER_KEY"""'], {}), "('CONSUMER_KEY')\n", (3023, 3039), False, 'import os\n'), ((3058, 3086), 'os.getenv', 'os.getenv', (['"""CONSUMER_SECRET"""'], {}), "('CONSUMER_SECRET')\n", (3067, 3086), False, 'import os\n'), ((3102, 3127), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN"""'], {}), "('ACCESS_TOKEN')\n", (3111, 3127), False, 'import os\n'), ((3150, 3182), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN_SECRET"""'], {}), "('ACCESS_TOKEN_SECRET')\n", (3159, 3182), False, 'import os\n'), ((3283, 3302), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3300, 3302), False, 'import logging\n'), ((4260, 4300), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""https"""', 'timeout': '(5)'}), "(user_agent='https', timeout=5)\n", (4269, 4300), False, 'from geopy.geocoders import Nominatim\n'), ((11093, 11218), 'streamlit.image', 'st.image', (['image_data'], {'caption': 'None', 'width': 'None', 'use_column_width': 'None', 'clamp': '(False)', 'channels': '"""RGB"""', 'output_format': '"""auto"""'}), "(image_data, caption=None, width=None, use_column_width=None, clamp\n =False, channels='RGB', output_format='auto')\n", (11101, 11218), True, 'import streamlit as st\n'), ((11214, 11227), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (11225, 11227), True, 'import streamlit as st\n'), ((11261, 11284), 'streamlit.beta_columns', 'st.beta_columns', (['[1, 1]'], {}), '([1, 1])\n', (11276, 11284), True, 'import streamlit as st\n'), ((11993, 12029), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 5.8)'}), '(1, 1, figsize=(8, 5.8))\n', (12005, 12029), True, 'import matplotlib.pyplot as plt\n'), ((12781, 12792), 'time.time', 'time.time', ([], {}), '()\n', (12790, 12792), False, 'import time\n'), ((12809, 12820), 'time.time', 'time.time', ([], {}), '()\n', (12818, 12820), False, 'import time\n'), ((1616, 1631), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1627, 1631), False, 'import pickle\n'), ((3341, 3366), 'os.getenv', 'os.getenv', (['"""CONSUMER_KEY"""'], {}), "('CONSUMER_KEY')\n", (3350, 3366), False, 'import os\n'), ((3389, 3417), 'os.getenv', 'os.getenv', 
(['"""CONSUMER_SECRET"""'], {}), "('CONSUMER_SECRET')\n", (3398, 3417), False, 'import os\n'), ((3437, 3462), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN"""'], {}), "('ACCESS_TOKEN')\n", (3446, 3462), False, 'import os\n'), ((3489, 3521), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN_SECRET"""'], {}), "('ACCESS_TOKEN_SECRET')\n", (3498, 3521), False, 'import os\n'), ((6818, 6986), 'pandas.DataFrame', 'pd.DataFrame', (["{'lat': lat_list, 'lon': lng_list, 'z': z_list, 'mag': mag_events, 'time':\n t_events, 'size': [(mag_event ** 4 / 3.5) for mag_event in mag_events]}"], {}), "({'lat': lat_list, 'lon': lng_list, 'z': z_list, 'mag':\n mag_events, 'time': t_events, 'size': [(mag_event ** 4 / 3.5) for\n mag_event in mag_events]})\n", (6830, 6986), True, 'import pandas as pd\n'), ((7020, 7197), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['figure_df'], {'lat': '"""lat"""', 'lon': '"""lon"""', 'hover_data': "['mag', 'time', 'lat', 'lon']", 'size': '"""size"""', 'color_discrete_sequence': "['fuchsia']", 'zoom': 'MAP_ZOOM', 'height': '(300)'}), "(figure_df, lat='lat', lon='lon', hover_data=['mag',\n 'time', 'lat', 'lon'], size='size', color_discrete_sequence=['fuchsia'],\n zoom=MAP_ZOOM, height=300)\n", (7037, 7197), True, 'import plotly.express as px\n'), ((7681, 7692), 'time.time', 'time.time', ([], {}), '()\n', (7690, 7692), False, 'import time\n'), ((10805, 10958), 'pandas.DataFrame', 'pd.DataFrame', (["{'Magnitude': mag_values, 'Time': time_values, 'Latitude (deg)': lat_values,\n 'Longitude (deg)': lon_values, 'Depth (km)': z_values}"], {}), "({'Magnitude': mag_values, 'Time': time_values,\n 'Latitude (deg)': lat_values, 'Longitude (deg)': lon_values,\n 'Depth (km)': z_values})\n", (10817, 10958), True, 'import pandas as pd\n'), ((11050, 11091), 'PIL.Image.open', 'Image.open', (['"""quakeflow logo design 2.jpg"""'], {}), "('quakeflow logo design 2.jpg')\n", (11060, 11091), False, 'from PIL import Image\n'), ((11338, 11423), 'pandas.DataFrame', 'pd.DataFrame', (["{'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []}"], {}), "({'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []}\n )\n", (11350, 11423), True, 'import pandas as pd\n'), ((11434, 11544), 'pandas.DataFrame', 'pd.DataFrame', (["{'Magnitude': [], 'Time': [], 'Latitude (deg)': [], 'Longitude (deg)': [],\n 'Depth (km)': []}"], {}), "({'Magnitude': [], 'Time': [], 'Latitude (deg)': [],\n 'Longitude (deg)': [], 'Depth (km)': []})\n", (11446, 11544), True, 'import pandas as pd\n'), ((11560, 11731), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['experimental_df'], {'lat': '"""lat"""', 'lon': '"""lon"""', 'hover_data': "['mag', 'time', 'lat', 'lon']", 'color_discrete_sequence': "['fuchsia']", 'zoom': 'MAP_ZOOM', 'height': '(300)'}), "(experimental_df, lat='lat', lon='lon', hover_data=['mag',\n 'time', 'lat', 'lon'], color_discrete_sequence=['fuchsia'], zoom=\n MAP_ZOOM, height=300)\n", (11577, 11731), True, 'import plotly.express as px\n'), ((11913, 11978), 'streamlit.plotly_chart', 'st.plotly_chart', (['experimental'], {'width': 'MAP_WIDTH', 'height': 'MAP_HEIGHT'}), '(experimental, width=MAP_WIDTH, height=MAP_HEIGHT)\n', (11928, 11978), True, 'import streamlit as st\n'), ((12034, 12088), 'numpy.arange', 'np.arange', (['(WINDOW_LENGTH * WINDOW_NUMBER // HOP_LENGTH)'], {}), '(WINDOW_LENGTH * WINDOW_NUMBER // HOP_LENGTH)\n', (12043, 12088), True, 'import numpy as np\n'), ((12152, 12167), 'numpy.around', 'np.around', (['x[0]'], {}), '(x[0])\n', (12161, 12167), True, 'import numpy as np\n'), 
((12169, 12185), 'numpy.around', 'np.around', (['x[-1]'], {}), '(x[-1])\n', (12178, 12185), True, 'import numpy as np\n'), ((12718, 12732), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (12727, 12732), True, 'import streamlit as st\n'), ((12757, 12767), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (12765, 12767), True, 'import streamlit as st\n'), ((3587, 3637), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (3606, 3637), False, 'import tweepy\n'), ((3717, 3790), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n', (3727, 3790), False, 'import tweepy\n'), ((13877, 13888), 'time.time', 'time.time', ([], {}), '()\n', (13886, 13888), False, 'import time\n'), ((13956, 13967), 'time.time', 'time.time', ([], {}), '()\n', (13965, 13967), False, 'import time\n'), ((16062, 16104), 'time.sleep', 'time.sleep', (['(REFRESH_SEC / NUM_STATION / 20)'], {}), '(REFRESH_SEC / NUM_STATION / 20)\n', (16072, 16104), False, 'import time\n'), ((1004, 1013), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1010, 1013), True, 'import numpy as np\n'), ((1072, 1097), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['x'], {}), '(x)\n', (1094, 1097), False, 'from datetime import datetime\n'), ((13767, 13778), 'time.time', 'time.time', ([], {}), '()\n', (13776, 13778), False, 'import time\n'), ((13897, 13908), 'time.time', 'time.time', ([], {}), '()\n', (13906, 13908), False, 'import time\n'), ((965, 975), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (972, 975), True, 'import numpy as np\n'), ((978, 995), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (986, 995), True, 'import numpy as np\n'), ((1016, 1033), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (1024, 1033), True, 'import numpy as np\n'), ((7885, 7896), 'time.time', 'time.time', ([], {}), '()\n', (7894, 7896), False, 'import time\n'), ((9658, 9781), 'threading.Thread', 'threading.Thread', ([], {'target': 'tweep_update_with_media', 'name': '"""Uploader"""', 'args': '(api, mag, lng, lat, z, event_time, geolocator)'}), "(target=tweep_update_with_media, name='Uploader', args=(api,\n mag, lng, lat, z, event_time, geolocator))\n", (9674, 9781), False, 'import threading\n'), ((9893, 9904), 'time.time', 'time.time', ([], {}), '()\n', (9902, 9904), False, 'import time\n'), ((15331, 15351), 'numpy.array', 'np.array', (['loc_events'], {}), '(loc_events)\n', (15339, 15351), True, 'import numpy as np\n'), ((8609, 8620), 'time.time', 'time.time', ([], {}), '()\n', (8618, 8620), False, 'import time\n'), ((9399, 9410), 'time.time', 'time.time', ([], {}), '()\n', (9408, 9410), False, 'import time\n'), ((10213, 10224), 'time.time', 'time.time', ([], {}), '()\n', (10222, 10224), False, 'import time\n'), ((14492, 14509), 'numpy.array', 'np.array', (['tmp_vec'], {}), '(tmp_vec)\n', (14500, 14509), True, 'import numpy as np\n'), ((15064, 15085), 'numpy.ones_like', 'np.ones_like', (['t_picks'], {}), '(t_picks)\n', (15076, 15085), True, 'import numpy as np\n')]
|
from ..tools.velocity_embedding import velocity_embedding
from ..tools.utils import groups_to_bool
from .utils import default_basis, default_size, default_color, get_components, savefig_or_show, make_unique_list, get_basis
from .velocity_embedding_grid import compute_velocity_on_grid
from .scatter import scatter
from .docs import doc_scatter, doc_params
from matplotlib import rcParams
import matplotlib.pyplot as pl
import numpy as np
@doc_params(scatter=doc_scatter)
def velocity_embedding_stream(adata, basis=None, vkey='velocity', density=None, smooth=None, linewidth=None,
n_neighbors=None, X=None, V=None, X_grid=None, V_grid=None, color=None, use_raw=None,
layer=None, color_map=None, colorbar=True, palette=None, size=None, alpha=.1, perc=None,
sort_order=True, groups=None, components=None, legend_loc='on data',
legend_fontsize=None, legend_fontweight=None, right_margin=None, left_margin=None,
xlabel=None, ylabel=None, title=None, fontsize=None, figsize=None, dpi=None, frameon=None,
show=True, save=None, ax=None, ncols=None, **kwargs):
"""\
Stream plot of velocities on the embedding.
Arguments
---------
adata: :class:`~anndata.AnnData`
Annotated data matrix.
x: `str`, `np.ndarray` or `None` (default: `None`)
x coordinate
y: `str`, `np.ndarray` or `None` (default: `None`)
y coordinate
vkey: `str` or `None` (default: `None`)
Key for annotations of observations/cells or variables/genes.
density: `float` (default: 1)
Amount of velocities to show - 0 none to 1 all
smooth: `float` (default: 0.5)
Multiplication factor for scale in Gaussian kernel around grid point.
linewidth: `float` (default: 1)
Line width for streamplot.
n_neighbors: `int` (default: None)
Number of neighbors to consider around grid point.
X: `np.ndarray` (default: None)
Embedding grid point coordinates
V: `np.ndarray` (default: None)
Embedding grid velocity coordinates
{scatter}
Returns
-------
`matplotlib.Axis` if `show==False`
"""
basis = default_basis(adata) if basis is None else get_basis(adata, basis)
    vkey = [key for key in adata.layers.keys() if 'velocity' in key and '_u' not in key] if vkey == 'all' else vkey
colors, layers, vkeys = make_unique_list(color, allow_array=True), make_unique_list(layer), make_unique_list(vkey)
for key in vkeys:
if key + '_' + basis not in adata.obsm_keys() and V is None:
velocity_embedding(adata, basis=basis, vkey=key)
color, layer, vkey = colors[0], layers[0], vkeys[0]
color = default_color(adata) if color is None else color
if X_grid is None or V_grid is None:
_adata = adata[groups_to_bool(adata, groups, groupby=color)] \
if groups is not None and color in adata.obs.keys() else adata
X_emb = np.array(_adata.obsm['X_' + basis][:, get_components(components, basis)]) if X is None else X[:, :2]
V_emb = np.array(_adata.obsm[vkey + '_' + basis][:, get_components(components, basis)]) if V is None else V[:, :2]
X_grid, V_grid = compute_velocity_on_grid(X_emb=X_emb, V_emb=V_emb, density=1, smooth=smooth,
n_neighbors=n_neighbors, autoscale=False, adjust_for_stream=True)
lengths = np.sqrt((V_grid ** 2).sum(0))
linewidth = 1 if linewidth is None else linewidth
linewidth *= 2 * lengths / lengths[~np.isnan(lengths)].max()
scatter_kwargs = {"basis": basis, "perc": perc, "use_raw": use_raw, "sort_order": sort_order, "alpha": alpha,
"components": components, "legend_loc": legend_loc, "groups": groups,
"legend_fontsize": legend_fontsize, "legend_fontweight": legend_fontweight, "palette": palette,
"color_map": color_map, "frameon": frameon, "xlabel": xlabel, "ylabel": ylabel,
"right_margin": right_margin, "left_margin": left_margin, "colorbar": colorbar, "dpi": dpi,
"fontsize": fontsize, "show": False, "save": None}
multikey = colors if len(colors) > 1 else layers if len(layers) > 1 else vkeys if len(vkeys) > 1 else None
if multikey is not None:
if title is None: title = list(multikey)
elif isinstance(title, (list, tuple)): title *= int(np.ceil(len(multikey) / len(title)))
ncols = len(multikey) if ncols is None else min(len(multikey), ncols)
nrows = int(np.ceil(len(multikey) / ncols))
figsize = rcParams['figure.figsize'] if figsize is None else figsize
ax = []
for i, gs in enumerate(
pl.GridSpec(nrows, ncols, pl.figure(None, (figsize[0] * ncols, figsize[1] * nrows), dpi=dpi))):
if i < len(multikey):
ax.append(velocity_embedding_stream(adata, density=density, size=size, smooth=smooth, n_neighbors=n_neighbors,
linewidth=linewidth, ax=pl.subplot(gs),
color=colors[i] if len(colors) > 1 else color,
layer=layers[i] if len(layers) > 1 else layer,
vkey=vkeys[i] if len(vkeys) > 1 else vkey,
title=title[i] if isinstance(title, (list, tuple)) else title,
X_grid=None if len(vkeys) > 1 else X_grid,
V_grid=None if len(vkeys) > 1 else V_grid, **scatter_kwargs, **kwargs))
savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
if not show: return ax
else:
ax = pl.figure(None, figsize, dpi=dpi).gca() if ax is None else ax
density = 1 if density is None else density
stream_kwargs = {"linewidth": linewidth, "density": 2 * density}
stream_kwargs.update(kwargs)
pl.streamplot(X_grid[0], X_grid[1], V_grid[0], V_grid[1], color='grey', zorder=3, **stream_kwargs)
size = 4 * default_size(adata) if size is None else size
ax = scatter(adata, layer=layer, color=color, size=size, title=title, ax=ax, zorder=0, **scatter_kwargs)
savefig_or_show('' if basis is None else basis, dpi=dpi, save=save, show=show)
if not show: return ax
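# Usage sketch (an assumption): a typical call on an AnnData whose velocities
# and 2-D embedding were computed beforehand, e.g. with scvelo:
#     import scvelo as scv
#     scv.pl.velocity_embedding_stream(adata, basis='umap', color='clusters')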
|
[
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"numpy.isnan",
"matplotlib.pyplot.streamplot"
] |
[((6200, 6302), 'matplotlib.pyplot.streamplot', 'pl.streamplot', (['X_grid[0]', 'X_grid[1]', 'V_grid[0]', 'V_grid[1]'], {'color': '"""grey"""', 'zorder': '(3)'}), "(X_grid[0], X_grid[1], V_grid[0], V_grid[1], color='grey',\n zorder=3, **stream_kwargs)\n", (6213, 6302), True, 'import matplotlib.pyplot as pl\n'), ((4875, 4941), 'matplotlib.pyplot.figure', 'pl.figure', (['None', '(figsize[0] * ncols, figsize[1] * nrows)'], {'dpi': 'dpi'}), '(None, (figsize[0] * ncols, figsize[1] * nrows), dpi=dpi)\n', (4884, 4941), True, 'import matplotlib.pyplot as pl\n'), ((5967, 6000), 'matplotlib.pyplot.figure', 'pl.figure', (['None', 'figsize'], {'dpi': 'dpi'}), '(None, figsize, dpi=dpi)\n', (5976, 6000), True, 'import matplotlib.pyplot as pl\n'), ((3652, 3669), 'numpy.isnan', 'np.isnan', (['lengths'], {}), '(lengths)\n', (3660, 3669), True, 'import numpy as np\n'), ((5182, 5196), 'matplotlib.pyplot.subplot', 'pl.subplot', (['gs'], {}), '(gs)\n', (5192, 5196), True, 'import matplotlib.pyplot as pl\n')]
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import hashlib
from copy import deepcopy
import numpy as np
from graph.dim import Dim
from graph.types import (ActivationParameters, ConstantInputParameters, NNEdge,
ReshapeParameters)
from importer.common.provisional_dim import ProvisionalDim
from quantization.new_qrec import QRec
from quantization.qtype import QType
from utils.node_id import NodeId
from ..tflite_schema_head.ActivationFunctionType import ActivationFunctionType
from .handler import Handler
class BackendHandler(Handler):
""" This class is base backend handler class.
All backend operator handler class MUST inherit this class.
In backend, operator handler class's name should be pascal case of file name
which should be snake case.
Use ONNX operator name as class name.
"""
VAR_COUNT = 0
TF_ACTIVATIONS = {
ActivationFunctionType.RELU: "relu",
ActivationFunctionType.RELU6: "relu6",
ActivationFunctionType.SIGN_BIT: "sign_bit",
ActivationFunctionType.TANH: "tanh"
}
@classmethod
def _get_real_dim(cls, shape):
return np.array([elem for elem in shape if elem is not None])
@classmethod
def _get_real_dims(cls, dims):
return [cls._get_real_dim(dim.shape) for dim in dims]
@classmethod
def _verify_constant(cls, inp):
if cls._is_constant(inp):
return cls._get_constant(inp)
raise ValueError("expected node %s to be constant input" % inp[0].name)
@classmethod
def _is_constant(cls, inp):
return isinstance(inp[0], ConstantInputParameters)
@classmethod
def _get_constant(cls, inp):
return inp[0].value
@classmethod
def _slice_len(cls, vstart, vend, vstep):
if vstep < 0:
vstart, vend = vend, vstart
vstep = -vstep
return (vend - vstart - 1) // vstep + 1
@classmethod
def fuse_activation(cls, tfl_opts, name, params, **kwargs):
G = kwargs['G']
opts = kwargs['opts']
ext = hashlib.sha1(name.encode(
"UTF-8")).hexdigest()[:8] if opts.get('anonymise') else 'activation'
if opts.get('load_quantization') and NodeId(params) in G.quantization:
node_qrec = G.quantization[NodeId(params)]
else:
node_qrec = None
# if node_qrec is not None and None in node_qrec.in_qs + node_qrec.out_qs:
# # one of the input is a constant or strange behaviour -> may be is something fusions will get rid of
# return add_node(self.G, node)
aparams = None
if tfl_opts.FusedActivationFunction() == ActivationFunctionType.NONE:
if node_qrec is not None and node_qrec.ktype.startswith('scaled'): # and opts.get('insert_relus'):
# here we have no activation in an asymmetric qtype -> may be an omitted relu
if node_qrec.out_qs[0] is not None and node_qrec.out_qs[0].min_val == 0:
if np.all(np.round(node_qrec.out_qs[0].max_val) == 6):
aparams = ActivationParameters.get_activation(
'relu6', name + f"_{ext}")
else:
aparams = ActivationParameters.get_activation(
'relu', name + f"_{ext}")
else:
aparams = ActivationParameters.get_activation(cls.TF_ACTIVATIONS[tfl_opts.FusedActivationFunction()],
name + f"_{ext}")
if aparams:
G.add_edge(NNEdge(from_node=params, to_node=aparams))
if opts.get('load_quantization'):
# In between the fused operation and activation the
# transfer is in int32 representation
node_qrec = G.quantization[NodeId(params)]
ina_qtype = deepcopy(node_qrec.out_qs[0])
outa_qtype = deepcopy(ina_qtype)
G.quantization[NodeId(aparams)] = QRec.scaled(
in_qs=[ina_qtype], out_qs=[outa_qtype])
params = aparams
return params
@classmethod
def remove_unspecified_dim(cls, shape):
return [dim for dim in shape if dim is not None]
@classmethod
def get_all_const_inputs(cls, G, all_nodes, opts, node, params,
exclude=None, names=None,
short_names=None,
adjust_transposes=None,
load_quantization_if_present=False,
skip_empty_tensors=True):
if exclude is None:
exclude = []
if names is None:
names = [None] * len(node.inputs)
if short_names is None:
short_names = [None] * len(node.inputs)
if adjust_transposes is None:
            adjust_transposes = [None] * len(node.inputs)
const_params = []
# TODO - this should just be picking up the existing constant nodes not creating new ones.
for idx, tensor in enumerate(node.input):
if tensor is None or idx in exclude or (skip_empty_tensors and not tensor.is_constant):
const_params.append(None)
continue
tensor.used = True
if tensor not in all_nodes:
# this can occur for RNN/LSTM state nodes that have a buffer idx of 0
const_param = ConstantInputParameters(
tensor.name,
dims=Dim.unnamed(tensor.shape),
value=tensor.value)
all_nodes[tensor] = (
const_param,
0,
ProvisionalDim.from_tflite_shape(tensor.shape)
)
else:
const_param = all_nodes[tensor][0]
# some constant nodes can be connected to multiple nodes
# changing their name is not a good idea
if const_param not in G.nodes():
const_param.name = names[idx]
const_param.adjust_transpose = adjust_transposes[idx]
const_param.is_mutated = node.is_mutated(idx)
const_param.is_intermediate = node.is_intermediate(idx)
const_param.short_name = short_names[idx]
const_param.value = np.reshape(tensor.value, tensor.shape)
if opts.get('load_quantization'):
G.quantization[NodeId(const_param)] = QRec.scaled(
in_qs=[tensor.qtype],
out_qs=[tensor.qtype])
if load_quantization_if_present and tensor.qtype:
const_param.value_quantization = tensor.qtype
const_params.append(const_param)
G.add_edge(NNEdge(const_param, params, to_idx=idx))
return const_params
@classmethod
def remove_none_from_constants(cls, inputs, model):
if None not in model:
return
for inp in inputs:
if not isinstance(inp[0], ConstantInputParameters):
continue
val = inp[0].value
if val is None or len(val.shape) != len(model):
continue
assert all(val.shape[idx] == 1 for idx, dim in enumerate(model) if dim is None),\
"value has axis that is larger than one in an unknown dimension"
new_shape = [dim for idx, dim in enumerate(
val.shape) if model[idx] is not None]
inp[0].value = np.reshape(inp[0].value, new_shape)
inp[0].dims = Dim.unnamed(new_shape)
@classmethod
def convert_to_symmetric(cls, qtypes):
return [QType.from_min_max_sq(qtype.min_val, qtype.max_val)
if qtype is not None and (qtype.asymmetric or not qtype.signed) else qtype for qtype in qtypes]
@classmethod
def load_tf_quantization(cls, input_tensors, output_tensors, in_qs=None, out_qs=None, qrec_class=None):
if qrec_class is None:
qrec = QRec.scaled(
in_qs=cls.convert_to_symmetric(
in_qs if in_qs is not None else [tensor.qtype if tensor is not None else None for tensor in input_tensors]),
out_qs=cls.convert_to_symmetric(
out_qs if out_qs is not None else [tensor.qtype for tensor in output_tensors]))
else:
qrec = qrec_class(
in_qs=cls.convert_to_symmetric(
in_qs if in_qs is not None else [tensor.qtype if tensor is not None else None for tensor in input_tensors]),
out_qs=cls.convert_to_symmetric(
out_qs if out_qs is not None else [tensor.qtype for tensor in output_tensors]))
return qrec
@classmethod
def remove_known_batch_dimension(cls, G, x, node, batch_axis=0):
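        # a statically-known batch dimension must be 1; it is squeezed out with
        # a reshape (copying the producer's quantization) and recorded as an
        # unknown (None) axis in the returned provisional shape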
x_shape = x[2].shape
if x_shape[batch_axis] is not None:
            if x_shape[batch_axis] > 1:
raise ValueError(
f'multi batch (n={x_shape[batch_axis]}) operations are not supported by {node.name}')
rparams = ReshapeParameters(
f'{node.name}_batch',
old_shape=Dim.unnamed(x_shape),
shape=Dim.unnamed(x_shape[0:batch_axis:]+x_shape[batch_axis+1::]))
if G.quantization:
qrec = G.quantization[NodeId(x[0])]
G.quantization[NodeId(rparams)] = QRec.copy_ktype(
qrec,
in_qs=[qrec.out_qs[0]],
out_qs=[qrec.out_qs[0]])
G.add_edge(
NNEdge(from_node=x[0], to_node=rparams, from_idx=x[1], to_idx=0))
return (rparams, 0, ProvisionalDim(x_shape[0:batch_axis:]+[None]+x_shape[batch_axis+1::]))
else:
return x
|
[
"quantization.new_qrec.QRec.copy_ktype",
"utils.node_id.NodeId",
"copy.deepcopy",
"graph.types.NNEdge",
"importer.common.provisional_dim.ProvisionalDim",
"importer.common.provisional_dim.ProvisionalDim.from_tflite_shape",
"numpy.array",
"numpy.reshape",
"quantization.qtype.QType.from_min_max_sq",
"quantization.new_qrec.QRec.scaled",
"numpy.round",
"graph.types.ActivationParameters.get_activation",
"graph.dim.Dim.unnamed"
] |
[((1811, 1865), 'numpy.array', 'np.array', (['[elem for elem in shape if elem is not None]'], {}), '([elem for elem in shape if elem is not None])\n', (1819, 1865), True, 'import numpy as np\n'), ((8214, 8249), 'numpy.reshape', 'np.reshape', (['inp[0].value', 'new_shape'], {}), '(inp[0].value, new_shape)\n', (8224, 8249), True, 'import numpy as np\n'), ((8276, 8298), 'graph.dim.Dim.unnamed', 'Dim.unnamed', (['new_shape'], {}), '(new_shape)\n', (8287, 8298), False, 'from graph.dim import Dim\n'), ((2882, 2896), 'utils.node_id.NodeId', 'NodeId', (['params'], {}), '(params)\n', (2888, 2896), False, 'from utils.node_id import NodeId\n'), ((2955, 2969), 'utils.node_id.NodeId', 'NodeId', (['params'], {}), '(params)\n', (2961, 2969), False, 'from utils.node_id import NodeId\n'), ((4250, 4291), 'graph.types.NNEdge', 'NNEdge', ([], {'from_node': 'params', 'to_node': 'aparams'}), '(from_node=params, to_node=aparams)\n', (4256, 4291), False, 'from graph.types import ActivationParameters, ConstantInputParameters, NNEdge, ReshapeParameters\n'), ((4549, 4578), 'copy.deepcopy', 'deepcopy', (['node_qrec.out_qs[0]'], {}), '(node_qrec.out_qs[0])\n', (4557, 4578), False, 'from copy import deepcopy\n'), ((4608, 4627), 'copy.deepcopy', 'deepcopy', (['ina_qtype'], {}), '(ina_qtype)\n', (4616, 4627), False, 'from copy import deepcopy\n'), ((4678, 4729), 'quantization.new_qrec.QRec.scaled', 'QRec.scaled', ([], {'in_qs': '[ina_qtype]', 'out_qs': '[outa_qtype]'}), '(in_qs=[ina_qtype], out_qs=[outa_qtype])\n', (4689, 4729), False, 'from quantization.new_qrec import QRec\n'), ((7029, 7067), 'numpy.reshape', 'np.reshape', (['tensor.value', 'tensor.shape'], {}), '(tensor.value, tensor.shape)\n', (7039, 7067), True, 'import numpy as np\n'), ((7477, 7516), 'graph.types.NNEdge', 'NNEdge', (['const_param', 'params'], {'to_idx': 'idx'}), '(const_param, params, to_idx=idx)\n', (7483, 7516), False, 'from graph.types import ActivationParameters, ConstantInputParameters, NNEdge, ReshapeParameters\n'), ((8376, 8427), 'quantization.qtype.QType.from_min_max_sq', 'QType.from_min_max_sq', (['qtype.min_val', 'qtype.max_val'], {}), '(qtype.min_val, qtype.max_val)\n', (8397, 8427), False, 'from quantization.qtype import QType\n'), ((10120, 10190), 'quantization.new_qrec.QRec.copy_ktype', 'QRec.copy_ktype', (['qrec'], {'in_qs': '[qrec.out_qs[0]]', 'out_qs': '[qrec.out_qs[0]]'}), '(qrec, in_qs=[qrec.out_qs[0]], out_qs=[qrec.out_qs[0]])\n', (10135, 10190), False, 'from quantization.new_qrec import QRec\n'), ((10292, 10356), 'graph.types.NNEdge', 'NNEdge', ([], {'from_node': 'x[0]', 'to_node': 'rparams', 'from_idx': 'x[1]', 'to_idx': '(0)'}), '(from_node=x[0], to_node=rparams, from_idx=x[1], to_idx=0)\n', (10298, 10356), False, 'from graph.types import ActivationParameters, ConstantInputParameters, NNEdge, ReshapeParameters\n'), ((10390, 10463), 'importer.common.provisional_dim.ProvisionalDim', 'ProvisionalDim', (['(x_shape[0:batch_axis] + [None] + x_shape[batch_axis + 1:])'], {}), '(x_shape[0:batch_axis] + [None] + x_shape[batch_axis + 1:])\n', (10404, 10463), False, 'from importer.common.provisional_dim import ProvisionalDim\n'), ((4505, 4519), 'utils.node_id.NodeId', 'NodeId', (['params'], {}), '(params)\n', (4511, 4519), False, 'from utils.node_id import NodeId\n'), ((4659, 4674), 'utils.node_id.NodeId', 'NodeId', (['aparams'], {}), '(aparams)\n', (4665, 4674), False, 'from utils.node_id import NodeId\n'), ((6382, 6428), 'importer.common.provisional_dim.ProvisionalDim.from_tflite_shape', 'ProvisionalDim.from_tflite_shape', 
(['tensor.shape'], {}), '(tensor.shape)\n', (6414, 6428), False, 'from importer.common.provisional_dim import ProvisionalDim\n'), ((7177, 7233), 'quantization.new_qrec.QRec.scaled', 'QRec.scaled', ([], {'in_qs': '[tensor.qtype]', 'out_qs': '[tensor.qtype]'}), '(in_qs=[tensor.qtype], out_qs=[tensor.qtype])\n', (7188, 7233), False, 'from quantization.new_qrec import QRec\n'), ((9882, 9902), 'graph.dim.Dim.unnamed', 'Dim.unnamed', (['x_shape'], {}), '(x_shape)\n', (9893, 9902), False, 'from graph.dim import Dim\n'), ((9926, 9987), 'graph.dim.Dim.unnamed', 'Dim.unnamed', (['(x_shape[0:batch_axis] + x_shape[batch_axis + 1:])'], {}), '(x_shape[0:batch_axis] + x_shape[batch_axis + 1:])\n', (9937, 9987), False, 'from graph.dim import Dim\n'), ((10056, 10068), 'utils.node_id.NodeId', 'NodeId', (['x[0]'], {}), '(x[0])\n', (10062, 10068), False, 'from utils.node_id import NodeId\n'), ((10101, 10116), 'utils.node_id.NodeId', 'NodeId', (['rparams'], {}), '(rparams)\n', (10107, 10116), False, 'from utils.node_id import NodeId\n'), ((3760, 3822), 'graph.types.ActivationParameters.get_activation', 'ActivationParameters.get_activation', (['"""relu6"""', "(name + f'_{ext}')"], {}), "('relu6', name + f'_{ext}')\n", (3795, 3822), False, 'from graph.types import ActivationParameters, ConstantInputParameters, NNEdge, ReshapeParameters\n'), ((3912, 3973), 'graph.types.ActivationParameters.get_activation', 'ActivationParameters.get_activation', (['"""relu"""', "(name + f'_{ext}')"], {}), "('relu', name + f'_{ext}')\n", (3947, 3973), False, 'from graph.types import ActivationParameters, ConstantInputParameters, NNEdge, ReshapeParameters\n'), ((6201, 6226), 'graph.dim.Dim.unnamed', 'Dim.unnamed', (['tensor.shape'], {}), '(tensor.shape)\n', (6212, 6226), False, 'from graph.dim import Dim\n'), ((7154, 7173), 'utils.node_id.NodeId', 'NodeId', (['const_param'], {}), '(const_param)\n', (7160, 7173), False, 'from utils.node_id import NodeId\n'), ((3681, 3718), 'numpy.round', 'np.round', (['node_qrec.out_qs[0].max_val'], {}), '(node_qrec.out_qs[0].max_val)\n', (3689, 3718), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# Data Science
import numpy as np
import pandas as pd
# Visualization
import seaborn as sns
import matplotlib.pyplot as plt
# Tricks
sns.set(style='ticks', context='talk', font_scale=1.15)
# In[ ]:
import os, sys
from skimage.io import imread as skIR
from PIL import Image
# Root directory of the project
ROOT_DIR = os.path.abspath(Mask_RCNN_ROOT)  # note: Mask_RCNN_ROOT must be defined earlier in the notebook
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
from mrcnn import visualize
# In[ ]:
CLASS_NAMES = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
VIP_CLASS = ['person','skis','snowboard']
# IMAGE_SHAPE = (467, 700, 3)
IMAGE_SHAPE = (667, 1000, 3)
# ---
# In[ ]:
def Show_Img(obj, showBox=True, showMask=True, getArray=False):
"""
Show image for given image ID.
Parameters (Input)
----------
obj : DataFrame, Series, str
The Mask R-CNN record for a image
or the path to the image file
showBox : bool
Show the Boxes generated by Mask R-CNN
showMask : bool
Show the Masks generated by Mask R-CNN
getArray : bool
Return Array, not show image, will overwrite
showBox=False, showMask=False
Returns
-------
None (Just show the image)
or
Numpy array of the image
"""
    assert isinstance( obj, (str, pd.DataFrame, pd.Series) ), 'Input should be a path string, Pandas DataFrame or Series.'
if isinstance(obj, str):
imgFile = obj
elif isinstance(obj['imgID'], str):
imgFile = obj['imgID']
elif isinstance(obj['imgID'], pd.Series):
imgFile = obj['imgID'].unique()[0]
obj = obj.where(obj['imgID']==imgFile).dropna()
else:
        assert isinstance( obj['imgID'], (str, pd.Series) ), 'Unable to process: ' + str(type(obj['imgID']))
    if not os.path.exists(imgFile):
        raise FileNotFoundError('No such image! ' + imgFile)
image = skIR(imgFile)
if getArray:
return np.array(image)
if isinstance(obj, str):
return visualize.display_instances(
image,
np.zeros((2,2)), # Placeholder, rois
np.zeros((2,2)), # Placeholder, masks
np.zeros((2,2)), # Placeholder, class_ids
np.array(0), # Placeholder, CLASS_NAMES
np.array(0), # Placeholder, scores
figsize=(8,8),
show_mask=False,
show_bbox=False,
)
else:
result = {}
if isinstance( obj, pd.DataFrame ):
result['class_ids'] = np.array( obj['class_ids'].to_list() )
result['scores'] = np.array( obj['scores'].to_list() )
result['rois'] = np.array( obj[['x1','y1','x2','y2']].values)
else:
result['class_ids'] = np.array([obj['class_ids']])
result['scores'] = np.array([obj['scores']])
result['rois'] = np.array( obj[['x1','y1','x2','y2']].values)[np.newaxis, :]
if showMask:
result['masks'] = pd.Series(obj['masks']).apply( lambda row: list(map(int, list(row))) ).tolist()
result['masks'] = np.rollaxis(
np.array(result['masks']).reshape(-1, IMAGE_SHAPE[0], IMAGE_SHAPE[1]), 0, 3
).astype(bool)
else:
result['masks'] = np.zeros((IMAGE_SHAPE[0], IMAGE_SHAPE[1], result['scores'].shape[0]))
return visualize.display_instances(
image,
result['rois'],
result['masks'],
result['class_ids'].astype(int),
CLASS_NAMES,
result['scores'],
figsize=(8,8),
show_mask=showMask,
show_bbox=showBox,
)
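# Example usage (hypothetical file name and DataFrame layout, assuming the Mask
# R-CNN results were flattened to one row per detection with an 'imgID' column):
# results = pd.read_csv('mrcnn_results.csv')
# Show_Img(results[results['imgID'] == 'imgs/slope_001.jpg'], showMask=False)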
# ---
# In[ ]:
def extInBoxPixels(obj, getMask=False, show=False):
"""
Extract InBox pixels from given Box and image ID.
Parameters (Input)
----------
obj : Series
The record for a box
getMask : bool
Only extract the InMask pixels
show : bool
Show the extracted pixels
Returns
-------
ext_Box : Array
Numpy array (Matrix) with InBox pixels
Shape = (Unknown, Unknown, 3)
"""
assert isinstance( obj, pd.Series ), 'Input should be a Pandas Series.'
imgFile = obj['imgID']
    if not os.path.exists(imgFile):
        raise FileNotFoundError('No such image! ' + imgFile)
image = skIR(imgFile)
(x1, y1, x2, y2) = obj[['x1','y1','x2','y2']].map(int)
# Check image shape
if image.shape != IMAGE_SHAPE:
# Some are vertical image
image = np.swapaxes(image,0,1)
# Check again
if image.shape != IMAGE_SHAPE:
            return None  # Placeholder
if not getMask:
ext_Box = image[x1:x2, y1:y2, :]
else:
# Mask Invert
ext_Mask = np.invert(
np.array(
pd.Series(obj['masks'])
.apply( lambda row: list(map(int, list(row))) )
.tolist()
).reshape(-1, IMAGE_SHAPE[0], IMAGE_SHAPE[1]).astype(bool)[0]
)
# First, Make Inverted Mask as a white/snow background (255,255,255)
# Next, Add image to the Inverted Mask
# Then, Clip the overflow (>255) pixels (make them white)
ext_Img = (
255*np.stack( [ext_Mask]*3, axis=2 )
+image
).clip(max=255)
# Finally, Crop the box
ext_Box = ext_Img[x1:x2, y1:y2, :]
if show:
plt.imshow(ext_Box)
return ext_Box
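# Example usage (hypothetical record: one detection row carrying the imgID,
# x1..y2 box coordinates and the encoded masks field):
# box = results.iloc[0]
# pixels = extInBoxPixels(box, getMask=True, show=True)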
# ---
# In[ ]:
def squareBox (BoxArray):
"""
    Reshape a Box of unknown shape with pixels to a square 150x150 Box.
Parameters (Input)
----------
BoxArray : numpy array (Matrix)
Array with InBox pixels
Returns
-------
BoxArraySquared : Array
Numpy array (Matrix) with InBox pixels
Shape = (150, 150, 3)
"""
assert isinstance( BoxArray, np.ndarray ), 'Input should be a Numpy array.'
BoxArraySquared = np.array(
resize_tool(
Image.fromarray(BoxArray.astype('uint8')),
width = 150,
height = 150,
)
)
return BoxArraySquared
################################################################################
def resize_tool(image_pil, width, height):
'''
Resize PIL image keeping ratio and using white background.
From https://stackoverflow.com/questions/44370469/python-image-resizing-keep-proportion-add-white-background
'''
ratio_w = width / image_pil.width
ratio_h = height / image_pil.height
if ratio_w < ratio_h:
# It must be fixed by width
resize_width = width
resize_height = round(ratio_w * image_pil.height)
else:
# Fixed by height
resize_width = round(ratio_h * image_pil.width)
resize_height = height
    image_resize = image_pil.resize((resize_width, resize_height), Image.LANCZOS)  # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter
background = Image.new('RGB', (width, height), (255, 255, 255))
offset = (round((width - resize_width) / 2), round((height - resize_height) / 2))
background.paste(image_resize, offset)
return background
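# Example usage (hypothetical, chaining the helpers above):
# square = squareBox(extInBoxPixels(results.iloc[0]))  # -> (150, 150, 3) array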
# In[ ]:
|
[
"sys.path.append",
"numpy.stack",
"os.path.abspath",
"PIL.Image.new",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.zeros",
"numpy.array",
"numpy.swapaxes",
"pandas.Series",
"seaborn.set",
"skimage.io.imread"
] |
[((184, 239), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'context': '"""talk"""', 'font_scale': '(1.15)'}), "(style='ticks', context='talk', font_scale=1.15)\n", (191, 239), True, 'import seaborn as sns\n'), ((372, 403), 'os.path.abspath', 'os.path.abspath', (['Mask_RCNN_ROOT'], {}), '(Mask_RCNN_ROOT)\n', (387, 403), False, 'import os, sys\n'), ((424, 449), 'sys.path.append', 'sys.path.append', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (439, 449), False, 'import os, sys\n'), ((2949, 2962), 'skimage.io.imread', 'skIR', (['imgFile'], {}), '(imgFile)\n', (2953, 2962), True, 'from skimage.io import imread as skIR\n'), ((5382, 5395), 'skimage.io.imread', 'skIR', (['imgFile'], {}), '(imgFile)\n', (5386, 5395), True, 'from skimage.io import imread as skIR\n'), ((7912, 7962), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(width, height)', '(255, 255, 255)'], {}), "('RGB', (width, height), (255, 255, 255))\n", (7921, 7962), False, 'from PIL import Image\n'), ((2860, 2883), 'os.path.exists', 'os.path.exists', (['imgFile'], {}), '(imgFile)\n', (2874, 2883), False, 'import os, sys\n'), ((2996, 3011), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3004, 3011), True, 'import numpy as np\n'), ((5305, 5328), 'os.path.exists', 'os.path.exists', (['imgFile'], {}), '(imgFile)\n', (5319, 5328), False, 'import os, sys\n'), ((5566, 5590), 'numpy.swapaxes', 'np.swapaxes', (['image', '(0)', '(1)'], {}), '(image, 0, 1)\n', (5577, 5590), True, 'import numpy as np\n'), ((6449, 6468), 'matplotlib.pyplot.imshow', 'plt.imshow', (['ext_Box'], {}), '(ext_Box)\n', (6459, 6468), True, 'import matplotlib.pyplot as plt\n'), ((3117, 3133), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3125, 3133), True, 'import numpy as np\n'), ((3166, 3182), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3174, 3182), True, 'import numpy as np\n'), ((3216, 3232), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (3224, 3232), True, 'import numpy as np\n'), ((3270, 3281), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3278, 3281), True, 'import numpy as np\n'), ((3326, 3337), 'numpy.array', 'np.array', (['(0)'], {}), '(0)\n', (3334, 3337), True, 'import numpy as np\n'), ((3711, 3757), 'numpy.array', 'np.array', (["obj[['x1', 'y1', 'x2', 'y2']].values"], {}), "(obj[['x1', 'y1', 'x2', 'y2']].values)\n", (3719, 3757), True, 'import numpy as np\n'), ((3804, 3832), 'numpy.array', 'np.array', (["[obj['class_ids']]"], {}), "([obj['class_ids']])\n", (3812, 3832), True, 'import numpy as np\n'), ((3867, 3892), 'numpy.array', 'np.array', (["[obj['scores']]"], {}), "([obj['scores']])\n", (3875, 3892), True, 'import numpy as np\n'), ((4329, 4398), 'numpy.zeros', 'np.zeros', (["(IMAGE_SHAPE[0], IMAGE_SHAPE[1], result['scores'].shape[0])"], {}), "((IMAGE_SHAPE[0], IMAGE_SHAPE[1], result['scores'].shape[0]))\n", (4337, 4398), True, 'import numpy as np\n'), ((3927, 3973), 'numpy.array', 'np.array', (["obj[['x1', 'y1', 'x2', 'y2']].values"], {}), "(obj[['x1', 'y1', 'x2', 'y2']].values)\n", (3935, 3973), True, 'import numpy as np\n'), ((6275, 6307), 'numpy.stack', 'np.stack', (['([ext_Mask] * 3)'], {'axis': '(2)'}), '([ext_Mask] * 3, axis=2)\n', (6283, 6307), True, 'import numpy as np\n'), ((4039, 4062), 'pandas.Series', 'pd.Series', (["obj['masks']"], {}), "(obj['masks'])\n", (4048, 4062), True, 'import pandas as pd\n'), ((4182, 4207), 'numpy.array', 'np.array', (["result['masks']"], {}), "(result['masks'])\n", (4190, 4207), True, 'import numpy as np\n'), ((5849, 5872), 'pandas.Series', 'pd.Series', 
(["obj['masks']"], {}), "(obj['masks'])\n", (5858, 5872), True, 'import pandas as pd\n')]
|
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import numpy as np
import pandas as pd
import seaborn as sns
def __add_name_labels(ax, xs, ys):
last_y_pos = 9999
for i, name in enumerate(xs):
y_pos = ys[name] - 0.1
if np.abs(y_pos - last_y_pos) < 0.1:
y_pos = last_y_pos - 0.1
last_y_pos = y_pos
ax.text(
i, y_pos, name, ha="center", va="center", bbox=dict(
boxstyle="round",
ec=(1, 1, 1, 0),
fc=(1, 1, 1, 0.7),
)
)
# Remove original ticks
ax.set_xticks([])
ax.set_xticks([], minor=True)
def visualize_ratings(file_name, df, x='beer', plot_type="box", show=False, figsize=(16, 9), sort=False):
order = df.groupby(x).median()['normalized rating'].sort_values(ascending=False).index if sort else df[x].unique()
fig = plt.figure(figsize=figsize)
# Plot ratings
if plot_type == "box":
ax = sns.boxplot(data=df, x=x, y='normalized rating', order=order, whis=[0, 100])
elif plot_type == "violin":
ax = sns.violinplot(data=df, x=x, y='normalized rating', order=order, inner="point", bw=0.15, scale="count")
ax.grid(linestyle=':')
# Add nice name labels
__add_name_labels(ax, xs=order, ys=df.groupby(x, sort=False)['normalized rating'].min())
plt.tight_layout()
if file_name is not None:
fig.savefig(file_name)
if show:
plt.show()
plt.close(fig)
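# Example usage (hypothetical DataFrame with one row per tasting and columns
# 'beer', 'person' and 'normalized rating'):
# visualize_ratings('ratings.png', df, plot_type='violin', sort=True)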
def visualize_ratings_per_person(file_name, df, show=False, figsize=(16, 9)):
fig = plt.figure(figsize=figsize)
ax = sns.scatterplot(data=df, x='beer', y='normalized rating', hue='person', s=50, edgecolor=(0, 0, 0, 0))
ax.grid(linestyle=':')
# Add nice name labels
__add_name_labels(ax, xs=df['beer'].unique(), ys=df.groupby('beer', sort=False)['normalized rating'].min())
plt.tight_layout()
if file_name is not None:
fig.savefig(file_name)
if show:
plt.show()
plt.close(fig)
def visualize_ratings_per_price(file_name, df, show=False, figsize=(16, 9)):
fig = plt.figure(figsize=figsize)
data = df.groupby('beer').agg(
price=pd.NamedAgg(column='price', aggfunc="first"),
rating=pd.NamedAgg(column='normalized rating', aggfunc="mean"),
beer=pd.NamedAgg(column='beer', aggfunc="first"),
)
# Plot ratings
ax = sns.scatterplot(data=data, x='price', y='rating', s=50, color="black", edgecolor=(0, 0, 0, 0))
ax.set_xlabel('€ / l')
ax.set_ylabel('normalized rating')
ax.grid(linestyle=':')
for _, price, rating, beer in data.itertuples():
ax.annotate(
beer,
xytext=(8, -5),
textcoords='offset pixels',
xy=(price, rating),
)
ax.set_xlim(right=2)
ax.imshow(
[[1, 0.5], [0.5, 0]],
cmap=plt.cm.RdYlGn,
interpolation='bicubic',
extent=plt.xlim() + plt.ylim(),
aspect="auto"
)
plt.tight_layout()
if file_name is not None:
fig.savefig(file_name)
if show:
plt.show()
plt.close(fig)
def visualize_alcohol_per_beer(file_name, df, show=False, figsize=(16, 9)):
fig = plt.figure(figsize=figsize)
data = df.sort_values(['vol', 'beer']).groupby('beer', sort=False).agg(
beer=pd.NamedAgg(column='beer', aggfunc="first"),
rating=pd.NamedAgg(column='normalized rating', aggfunc="mean"),
vol=pd.NamedAgg(column='vol', aggfunc="first"),
)
# Plot ratings
ax = sns.scatterplot(data=data, x='vol', y='rating', s=50, color="black", edgecolor=(0, 0, 0, 0))
ax.grid(linestyle=':')
ax.xaxis.set_major_formatter(mtick.PercentFormatter())
# Plot trend fit
from sklearn.linear_model import LinearRegression
reg = LinearRegression().fit(data['vol'].values.reshape(-1, 1), data['rating'])
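    # sklearn expects a 2-D feature matrix, hence reshape(-1, 1); the fitted
    # line is evaluated at the current x-limits so it spans the whole plot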
plt.plot(
plt.xlim(),
reg.predict(np.array(plt.xlim()).reshape(-1, 1)),
linewidth=1,
color="black",
linestyle="dashed",
label="Trend"
)
plt.legend()
for _, beer, rating, vol in data.itertuples():
ax.annotate(
beer,
xytext=(8, -5),
textcoords='offset pixels',
xy=(vol, rating),
bbox=dict(
boxstyle="round",
ec=(1, 1, 1, 0),
fc=(1, 1, 1, 0.7),
),
)
plt.tight_layout()
if file_name is not None:
fig.savefig(file_name)
if show:
plt.show()
plt.close(fig)
|
[
"matplotlib.pyplot.xlim",
"pandas.NamedAgg",
"matplotlib.pyplot.show",
"numpy.abs",
"seaborn.scatterplot",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"seaborn.violinplot",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"matplotlib.pyplot.tight_layout",
"matplotlib.ticker.PercentFormatter"
] |
[((921, 948), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (931, 948), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1420), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1418, 1420), True, 'import matplotlib.pyplot as plt\n'), ((1523, 1537), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1532, 1537), True, 'import matplotlib.pyplot as plt\n'), ((1632, 1659), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (1642, 1659), True, 'import matplotlib.pyplot as plt\n'), ((1672, 1778), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': '"""beer"""', 'y': '"""normalized rating"""', 'hue': '"""person"""', 's': '(50)', 'edgecolor': '(0, 0, 0, 0)'}), "(data=df, x='beer', y='normalized rating', hue='person', s=\n 50, edgecolor=(0, 0, 0, 0))\n", (1687, 1778), True, 'import seaborn as sns\n'), ((1952, 1970), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1968, 1970), True, 'import matplotlib.pyplot as plt\n'), ((2073, 2087), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2082, 2087), True, 'import matplotlib.pyplot as plt\n'), ((2181, 2208), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (2191, 2208), True, 'import matplotlib.pyplot as plt\n'), ((2479, 2577), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'data', 'x': '"""price"""', 'y': '"""rating"""', 's': '(50)', 'color': '"""black"""', 'edgecolor': '(0, 0, 0, 0)'}), "(data=data, x='price', y='rating', s=50, color='black',\n edgecolor=(0, 0, 0, 0))\n", (2494, 2577), True, 'import seaborn as sns\n'), ((3099, 3117), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3115, 3117), True, 'import matplotlib.pyplot as plt\n'), ((3220, 3234), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3229, 3234), True, 'import matplotlib.pyplot as plt\n'), ((3327, 3354), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3337, 3354), True, 'import matplotlib.pyplot as plt\n'), ((3662, 3758), 'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'data', 'x': '"""vol"""', 'y': '"""rating"""', 's': '(50)', 'color': '"""black"""', 'edgecolor': '(0, 0, 0, 0)'}), "(data=data, x='vol', y='rating', s=50, color='black',\n edgecolor=(0, 0, 0, 0))\n", (3677, 3758), True, 'import seaborn as sns\n'), ((4212, 4224), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4222, 4224), True, 'import matplotlib.pyplot as plt\n'), ((4584, 4602), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4600, 4602), True, 'import matplotlib.pyplot as plt\n'), ((4705, 4719), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4714, 4719), True, 'import matplotlib.pyplot as plt\n'), ((1015, 1091), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': 'x', 'y': '"""normalized rating"""', 'order': 'order', 'whis': '[0, 100]'}), "(data=df, x=x, y='normalized rating', order=order, whis=[0, 100])\n", (1026, 1091), True, 'import seaborn as sns\n'), ((1507, 1517), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1515, 1517), True, 'import matplotlib.pyplot as plt\n'), ((2057, 2067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2065, 2067), True, 'import matplotlib.pyplot as plt\n'), ((3204, 3214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3212, 3214), True, 'import 
matplotlib.pyplot as plt\n'), ((3817, 3841), 'matplotlib.ticker.PercentFormatter', 'mtick.PercentFormatter', ([], {}), '()\n', (3839, 3841), True, 'import matplotlib.ticker as mtick\n'), ((4031, 4041), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (4039, 4041), True, 'import matplotlib.pyplot as plt\n'), ((4689, 4699), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4697, 4699), True, 'import matplotlib.pyplot as plt\n'), ((273, 299), 'numpy.abs', 'np.abs', (['(y_pos - last_y_pos)'], {}), '(y_pos - last_y_pos)\n', (279, 299), True, 'import numpy as np\n'), ((1139, 1247), 'seaborn.violinplot', 'sns.violinplot', ([], {'data': 'df', 'x': 'x', 'y': '"""normalized rating"""', 'order': 'order', 'inner': '"""point"""', 'bw': '(0.15)', 'scale': '"""count"""'}), "(data=df, x=x, y='normalized rating', order=order, inner=\n 'point', bw=0.15, scale='count')\n", (1153, 1247), True, 'import seaborn as sns\n'), ((2262, 2306), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': '"""price"""', 'aggfunc': '"""first"""'}), "(column='price', aggfunc='first')\n", (2273, 2306), True, 'import pandas as pd\n'), ((2324, 2379), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': '"""normalized rating"""', 'aggfunc': '"""mean"""'}), "(column='normalized rating', aggfunc='mean')\n", (2335, 2379), True, 'import pandas as pd\n'), ((2395, 2438), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': '"""beer"""', 'aggfunc': '"""first"""'}), "(column='beer', aggfunc='first')\n", (2406, 2438), True, 'import pandas as pd\n'), ((3448, 3491), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': '"""beer"""', 'aggfunc': '"""first"""'}), "(column='beer', aggfunc='first')\n", (3459, 3491), True, 'import pandas as pd\n'), ((3509, 3564), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': '"""normalized rating"""', 'aggfunc': '"""mean"""'}), "(column='normalized rating', aggfunc='mean')\n", (3520, 3564), True, 'import pandas as pd\n'), ((3579, 3621), 'pandas.NamedAgg', 'pd.NamedAgg', ([], {'column': '"""vol"""', 'aggfunc': '"""first"""'}), "(column='vol', aggfunc='first')\n", (3590, 3621), True, 'import pandas as pd\n'), ((3933, 3951), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3949, 3951), False, 'from sklearn.linear_model import LinearRegression\n'), ((3037, 3047), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (3045, 3047), True, 'import matplotlib.pyplot as plt\n'), ((3050, 3060), 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), '()\n', (3058, 3060), True, 'import matplotlib.pyplot as plt\n'), ((4073, 4083), 'matplotlib.pyplot.xlim', 'plt.xlim', ([], {}), '()\n', (4081, 4083), True, 'import matplotlib.pyplot as plt\n')]
|
import sys
sys.path.append('../')
sys.path.append('/opt/nvidia/deepstream/deepstream/lib')
from time import sleep
import time
import numpy as np
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstVideo', '1.0')
from gi.repository import GObject, Gst, GstVideo
from common.FPS import GETFPS
import pyds
from gstutils import get_num_channels, get_np_dtype
from my_utils import Segmentor
PGIE_CLASS_ID_VEHICLE = 0
PGIE_CLASS_ID_BICYCLE = 1
PGIE_CLASS_ID_PERSON = 2
PGIE_CLASS_ID_ROADSIGN = 3
segmentor = Segmentor((720, 1280, 3), network_name='fcn-resnet18-cityscapes-1024x512')
def gst_to_np(sample):
buffer = sample.get_buffer()
# print(f'pts: {buffer.pts / 1e9} -- dts: {buffer.dts / 1e9} -- offset: {buffer.offset} -- duration: {buffer.duration / 1e9}')
caps = sample.get_caps()
# print(caps.get_structure(0).get_value('format'))
# print(caps.get_structure(0).get_value('height'))
# print(caps.get_structure(0).get_value('width'))
# batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
# l_frame = batch_meta.frame_meta_list
# frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
# frame_number = frame_meta.frame_num
# pts = frame_meta.buf_pts
# ntp_ts = frame_meta.ntp_timestamp
# print(f'frame number: {frame_number}')
# print(f'frame pts (seconds): {pts / 1e9}')
# print(f'ntp timestamp (seconds): {ntp_ts / 1e9}')
print(f'from appsink ------- pts: {buffer.pts / 1e9}')
caps_format = caps.get_structure(0)
video_format = GstVideo.VideoFormat.from_string(
caps_format.get_value('format'))
w, h = caps_format.get_value('width'), caps_format.get_value('height')
c = get_num_channels(video_format)
buffer_size = buffer.get_size()
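    # if the mapped buffer is larger than h*w*c (e.g. padded or stride-aligned
    # memory), fall back to a flat byte array instead of an (h, w, c) view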
shape = (h, w, c) if (h * w * c == buffer_size) else buffer_size
array = np.ndarray(shape=shape, buffer=buffer.extract_dup(0, buffer_size),
dtype=get_np_dtype(video_format))
return np.squeeze(array), buffer.pts
def new_buffer(sink, data):
start_time = time.time()
sample = sink.emit("pull-sample")
arr, pts = gst_to_np(sample)
# print(f'data type: {arr.dtype}')
segmentor.do_segmentation(arr, str(pts))
# seg_map = segnet.predict(arr)
# cv2.imwrite(f'{pts}.jpg', cv2.cvtColor(seg_map, cv2.COLOR_RGB2BGR))
print(f'--------- segmentation done: {time.time() - start_time} ----------')
return Gst.FlowReturn.OK
class Pipeline:
def __init__(self,
input_file_path,
model_config_path='./model/config_infer_primary_detectnet_v2.txt',
labels_path='./model/detectnet_v2_labels.txt',
output_file_path='./out.mp4'):
self.model_config_path = model_config_path
self.labels_path = labels_path
self.output_file_path = output_file_path
self.width = 1280
self.height = 720
GObject.threads_init()
Gst.init(None)
self.pipeline = Gst.Pipeline()
if not self.pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
self.source, self.h264parser, self.decoder = self._create_source_elements(input_file_path)
self.streammux, self.pgie = self._create_middle_elements()
self.nvvidconv, self.capsfilter, self.sink = self._create_sink_elements()
# Link the elements
print("Linking elements in the Pipeline \n")
self._link()
# osdsinkpad = self.nvosd.get_static_pad("sink")
# if not osdsinkpad:
# sys.stderr.write(" Unable to get sink pad of nvosd \n")
#
# osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)
self.loop = GObject.MainLoop()
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self._bus_call, self.loop)
def start(self):
# start play back and listen to events
print("Starting pipeline \n")
self.pipeline.set_state(Gst.State.PLAYING)
self.loop.run()
def _create_source_elements(self, file_path):
# Source element for reading from the file
source = Gst.ElementFactory.make("filesrc", "file-source")
if not source:
sys.stderr.write(" Unable to create Source \n")
# Since the data format in the input file is elementary h264 stream,
# we need a h264parser
h264parser = Gst.ElementFactory.make("h264parse", "h264-parser")
if not h264parser:
sys.stderr.write(" Unable to create h264 parser \n")
# Use nvdec_h264 for hardware accelerated decode on GPU
decoder = Gst.ElementFactory.make("nvv4l2decoder", "nvv4l2-decoder")
if not decoder:
sys.stderr.write(" Unable to create Nvv4l2 Decoder \n")
source.set_property('location', file_path)
self.pipeline.add(source)
self.pipeline.add(h264parser)
self.pipeline.add(decoder)
return source, h264parser, decoder
def _create_middle_elements(self):
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on decoder's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# # Use convertor to convert from NV12 to RGBA as required by nvosd
# nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
# if not nvvidconv:
# sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
# nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
# if not nvosd:
# sys.stderr.write(" Unable to create nvosd \n")
#
# nvosd.set_property('display-clock', 1) # here: https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_plugin_gst-nvdsosd.html
streammux.set_property('width', self.width)
streammux.set_property('height', self.height)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', self.model_config_path)
self.pipeline.add(streammux)
self.pipeline.add(pgie)
# self.pipeline.add(nvvidconv)
# self.pipeline.add(nvosd)
return streammux, pgie
def _create_sink_elements(self):
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor appsink")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv2 \n")
capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
if not capsfilter:
sys.stderr.write(" Unable to create capsfilter \n")
caps = Gst.Caps.from_string("video/x-raw, format=RGBA")
capsfilter.set_property("caps", caps)
sink = Gst.ElementFactory.make("appsink", "sink")
if not sink:
sys.stderr.write(" Unable to create appsink \n")
sink.set_property("emit-signals", True)
caps = Gst.caps_from_string("video/x-raw, format=RGBA")
sink.set_property("caps", caps)
# sink.set_property("drop", True)
# sink.set_property("max_buffers", 3)
# sink.set_property("sync", False)
sink.set_property("wait-on-eos", False)
sink.connect("new-sample", new_buffer, sink)
self.pipeline.add(nvvidconv)
self.pipeline.add(capsfilter)
self.pipeline.add(sink)
return nvvidconv, capsfilter, sink
def _link(self):
self.source.link(self.h264parser)
self.h264parser.link(self.decoder)
sinkpad = self.streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = self.decoder.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of decoder \n")
srcpad.link(sinkpad)
self.streammux.link(self.pgie)
self.pgie.link(self.nvvidconv)
self.nvvidconv.link(self.capsfilter)
self.capsfilter.link(self.sink)
@staticmethod
def _bus_call(bus, message, loop):
        print('bus called on {}'.format(message))
t = message.type
if t == Gst.MessageType.EOS:
sys.stdout.write("End-of-stream\n")
loop.quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write("Warning: %s: %s\n" % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write("Error: %s: %s\n" % (err, debug))
loop.quit()
return True
@staticmethod
def osd_sink_pad_buffer_probe(pad, info, u_data):
obj_counter = {
PGIE_CLASS_ID_VEHICLE: 0,
PGIE_CLASS_ID_PERSON: 0,
PGIE_CLASS_ID_BICYCLE: 0,
PGIE_CLASS_ID_ROADSIGN: 0
}
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.glist_get_nvds_frame_meta()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
# frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number = frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
l_obj = frame_meta.obj_meta_list
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
# obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
try:
l_obj = l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
fps_stream.get_fps()
py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}" \
.format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
print(pyds.get_string(py_nvosd_text_params.display_text))
pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
# if WRITE_FRAMES:
# n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            # # convert python array into numpy array format.
            # frame_image = np.array(n_frame, copy=True, order='C')
            # # convert the array into cv2 default color format
# frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
# cv2.imwrite("./frame_" + str(frame_number) + ".jpg",
# frame_image)
# print('saved to')
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
class PipelineCamera:
def __init__(self,
model_config_path='./model/config_infer_primary_detectnet_v2.txt',
labels_path='./model/detectnet_v2_labels.txt',
output_file_path='./out.mp4'):
self.model_config_path = model_config_path
self.labels_path = labels_path
self.output_file_path = output_file_path
self.width = 1280
self.height = 720
GObject.threads_init()
Gst.init(None)
self.pipeline = Gst.Pipeline()
if not self.pipeline:
sys.stderr.write(" Unable to create Pipeline \n")
self.source, self.nvvidconv_src, self.caps_nvvidconv_src = self._create_source_elements()
self.tee, self.queue_od, self.queue_seg = self._create_branching_elements()
self.streammux, self.pgie, self.nvvidconvosd, self.nvosd = self._create_middle_elements()
self.nvvidconv, self.capsfilter, self.sink, self.fake_sink = self._create_sink_elements()
# Link the elements
print("Linking elements in the Pipeline \n")
self._link()
od_sink_pad = self.queue_od.get_static_pad("sink")
seg_sink_pad = self.queue_seg.get_static_pad("sink")
tee_od_pad = self.tee.get_request_pad("src_%u")
tee_seg_pad = self.tee.get_request_pad("src_%u")
if not tee_od_pad or not tee_seg_pad:
sys.stderr.write("Unable to get request pads\n")
tee_od_pad.link(od_sink_pad)
tee_seg_pad.link(seg_sink_pad)
osdsinkpad = self.nvosd.get_static_pad("sink")
if not osdsinkpad:
sys.stderr.write(" Unable to get sink pad of nvosd \n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, self.osd_sink_pad_buffer_probe, 0)
self.loop = GObject.MainLoop()
self.bus = self.pipeline.get_bus()
self.bus.add_signal_watch()
self.bus.connect("message::eos", self._bus_call, self.loop)
def start(self):
# start play back and listen to events
print("Starting pipeline \n")
self.pipeline.set_state(Gst.State.PLAYING)
self.loop.run()
def _create_source_elements(self):
source = Gst.ElementFactory.make("nvarguscamerasrc", "src-elem")
if not source:
sys.stderr.write(" Unable to create Source \n")
# Converter to scale the image
nvvidconv_src = Gst.ElementFactory.make("nvvideoconvert", "convertor_src")
if not nvvidconv_src:
sys.stderr.write(" Unable to create nvvidconv_src \n")
# Caps for NVMM and resolution scaling
caps_nvvidconv_src = Gst.ElementFactory.make("capsfilter", "nvmm_caps")
if not caps_nvvidconv_src:
sys.stderr.write(" Unable to create capsfilter \n")
source.set_property('bufapi-version', True)
caps_nvvidconv_src.set_property('caps', Gst.Caps.from_string(
'video/x-raw(memory:NVMM), width={}, height={}'.format(self.width, self.height)))
self.pipeline.add(source)
self.pipeline.add(nvvidconv_src)
self.pipeline.add(caps_nvvidconv_src)
return source, nvvidconv_src, caps_nvvidconv_src
def _create_middle_elements(self):
streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
if not streammux:
sys.stderr.write(" Unable to create NvStreamMux \n")
# Use nvinfer to run inferencing on decoder's output,
# behaviour of inferencing is set through config file
pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
if not pgie:
sys.stderr.write(" Unable to create pgie \n")
# Use convertor to convert from NV12 to RGBA as required by nvosd
nvvidconvosd = Gst.ElementFactory.make("nvvideoconvert", "convertor")
if not nvvidconvosd:
sys.stderr.write(" Unable to create nvvidconv \n")
# Create OSD to draw on the converted RGBA buffer
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
if not nvosd:
sys.stderr.write(" Unable to create nvosd \n")
nvosd.set_property('display-clock', 1) # here: https://docs.nvidia.com/metropolis/deepstream/dev-guide/text/DS_plugin_gst-nvdsosd.html
streammux.set_property('width', self.width)
streammux.set_property('height', self.height)
streammux.set_property('batch-size', 1)
streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', self.model_config_path)
self.pipeline.add(streammux)
self.pipeline.add(pgie)
self.pipeline.add(nvvidconvosd)
self.pipeline.add(nvosd)
return streammux, pgie, nvvidconvosd, nvosd
def _create_sink_elements(self):
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor appsink")
if not nvvidconv:
sys.stderr.write(" Unable to create nvvidconv2 \n")
capsfilter = Gst.ElementFactory.make("capsfilter", "capsfilter")
if not capsfilter:
sys.stderr.write(" Unable to create capsfilter \n")
caps = Gst.Caps.from_string("video/x-raw, format=RGBA")
capsfilter.set_property("caps", caps)
sink = Gst.ElementFactory.make("appsink", "sink")
if not sink:
sys.stderr.write(" Unable to create appsink \n")
sink.set_property("emit-signals", True)
caps = Gst.caps_from_string("video/x-raw, format=RGBA")
sink.set_property("caps", caps)
sink.set_property("drop", True)
sink.set_property("max_buffers", 1)
# sink.set_property("sync", False)
sink.set_property("wait-on-eos", False)
sink.connect("new-sample", new_buffer, sink)
fakesink = Gst.ElementFactory.make("fakesink", "fakesink")
self.pipeline.add(nvvidconv)
self.pipeline.add(capsfilter)
self.pipeline.add(sink)
self.pipeline.add(fakesink)
return nvvidconv, capsfilter, sink, fakesink
def _create_branching_elements(self):
tee = Gst.ElementFactory.make("tee", "tee")
queue_od = Gst.ElementFactory.make("queue", "object detection queue")
queue_seg = Gst.ElementFactory.make("queue", "segmentation queue")
queue_od.set_property("max-size-buffers", 1)
queue_seg.set_property("max-size-buffers", 1)
queue_seg.set_property("leaky", 2)
self.pipeline.add(tee)
self.pipeline.add(queue_od)
self.pipeline.add(queue_seg)
return tee, queue_od, queue_seg
def _link(self):
self.source.link(self.tee)
self.queue_od.link(self.nvvidconv_src)
self.nvvidconv_src.link(self.caps_nvvidconv_src)
sinkpad = self.streammux.get_request_pad("sink_0")
if not sinkpad:
sys.stderr.write(" Unable to get the sink pad of streammux \n")
srcpad = self.caps_nvvidconv_src.get_static_pad("src")
if not srcpad:
sys.stderr.write(" Unable to get source pad of decoder \n")
srcpad.link(sinkpad)
self.streammux.link(self.pgie)
self.pgie.link(self.nvvidconvosd)
self.nvvidconvosd.link(self.nvosd)
self.nvosd.link(self.fake_sink)
# self.pgie.link(self.nvvidconv)
# self.nvvidconv.link(self.capsfilter)
# self.capsfilter.link(self.sink)
self.queue_seg.link(self.nvvidconv)
self.nvvidconv.link(self.capsfilter)
self.capsfilter.link(self.sink)
@staticmethod
def _bus_call(bus, message, loop):
        print('bus called on {}'.format(message))
t = message.type
if t == Gst.MessageType.EOS:
sys.stdout.write("End-of-stream\n")
loop.quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write("Warning: %s: %s\n" % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write("Error: %s: %s\n" % (err, debug))
loop.quit()
return True
@staticmethod
def osd_sink_pad_buffer_probe(pad, info, u_data):
obj_counter = {
PGIE_CLASS_ID_VEHICLE: 0,
PGIE_CLASS_ID_PERSON: 0,
PGIE_CLASS_ID_BICYCLE: 0,
PGIE_CLASS_ID_ROADSIGN: 0
}
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer ")
return
# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
# Note that l_frame.data needs a cast to pyds.NvDsFrameMeta
# The casting is done by pyds.glist_get_nvds_frame_meta()
# The casting also keeps ownership of the underlying memory
# in the C code, so the Python garbage collector will leave
# it alone.
# frame_meta = pyds.glist_get_nvds_frame_meta(l_frame.data)
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
frame_number = frame_meta.frame_num
num_rects = frame_meta.num_obj_meta
l_obj = frame_meta.obj_meta_list
pts = frame_meta.buf_pts
print(f'from osd ------- pts: {pts / 1e9}')
while l_obj is not None:
try:
# Casting l_obj.data to pyds.NvDsObjectMeta
# obj_meta=pyds.glist_get_nvds_object_meta(l_obj.data)
obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
except StopIteration:
break
obj_counter[obj_meta.class_id] += 1
obj_meta.rect_params.border_color.set(0.0, 0.0, 1.0, 0.0)
try:
l_obj = l_obj.next
except StopIteration:
break
# Acquiring a display meta object. The memory ownership remains in
# the C code so downstream plugins can still access it. Otherwise
# the garbage collector will claim it when this probe function exits.
display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
display_meta.num_labels = 1
py_nvosd_text_params = display_meta.text_params[0]
# Setting display text to be shown on screen
# Note that the pyds module allocates a buffer for the string, and the
# memory will not be claimed by the garbage collector.
# Reading the display_text field here will return the C address of the
# allocated string. Use pyds.get_string() to get the string content.
fps_stream.get_fps()
py_nvosd_text_params.display_text = "Frame Number={} Number of Objects={} Vehicle_count={} Person_count={}" \
.format(frame_number, num_rects, obj_counter[PGIE_CLASS_ID_VEHICLE], obj_counter[PGIE_CLASS_ID_PERSON])
# Now set the offsets where the string should appear
py_nvosd_text_params.x_offset = 10
py_nvosd_text_params.y_offset = 12
# Font , font-color and font-size
py_nvosd_text_params.font_params.font_name = "Serif"
py_nvosd_text_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
py_nvosd_text_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Using pyds.get_string() to get display_text as string
print(pyds.get_string(py_nvosd_text_params.display_text))
pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
# if WRITE_FRAMES:
# n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
            # # convert python array into numpy array format.
            # frame_image = np.array(n_frame, copy=True, order='C')
            # # convert the array into cv2 default color format
# frame_image = cv2.cvtColor(frame_image, cv2.COLOR_RGBA2BGRA)
# cv2.imwrite("./frame_" + str(frame_number) + ".jpg",
# frame_image)
# print('saved to')
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
if __name__ == '__main__':
fps_stream = GETFPS(0)
# out_file_name = '{}.mp4'.format(sys.argv[1])
# in_file_path = sys.argv[2]
out_file_name = 'out.mp4'
# pipeline = Pipeline(output_file_path=out_file_name)
# pipeline = Pipeline(in_file_path, output_file_path=out_file_name)
pipeline = PipelineCamera(output_file_path=out_file_name)
try:
pipeline.start()
except KeyboardInterrupt as e:
# sink.get_static_pad('sink').send_event(Gst.Event.new_eos())
# pipeline.send_event(Gst.Event.new_eos())
# pipeline.set_state(Gst.State.NULL)
pipeline.pipeline.send_event(Gst.Event.new_eos())
        # Wait for the EOS to be caught by the bus
msg = pipeline.bus.timed_pop_filtered(
Gst.CLOCK_TIME_NONE,
Gst.MessageType.EOS
)
print(msg)
sleep(5)
except Exception as e:
print(e)
finally:
pyds.unset_callback_funcs()
pipeline.pipeline.set_state(Gst.State.NULL)
|
[
"sys.stdout.write",
"pyds.unset_callback_funcs",
"gstutils.get_np_dtype",
"pyds.get_string",
"my_utils.Segmentor",
"pyds.nvds_add_display_meta_to_frame",
"sys.path.append",
"gi.repository.Gst.Caps.from_string",
"gi.repository.GObject.MainLoop",
"gi.repository.GObject.threads_init",
"pyds.nvds_acquire_display_meta_from_pool",
"time.sleep",
"gi.repository.Gst.init",
"numpy.squeeze",
"gi.require_version",
"gi.repository.Gst.ElementFactory.make",
"pyds.NvDsFrameMeta.cast",
"gstutils.get_num_channels",
"gi.repository.Gst.Pipeline",
"gi.repository.Gst.caps_from_string",
"time.time",
"gi.repository.Gst.Event.new_eos",
"common.FPS.GETFPS",
"sys.stderr.write",
"pyds.NvDsObjectMeta.cast"
] |
[((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((34, 90), 'sys.path.append', 'sys.path.append', (['"""/opt/nvidia/deepstream/deepstream/lib"""'], {}), "('/opt/nvidia/deepstream/deepstream/lib')\n", (49, 90), False, 'import sys\n'), ((157, 189), 'gi.require_version', 'gi.require_version', (['"""Gst"""', '"""1.0"""'], {}), "('Gst', '1.0')\n", (175, 189), False, 'import gi\n'), ((190, 227), 'gi.require_version', 'gi.require_version', (['"""GstVideo"""', '"""1.0"""'], {}), "('GstVideo', '1.0')\n", (208, 227), False, 'import gi\n'), ((525, 599), 'my_utils.Segmentor', 'Segmentor', (['(720, 1280, 3)'], {'network_name': '"""fcn-resnet18-cityscapes-1024x512"""'}), "((720, 1280, 3), network_name='fcn-resnet18-cityscapes-1024x512')\n", (534, 599), False, 'from my_utils import Segmentor\n'), ((1693, 1723), 'gstutils.get_num_channels', 'get_num_channels', (['video_format'], {}), '(video_format)\n', (1709, 1723), False, 'from gstutils import get_num_channels, get_np_dtype\n'), ((2055, 2066), 'time.time', 'time.time', ([], {}), '()\n', (2064, 2066), False, 'import time\n'), ((26582, 26591), 'common.FPS.GETFPS', 'GETFPS', (['(0)'], {}), '(0)\n', (26588, 26591), False, 'from common.FPS import GETFPS\n'), ((1978, 1995), 'numpy.squeeze', 'np.squeeze', (['array'], {}), '(array)\n', (1988, 1995), True, 'import numpy as np\n'), ((2919, 2941), 'gi.repository.GObject.threads_init', 'GObject.threads_init', ([], {}), '()\n', (2939, 2941), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((2950, 2964), 'gi.repository.Gst.init', 'Gst.init', (['None'], {}), '(None)\n', (2958, 2964), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((2990, 3004), 'gi.repository.Gst.Pipeline', 'Gst.Pipeline', ([], {}), '()\n', (3002, 3004), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((3728, 3746), 'gi.repository.GObject.MainLoop', 'GObject.MainLoop', ([], {}), '()\n', (3744, 3746), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((4195, 4244), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""filesrc"""', '"""file-source"""'], {}), "('filesrc', 'file-source')\n", (4218, 4244), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((4458, 4509), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""h264parse"""', '"""h264-parser"""'], {}), "('h264parse', 'h264-parser')\n", (4481, 4509), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((4685, 4743), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvv4l2decoder"""', '"""nvv4l2-decoder"""'], {}), "('nvv4l2decoder', 'nvv4l2-decoder')\n", (4708, 4743), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((5099, 5153), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvstreammux"""', '"""Stream-muxer"""'], {}), "('nvstreammux', 'Stream-muxer')\n", (5122, 5153), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((5385, 5440), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvinfer"""', '"""primary-inference"""'], {}), "('nvinfer', 'primary-inference')\n", (5408, 5440), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((6662, 6724), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvvideoconvert"""', '"""convertor appsink"""'], {}), "('nvvideoconvert', 'convertor appsink')\n", (6685, 6724), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((6837, 6888), 
'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""capsfilter"""', '"""capsfilter"""'], {}), "('capsfilter', 'capsfilter')\n", (6860, 6888), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((6996, 7044), 'gi.repository.Gst.Caps.from_string', 'Gst.Caps.from_string', (['"""video/x-raw, format=RGBA"""'], {}), "('video/x-raw, format=RGBA')\n", (7016, 7044), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((7107, 7149), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""appsink"""', '"""sink"""'], {}), "('appsink', 'sink')\n", (7130, 7149), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((7295, 7343), 'gi.repository.Gst.caps_from_string', 'Gst.caps_from_string', (['"""video/x-raw, format=RGBA"""'], {}), "('video/x-raw, format=RGBA')\n", (7315, 7343), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((14110, 14132), 'gi.repository.GObject.threads_init', 'GObject.threads_init', ([], {}), '()\n', (14130, 14132), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((14141, 14155), 'gi.repository.Gst.init', 'Gst.init', (['None'], {}), '(None)\n', (14149, 14155), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((14181, 14195), 'gi.repository.Gst.Pipeline', 'Gst.Pipeline', ([], {}), '()\n', (14193, 14195), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((15450, 15468), 'gi.repository.GObject.MainLoop', 'GObject.MainLoop', ([], {}), '()\n', (15466, 15468), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((15855, 15910), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvarguscamerasrc"""', '"""src-elem"""'], {}), "('nvarguscamerasrc', 'src-elem')\n", (15878, 15910), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((16058, 16116), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvvideoconvert"""', '"""convertor_src"""'], {}), "('nvvideoconvert', 'convertor_src')\n", (16081, 16116), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((16291, 16341), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""capsfilter"""', '"""nvmm_caps"""'], {}), "('capsfilter', 'nvmm_caps')\n", (16314, 16341), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((16897, 16951), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvstreammux"""', '"""Stream-muxer"""'], {}), "('nvstreammux', 'Stream-muxer')\n", (16920, 16951), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((17183, 17238), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvinfer"""', '"""primary-inference"""'], {}), "('nvinfer', 'primary-inference')\n", (17206, 17238), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((17416, 17470), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvvideoconvert"""', '"""convertor"""'], {}), "('nvvideoconvert', 'convertor')\n", (17439, 17470), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((17638, 17691), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvdsosd"""', '"""onscreendisplay"""'], {}), "('nvdsosd', 'onscreendisplay')\n", (17661, 17691), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((18461, 18523), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""nvvideoconvert"""', '"""convertor appsink"""'], {}), "('nvvideoconvert', 'convertor appsink')\n", (18484, 18523), False, 'from 
gi.repository import GObject, Gst, GstVideo\n'), ((18636, 18687), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""capsfilter"""', '"""capsfilter"""'], {}), "('capsfilter', 'capsfilter')\n", (18659, 18687), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((18795, 18843), 'gi.repository.Gst.Caps.from_string', 'Gst.Caps.from_string', (['"""video/x-raw, format=RGBA"""'], {}), "('video/x-raw, format=RGBA')\n", (18815, 18843), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((18906, 18948), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""appsink"""', '"""sink"""'], {}), "('appsink', 'sink')\n", (18929, 18948), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((19094, 19142), 'gi.repository.Gst.caps_from_string', 'Gst.caps_from_string', (['"""video/x-raw, format=RGBA"""'], {}), "('video/x-raw, format=RGBA')\n", (19114, 19142), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((19431, 19478), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""fakesink"""', '"""fakesink"""'], {}), "('fakesink', 'fakesink')\n", (19454, 19478), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((19734, 19771), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""tee"""', '"""tee"""'], {}), "('tee', 'tee')\n", (19757, 19771), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((19791, 19849), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""queue"""', '"""object detection queue"""'], {}), "('queue', 'object detection queue')\n", (19814, 19849), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((19870, 19924), 'gi.repository.Gst.ElementFactory.make', 'Gst.ElementFactory.make', (['"""queue"""', '"""segmentation queue"""'], {}), "('queue', 'segmentation queue')\n", (19893, 19924), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((27468, 27495), 'pyds.unset_callback_funcs', 'pyds.unset_callback_funcs', ([], {}), '()\n', (27493, 27495), False, 'import pyds\n'), ((1938, 1964), 'gstutils.get_np_dtype', 'get_np_dtype', (['video_format'], {}), '(video_format)\n', (1950, 1964), False, 'from gstutils import get_num_channels, get_np_dtype\n'), ((3047, 3096), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create Pipeline \n"""'], {}), "(' Unable to create Pipeline \\n')\n", (3063, 3096), False, 'import sys\n'), ((4280, 4327), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create Source \n"""'], {}), "(' Unable to create Source \\n')\n", (4296, 4327), False, 'import sys\n'), ((4549, 4601), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create h264 parser \n"""'], {}), "(' Unable to create h264 parser \\n')\n", (4565, 4601), False, 'import sys\n'), ((4780, 4835), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create Nvv4l2 Decoder \n"""'], {}), "(' Unable to create Nvv4l2 Decoder \\n')\n", (4796, 4835), False, 'import sys\n'), ((5192, 5244), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create NvStreamMux \n"""'], {}), "(' Unable to create NvStreamMux \\n')\n", (5208, 5244), False, 'import sys\n'), ((5474, 5519), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create pgie \n"""'], {}), "(' Unable to create pgie \\n')\n", (5490, 5519), False, 'import sys\n'), ((6763, 6814), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create nvvidconv2 \n"""'], {}), "(' Unable to create nvvidconv2 \\n')\n", (6779, 6814), False, 'import sys\n'), ((6928, 6979), 
'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create capsfilter \n"""'], {}), "(' Unable to create capsfilter \\n')\n", (6944, 6979), False, 'import sys\n'), ((7183, 7231), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create appsink \n"""'], {}), "(' Unable to create appsink \\n')\n", (7199, 7231), False, 'import sys\n'), ((7973, 8036), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to get the sink pad of streammux \n"""'], {}), "(' Unable to get the sink pad of streammux \\n')\n", (7989, 8036), False, 'import sys\n'), ((8124, 8183), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to get source pad of decoder \n"""'], {}), "(' Unable to get source pad of decoder \\n')\n", (8140, 8183), False, 'import sys\n'), ((8560, 8595), 'sys.stdout.write', 'sys.stdout.write', (['"""End-of-stream\n"""'], {}), "('End-of-stream\\n')\n", (8576, 8595), False, 'import sys\n'), ((11260, 11312), 'pyds.nvds_acquire_display_meta_from_pool', 'pyds.nvds_acquire_display_meta_from_pool', (['batch_meta'], {}), '(batch_meta)\n', (11300, 11312), False, 'import pyds\n'), ((12894, 12955), 'pyds.nvds_add_display_meta_to_frame', 'pyds.nvds_add_display_meta_to_frame', (['frame_meta', 'display_meta'], {}), '(frame_meta, display_meta)\n', (12929, 12955), False, 'import pyds\n'), ((14238, 14287), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create Pipeline \n"""'], {}), "(' Unable to create Pipeline \\n')\n", (14254, 14287), False, 'import sys\n'), ((15063, 15111), 'sys.stderr.write', 'sys.stderr.write', (['"""Unable to get request pads\n"""'], {}), "('Unable to get request pads\\n')\n", (15079, 15111), False, 'import sys\n'), ((15283, 15338), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to get sink pad of nvosd \n"""'], {}), "(' Unable to get sink pad of nvosd \\n')\n", (15299, 15338), False, 'import sys\n'), ((15946, 15993), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create Source \n"""'], {}), "(' Unable to create Source \\n')\n", (15962, 15993), False, 'import sys\n'), ((16159, 16213), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create nvvidconv_src \n"""'], {}), "(' Unable to create nvvidconv_src \\n')\n", (16175, 16213), False, 'import sys\n'), ((16389, 16440), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create capsfilter \n"""'], {}), "(' Unable to create capsfilter \\n')\n", (16405, 16440), False, 'import sys\n'), ((16990, 17042), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create NvStreamMux \n"""'], {}), "(' Unable to create NvStreamMux \\n')\n", (17006, 17042), False, 'import sys\n'), ((17272, 17317), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create pgie \n"""'], {}), "(' Unable to create pgie \\n')\n", (17288, 17317), False, 'import sys\n'), ((17512, 17562), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create nvvidconv \n"""'], {}), "(' Unable to create nvvidconv \\n')\n", (17528, 17562), False, 'import sys\n'), ((17726, 17772), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create nvosd \n"""'], {}), "(' Unable to create nvosd \\n')\n", (17742, 17772), False, 'import sys\n'), ((18562, 18613), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create nvvidconv2 \n"""'], {}), "(' Unable to create nvvidconv2 \\n')\n", (18578, 18613), False, 'import sys\n'), ((18727, 18778), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create capsfilter \n"""'], {}), "(' Unable to create capsfilter \\n')\n", (18743, 18778), False, 'import sys\n'), ((18982, 19030), 
'sys.stderr.write', 'sys.stderr.write', (['""" Unable to create appsink \n"""'], {}), "(' Unable to create appsink \\n')\n", (18998, 19030), False, 'import sys\n'), ((20479, 20542), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to get the sink pad of streammux \n"""'], {}), "(' Unable to get the sink pad of streammux \\n')\n", (20495, 20542), False, 'import sys\n'), ((20641, 20700), 'sys.stderr.write', 'sys.stderr.write', (['""" Unable to get source pad of decoder \n"""'], {}), "(' Unable to get source pad of decoder \\n')\n", (20657, 20700), False, 'import sys\n'), ((21338, 21373), 'sys.stdout.write', 'sys.stdout.write', (['"""End-of-stream\n"""'], {}), "('End-of-stream\\n')\n", (21354, 21373), False, 'import sys\n'), ((24131, 24183), 'pyds.nvds_acquire_display_meta_from_pool', 'pyds.nvds_acquire_display_meta_from_pool', (['batch_meta'], {}), '(batch_meta)\n', (24171, 24183), False, 'import pyds\n'), ((25765, 25826), 'pyds.nvds_add_display_meta_to_frame', 'pyds.nvds_add_display_meta_to_frame', (['frame_meta', 'display_meta'], {}), '(frame_meta, display_meta)\n', (25800, 25826), False, 'import pyds\n'), ((27394, 27402), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', (27399, 27402), False, 'from time import sleep\n'), ((8724, 8776), 'sys.stderr.write', 'sys.stderr.write', (["('Warning: %s: %s\\n' % (err, debug))"], {}), "('Warning: %s: %s\\n' % (err, debug))\n", (8740, 8776), False, 'import sys\n'), ((10178, 10215), 'pyds.NvDsFrameMeta.cast', 'pyds.NvDsFrameMeta.cast', (['l_frame.data'], {}), '(l_frame.data)\n', (10201, 10215), False, 'import pyds\n'), ((12830, 12880), 'pyds.get_string', 'pyds.get_string', (['py_nvosd_text_params.display_text'], {}), '(py_nvosd_text_params.display_text)\n', (12845, 12880), False, 'import pyds\n'), ((21502, 21554), 'sys.stderr.write', 'sys.stderr.write', (["('Warning: %s: %s\\n' % (err, debug))"], {}), "('Warning: %s: %s\\n' % (err, debug))\n", (21518, 21554), False, 'import sys\n'), ((22956, 22993), 'pyds.NvDsFrameMeta.cast', 'pyds.NvDsFrameMeta.cast', (['l_frame.data'], {}), '(l_frame.data)\n', (22979, 22993), False, 'import pyds\n'), ((25701, 25751), 'pyds.get_string', 'pyds.get_string', (['py_nvosd_text_params.display_text'], {}), '(py_nvosd_text_params.display_text)\n', (25716, 25751), False, 'import pyds\n'), ((27172, 27191), 'gi.repository.Gst.Event.new_eos', 'Gst.Event.new_eos', ([], {}), '()\n', (27189, 27191), False, 'from gi.repository import GObject, Gst, GstVideo\n'), ((2378, 2389), 'time.time', 'time.time', ([], {}), '()\n', (2387, 2389), False, 'import time\n'), ((8877, 8927), 'sys.stderr.write', 'sys.stderr.write', (["('Error: %s: %s\\n' % (err, debug))"], {}), "('Error: %s: %s\\n' % (err, debug))\n", (8893, 8927), False, 'import sys\n'), ((10642, 10678), 'pyds.NvDsObjectMeta.cast', 'pyds.NvDsObjectMeta.cast', (['l_obj.data'], {}), '(l_obj.data)\n', (10666, 10678), False, 'import pyds\n'), ((21655, 21705), 'sys.stderr.write', 'sys.stderr.write', (["('Error: %s: %s\\n' % (err, debug))"], {}), "('Error: %s: %s\\n' % (err, debug))\n", (21671, 21705), False, 'import sys\n'), ((23513, 23549), 'pyds.NvDsObjectMeta.cast', 'pyds.NvDsObjectMeta.cast', (['l_obj.data'], {}), '(l_obj.data)\n', (23537, 23549), False, 'import pyds\n')]
|
"""
This module contains the functions necessary for the estimation process of transition
probabilities.
"""
import numba
import numpy as np
import pandas as pd
from estimagic.optimization.optimize import minimize
def estimate_transitions(df):
"""Estimating the transition proabilities.
The sub function for managing the estimation of the transition probabilities.
Parameters
----------
df : pandas.DataFrame
see :ref:`df`
Returns
-------
result_transitions : dictionary
see :ref:`result_trans`
"""
result_transitions = {}
usage = df["usage"].to_numpy(dtype=float)
usage = usage[~np.isnan(usage)].astype(int)
result_transitions["trans_count"] = transition_count = np.bincount(usage)
# Prepare DataFrame for estimagic
name = ["trans_prob"]
number = np.arange(1, len(transition_count) + 1)
index = pd.MultiIndex.from_product([name, number], names=["name", "number"])
params = pd.DataFrame(
transition_count / sum(transition_count),
columns=["value"],
index=index,
)
params.loc[params["value"] == 0] = 1e-20
constr = [{"loc": "trans_prob", "type": "probability"}]
raw_result_trans = minimize(
criterion=loglike_trans,
params=params,
algorithm="scipy_lbfgsb",
constraints=constr,
criterion_kwargs={"transition_count": transition_count},
logging=False,
)
result_transitions["x"] = raw_result_trans["solution_params"]["value"].to_numpy()
result_transitions["fun"] = raw_result_trans["solution_criterion"]
return result_transitions
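# A minimal usage sketch (an illustration; it only assumes that `df` carries a
# numeric "usage" column of per-period state increases, as read above):
#
#     import numpy as np
#     import pandas as pd
#
#     toy_df = pd.DataFrame({"usage": [0, 0, 1, 0, 2, 1, 0, np.nan]})
#     result = estimate_transitions(toy_df)
#     result["trans_count"]  # -> array([4, 2, 1]), i.e. np.bincount of the increases
#     result["x"]            # -> estimated transition probabilities, summing to one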
def loglike_trans_individual(params, transition_count):
"""
    Individual negative log-likelihood function of the transition probability estimation.
    Parameters
    ----------
    params : pandas.DataFrame
The untransformed transition probability guess.
transition_count : numpy.array
The pooled count of state increases per period in the data.
Returns
-------
log_like_individual : numpy.array
        The individual negative log-likelihood contributions of the transition probabilities.
"""
p_raw = params.loc["trans_prob", "value"].to_numpy()
log_like_individual = -np.multiply(transition_count, np.log(p_raw))
return log_like_individual
def loglike_trans(params, transition_count):
"""
Sum the individual negative log-likelihood.
Parameters
----------
params : pd.DataFrame
parameter guess of the transition probabilities.
transition_count : numpy.array
The pooled count of state increases per period in the data.
Returns
-------
log_like : float
the negative log likelihood given some transition probability guess.
"""
log_like = loglike_trans_individual(params, transition_count).sum()
return log_like
def loglike_trans_individual_derivative(params, transition_count):
"""
    Generates the Jacobian of the individual log-likelihood function of the
    transition probabilities. This function is currently not used but is kept
    for further development of the package, for when estimagic can handle
    constraints with analytical derivatives.
Parameters
----------
params : pd.DataFrame
parameter guess of the transition probabilities.
transition_count : numpy.array
The pooled count of state increases per period in the data.
Returns
-------
jacobian : np.array
a dim(params) x dim(params) matrix containing the Jacobian.
"""
p_raw = params.loc["trans_prob", "value"].to_numpy()
diagonal = -np.multiply(transition_count, 1 / p_raw)
jacobian = diagonal * np.eye(len(p_raw))
return jacobian
def loglike_trans_derivative(params, transition_count):
    """Sum the rows of the individual Jacobian to obtain the gradient of the
    pooled negative log-likelihood."""
    gradient = loglike_trans_individual_derivative(params, transition_count).sum(axis=1)
return gradient
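# A quick finite-difference check of the analytic gradient above (a sketch;
# `params` is laid out as in `estimate_transitions`):
#
#     eps = 1e-7
#     grad_fd = np.empty(len(params))
#     for i in range(len(params)):
#         shifted = params.copy()
#         shifted.iloc[i, 0] += eps
#         grad_fd[i] = (loglike_trans(shifted, transition_count)
#                       - loglike_trans(params, transition_count)) / eps
#     # grad_fd should be close to loglike_trans_derivative(params, transition_count)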
@numba.jit(nopython=True)
def create_transition_matrix(num_states, trans_prob):
"""
    Creating the transition matrix under the assumption that, in every row, the
    state increases have the same probabilities.
Parameters
----------
num_states : int
The size of the state space.
trans_prob : numpy.array
The probabilities of an state increase.
Returns
-------
trans_mat : numpy.array
see :ref:`trans_mat`
"""
trans_mat = np.zeros((num_states, num_states))
for i in range(num_states): # Loop over all states.
for j, p in enumerate(trans_prob): # Loop over the possible increases.
if i + j < num_states - 1:
trans_mat[i, i + j] = p
elif i + j == num_states - 1:
trans_mat[i, num_states - 1] = trans_prob[j:].sum()
else:
pass
return trans_mat
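# Worked example (an illustration): with trans_prob = [0.5, 0.3, 0.2] and
# num_states = 4, each row places the increase probabilities on its diagonal
# band and the last column absorbs the truncated tail:
#
#     create_transition_matrix(4, np.array([0.5, 0.3, 0.2]))
#     # [[0.5, 0.3, 0.2, 0. ],
#     #  [0. , 0.5, 0.3, 0.2],
#     #  [0. , 0. , 0.5, 0.5],   <- 0.3 + 0.2 lumped into the last state
#     #  [0. , 0. , 0. , 1. ]]   <- absorbing last state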
|
[
"numpy.multiply",
"numpy.log",
"numpy.zeros",
"numpy.isnan",
"pandas.MultiIndex.from_product",
"numba.jit",
"estimagic.optimization.optimize.minimize",
"numpy.bincount"
] |
[((3902, 3926), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3911, 3926), False, 'import numba\n'), ((738, 756), 'numpy.bincount', 'np.bincount', (['usage'], {}), '(usage)\n', (749, 756), True, 'import numpy as np\n'), ((887, 955), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (['[name, number]'], {'names': "['name', 'number']"}), "([name, number], names=['name', 'number'])\n", (913, 955), True, 'import pandas as pd\n'), ((1216, 1390), 'estimagic.optimization.optimize.minimize', 'minimize', ([], {'criterion': 'loglike_trans', 'params': 'params', 'algorithm': '"""scipy_lbfgsb"""', 'constraints': 'constr', 'criterion_kwargs': "{'transition_count': transition_count}", 'logging': '(False)'}), "(criterion=loglike_trans, params=params, algorithm='scipy_lbfgsb',\n constraints=constr, criterion_kwargs={'transition_count':\n transition_count}, logging=False)\n", (1224, 1390), False, 'from estimagic.optimization.optimize import minimize\n'), ((4387, 4421), 'numpy.zeros', 'np.zeros', (['(num_states, num_states)'], {}), '((num_states, num_states))\n', (4395, 4421), True, 'import numpy as np\n'), ((3625, 3665), 'numpy.multiply', 'np.multiply', (['transition_count', '(1 / p_raw)'], {}), '(transition_count, 1 / p_raw)\n', (3636, 3665), True, 'import numpy as np\n'), ((2275, 2288), 'numpy.log', 'np.log', (['p_raw'], {}), '(p_raw)\n', (2281, 2288), True, 'import numpy as np\n'), ((650, 665), 'numpy.isnan', 'np.isnan', (['usage'], {}), '(usage)\n', (658, 665), True, 'import numpy as np\n')]
|
#
# Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
# Licensed under the Universal Permissive License v 1.0 as shown at http://oss.oracle.com/licenses/upl.
#
from __future__ import print_function
import tensorflow as tf
from numpy import genfromtxt
import numpy as np
import os
from os.path import dirname
# set logging parameters
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.logging.set_verbosity(tf.logging.ERROR)
def oneHotEncoder(data, n_classes):
y = np.array([int(i[0]) for i in data])
y_onehot = [0]*len(y)
for i,j in enumerate(y):
y_onehot[i] = [0]*n_classes
y_onehot[i][j]= 1
return (y,y_onehot)
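# Example (an illustration): the first column of each row is read as the label.
#
#     y, y_onehot = oneHotEncoder([[1, 0.2, 0.3], [0, 0.1, 0.9]], n_classes=2)
#     # y        -> array([1, 0])
#     # y_onehot -> [[0, 1], [1, 0]]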
# model parameters
n_input = 200
n_classes = 2
# tunable hyper-parameters
shape1 = 10
shape2 = 20
init_learning_rate = 0.004
training_iters = 40000
batch_size = 256
dropout = 0.5 # probability to keep units (for dropout)
seed = 0
# display test metrics
test_step = 10
# set random seed for reproducibility
tf.set_random_seed(seed)
#NCI109 dataset
data_dir = os.path.join(dirname(os.getcwd()), "data/")
train_data = genfromtxt(data_dir+"NCI09_train.csv", delimiter=',') # Training data
test_data = genfromtxt(data_dir+"NCI09_test.csv", delimiter=',') # Test data
x_train = np.array([ i[1::] for i in train_data])
y_train, y_train_onehot = oneHotEncoder(train_data, n_classes)
x_test = np.array([ i[1::] for i in test_data])
y_test, y_test_onehot = oneHotEncoder(test_data, n_classes)
# tf Graph input
x = tf.placeholder(tf.float32, [None, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)
# Conv2D wrapper, with bias and relu activation
def conv2d(x, W, b, strides=1):
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
# MaxPool2D wrapper
def pool2d(x, k=2):
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')
# convnet model
def conv_net(x, keep_prob):
# Store layers weight & bias
weights = {
'conv_layer1': tf.Variable(tf.random_normal([5, 5, 1, 32], seed=seed)),
'conv_layer2': tf.Variable(tf.random_normal([5, 5, 32, 64], seed=seed)),
'dense_layer': tf.Variable(tf.random_normal([shape1*shape2*64, 1024], seed=seed)),
'output_layer': tf.Variable(tf.random_normal([1024, n_classes], seed=seed))
}
biases = {
'conv_layer1': tf.Variable(tf.random_normal([32], seed=seed)),
'conv_layer2': tf.Variable(tf.random_normal([64], seed=seed)),
'dense_layer': tf.Variable(tf.random_normal([1024], seed=seed)),
'output_layer': tf.Variable(tf.random_normal([n_classes], seed=seed))
}
# Reshape input picture
x = tf.reshape(x, shape=[-1, shape1, shape2, 1])
# Convolution Layer
conv1 = conv2d(x, weights['conv_layer1'], biases['conv_layer1'])
# Max Pooling (down-sampling)
conv1 = pool2d(conv1, k=1)
# Convolution Layer
conv2 = conv2d(conv1, weights['conv_layer2'], biases['conv_layer2'])
# Max Pooling (down-sampling)
conv2 = pool2d(conv2, k=1)
fc1 = tf.reshape(conv2, [-1, weights['dense_layer'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(fc1, weights['dense_layer']), biases['dense_layer'])
fc1 = tf.nn.relu(fc1)
# Apply Dropout
fc1 = tf.nn.dropout(fc1, keep_prob, seed=seed)
# Output, class prediction
out = tf.add(tf.matmul(fc1, weights['output_layer']), biases['output_layer'])
return out
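# Shape trace through conv_net (a sketch of the tensors above):
#   x reshaped            : (batch, 200) -> (batch, 10, 20, 1)
#   conv1 + pool (k=1)    : (batch, 10, 20, 32)   SAME padding, stride 1
#   conv2 + pool (k=1)    : (batch, 10, 20, 64)
#   fc1 (flatten + matmul): (batch, 10*20*64) -> (batch, 1024)
#   out                   : (batch, n_classes)
# Note: with k=1 the max-pooling is a no-op, which is why the dense layer is
# sized shape1*shape2*64 rather than for a down-sampled feature map.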
global_step = tf.Variable(0, trainable=False)
learning_rate = tf.train.exponential_decay(init_learning_rate, global_step, 10000, 0.96, staircase=True)
# construct model with input data
pred = conv_net(x, keep_prob)
# define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost,global_step=global_step)
# evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# initializing the variables
init = tf.global_variables_initializer()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
step = 1
# keep training until reach max iterations
while step * batch_size < training_iters:
np.random.seed(step+seed)
idx = np.random.randint(len(x_train), size=batch_size)
batch_x = x_train[idx,:]
batch_y = np.asarray(y_train_onehot)[idx,:]
# run optimization op (backprop)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
if step % test_step == 0:
# calculate accuracy for test data
            loss, acc = sess.run([cost, accuracy], feed_dict={x: x_test, y: np.asarray(y_test_onehot), keep_prob: 1.})
            print("Iterations: %s, Test Loss: %f, Test Accuracy: %f" % (str(step*batch_size), loss, acc))
step += 1
print("Complete!")
|
[
"numpy.random.seed",
"tensorflow.reshape",
"tensorflow.logging.set_verbosity",
"tensorflow.matmul",
"tensorflow.Variable",
"tensorflow.nn.conv2d",
"tensorflow.nn.relu",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"numpy.genfromtxt",
"tensorflow.set_random_seed",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.nn.bias_add",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.nn.max_pool",
"tensorflow.random_normal",
"tensorflow.train.exponential_decay",
"tensorflow.argmax",
"os.getcwd",
"numpy.array",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.dropout"
] |
[((399, 441), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (423, 441), True, 'import tensorflow as tf\n'), ((974, 998), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['seed'], {}), '(seed)\n', (992, 998), True, 'import tensorflow as tf\n'), ((1085, 1140), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + 'NCI09_train.csv')"], {'delimiter': '""","""'}), "(data_dir + 'NCI09_train.csv', delimiter=',')\n", (1095, 1140), False, 'from numpy import genfromtxt\n'), ((1168, 1222), 'numpy.genfromtxt', 'genfromtxt', (["(data_dir + 'NCI09_test.csv')"], {'delimiter': '""","""'}), "(data_dir + 'NCI09_test.csv', delimiter=',')\n", (1178, 1222), False, 'from numpy import genfromtxt\n'), ((1250, 1287), 'numpy.array', 'np.array', (['[i[1:] for i in train_data]'], {}), '([i[1:] for i in train_data])\n', (1258, 1287), True, 'import numpy as np\n'), ((1363, 1399), 'numpy.array', 'np.array', (['[i[1:] for i in test_data]'], {}), '([i[1:] for i in test_data])\n', (1371, 1399), True, 'import numpy as np\n'), ((1484, 1527), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_input]'], {}), '(tf.float32, [None, n_input])\n', (1498, 1527), True, 'import tensorflow as tf\n'), ((1532, 1577), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, n_classes]'], {}), '(tf.float32, [None, n_classes])\n', (1546, 1577), True, 'import tensorflow as tf\n'), ((1590, 1616), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1604, 1616), True, 'import tensorflow as tf\n'), ((3520, 3551), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'trainable': '(False)'}), '(0, trainable=False)\n', (3531, 3551), True, 'import tensorflow as tf\n'), ((3568, 3660), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['init_learning_rate', 'global_step', '(10000)', '(0.96)'], {'staircase': '(True)'}), '(init_learning_rate, global_step, 10000, 0.96,\n staircase=True)\n', (3594, 3660), True, 'import tensorflow as tf\n'), ((4117, 4150), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4148, 4150), True, 'import tensorflow as tf\n'), ((1706, 1774), 'tensorflow.nn.conv2d', 'tf.nn.conv2d', (['x', 'W'], {'strides': '[1, strides, strides, 1]', 'padding': '"""SAME"""'}), "(x, W, strides=[1, strides, strides, 1], padding='SAME')\n", (1718, 1774), True, 'import tensorflow as tf\n'), ((1783, 1803), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'b'], {}), '(x, b)\n', (1797, 1803), True, 'import tensorflow as tf\n'), ((1815, 1828), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {}), '(x)\n', (1825, 1828), True, 'import tensorflow as tf\n'), ((1882, 1957), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x'], {'ksize': '[1, k, k, 1]', 'strides': '[1, k, k, 1]', 'padding': '"""SAME"""'}), "(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')\n", (1896, 1957), True, 'import tensorflow as tf\n'), ((2747, 2791), 'tensorflow.reshape', 'tf.reshape', (['x'], {'shape': '[-1, shape1, shape2, 1]'}), '(x, shape=[-1, shape1, shape2, 1])\n', (2757, 2791), True, 'import tensorflow as tf\n'), ((3288, 3303), 'tensorflow.nn.relu', 'tf.nn.relu', (['fc1'], {}), '(fc1)\n', (3298, 3303), True, 'import tensorflow as tf\n'), ((3334, 3374), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['fc1', 'keep_prob'], {'seed': 'seed'}), '(fc1, keep_prob, seed=seed)\n', (3347, 3374), True, 'import tensorflow as tf\n'), ((3773, 3835), 'tensorflow.nn.softmax_cross_entropy_with_logits', 
'tf.nn.softmax_cross_entropy_with_logits', ([], {'logits': 'pred', 'labels': 'y'}), '(logits=pred, labels=y)\n', (3812, 3835), True, 'import tensorflow as tf\n'), ((3982, 4000), 'tensorflow.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (3991, 4000), True, 'import tensorflow as tf\n'), ((4002, 4017), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (4011, 4017), True, 'import tensorflow as tf\n'), ((4045, 4078), 'tensorflow.cast', 'tf.cast', (['correct_pred', 'tf.float32'], {}), '(correct_pred, tf.float32)\n', (4052, 4078), True, 'import tensorflow as tf\n'), ((4176, 4188), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4186, 4188), True, 'import tensorflow as tf\n'), ((1049, 1060), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1058, 1060), False, 'import os\n'), ((3215, 3253), 'tensorflow.matmul', 'tf.matmul', (['fc1', "weights['dense_layer']"], {}), "(fc1, weights['dense_layer'])\n", (3224, 3253), True, 'import tensorflow as tf\n'), ((3424, 3463), 'tensorflow.matmul', 'tf.matmul', (['fc1', "weights['output_layer']"], {}), "(fc1, weights['output_layer'])\n", (3433, 3463), True, 'import tensorflow as tf\n'), ((3849, 3900), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (3871, 3900), True, 'import tensorflow as tf\n'), ((4331, 4358), 'numpy.random.seed', 'np.random.seed', (['(step + seed)'], {}), '(step + seed)\n', (4345, 4358), True, 'import numpy as np\n'), ((2088, 2130), 'tensorflow.random_normal', 'tf.random_normal', (['[5, 5, 1, 32]'], {'seed': 'seed'}), '([5, 5, 1, 32], seed=seed)\n', (2104, 2130), True, 'import tensorflow as tf\n'), ((2168, 2211), 'tensorflow.random_normal', 'tf.random_normal', (['[5, 5, 32, 64]'], {'seed': 'seed'}), '([5, 5, 32, 64], seed=seed)\n', (2184, 2211), True, 'import tensorflow as tf\n'), ((2249, 2306), 'tensorflow.random_normal', 'tf.random_normal', (['[shape1 * shape2 * 64, 1024]'], {'seed': 'seed'}), '([shape1 * shape2 * 64, 1024], seed=seed)\n', (2265, 2306), True, 'import tensorflow as tf\n'), ((2341, 2387), 'tensorflow.random_normal', 'tf.random_normal', (['[1024, n_classes]'], {'seed': 'seed'}), '([1024, n_classes], seed=seed)\n', (2357, 2387), True, 'import tensorflow as tf\n'), ((2446, 2479), 'tensorflow.random_normal', 'tf.random_normal', (['[32]'], {'seed': 'seed'}), '([32], seed=seed)\n', (2462, 2479), True, 'import tensorflow as tf\n'), ((2517, 2550), 'tensorflow.random_normal', 'tf.random_normal', (['[64]'], {'seed': 'seed'}), '([64], seed=seed)\n', (2533, 2550), True, 'import tensorflow as tf\n'), ((2588, 2623), 'tensorflow.random_normal', 'tf.random_normal', (['[1024]'], {'seed': 'seed'}), '([1024], seed=seed)\n', (2604, 2623), True, 'import tensorflow as tf\n'), ((2662, 2702), 'tensorflow.random_normal', 'tf.random_normal', (['[n_classes]'], {'seed': 'seed'}), '([n_classes], seed=seed)\n', (2678, 2702), True, 'import tensorflow as tf\n'), ((4471, 4497), 'numpy.asarray', 'np.asarray', (['y_train_onehot'], {}), '(y_train_onehot)\n', (4481, 4497), True, 'import numpy as np\n'), ((4787, 4812), 'numpy.asarray', 'np.asarray', (['y_test_onehot'], {}), '(y_test_onehot)\n', (4797, 4812), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import numpy as np
import pyMolecular as mol
import pyMolecular.testing as moltest
import matplotlib.pyplot as plt
# ==================== Compare two point distributions (permutation invariant)
'''
points_ref = np.array([
[1.0,0.0,0.0], [-1.0, 0.0, 0.0],
[0.0,1.0,0.0], [ 0.0,-1.0, 0.0],
[0.0,0.0,1.0], [ 0.0, 0.0,-1.0]
], dtype=np.float64 )
mol.initComparator( points_ref )
points = points_ref.copy()
dist = mol.compDistance( points ); print( "dist (identical)", dist )
drnd = np.random.rand( points.shape[0], points.shape[1] )
points += drnd*0.01
print "========== drandom"
print points
dist = mol.compDistance( points.copy() ); print( "dist (drandom)", dist )
np.random.shuffle(points)
print points
dist = mol.compDistance( points.copy() ); print( "dist (shuffled)", dist )
'''
# ==================== Compare two TypePoint distributions (like atoms in a molecule with different atom types) (permutation invariant)
'''
atoms=np.genfromtxt( "/home/prokop/git/SimpleSimulationEngine/cpp/apps/MolecularEditor/inputs/PTCDA/PTCDA.bas", skip_header=1 )
#print "atoms=", atoms
points_ref = atoms[:,1:4].copy();
types_ref = atoms[:,0 ].astype(np.int32).copy();
print points_ref
mol.initComparatorT ( points_ref, types_ref )
print "========= identical"
points = atoms[:,1:4].copy();
types = atoms[:,0 ].astype(np.int32).copy(); print( "types = ", types)
dist = mol.compDistanceT( points_ref, types_ref ); print " >>> dist = ", dist
print "========= shuffled"
np.random.shuffle(atoms);
points = atoms[:,1:4].copy();
types = atoms[:,0 ].astype(np.int32).copy(); print( "types = ", types)
dist = mol.compDistanceT( points, types ); print " >>> dist = ", dist
print "========= drandom"
drnd = np.random.rand( points.shape[0], points.shape[1] )
points += drnd*0.01
dist = mol.compDistanceT( points, types ); print " >>> dist = ", dist
'''
# ==================== Compute a fast hash by plane-wave projection of atomic coordinates (permutation invariant)
'''
atoms=np.genfromtxt( "/home/prokop/git/SimpleSimulationEngine/cpp/apps/MolecularEditor/inputs/PTCDA/PTCDA.bas", skip_header=1 )
ks = np.array([
[1.0,0.0,0.0],
[0.0,1.0,0.0],
[0.0,0.0,1.0]
])
points_ref = atoms[:,1:4].copy();
coefs_ref = mol.getPlaneWaveDescriptor( points_ref, ks ); print "coefs (ref) ", coefs_ref
points_1 = points_ref.copy()
coefs = mol.getPlaneWaveDescriptor( points_1, ks ); print "coefs (identical) ", coefs
np.random.shuffle(points_1); points_1 = points_1.copy()
coefs = mol.getPlaneWaveDescriptor( points_1, ks ); print "coefs (shufled) ", coefs
points_3 = points_ref.copy() + np.random.rand( len(atoms), 3 ) * 0.25
coefs = mol.getPlaneWaveDescriptor( points_3, ks ); print "coefs (drand) ", coefs
'''
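# A minimal NumPy stand-in (hypothetical -- the real descriptors live inside
# pyMolecular) illustrating why a plane-wave projection is permutation
# invariant: summing the complex phases over atoms is independent of ordering.
def plane_wave_hash(points, ks):
    """points: (natoms, 3) coordinates; ks: (nk, 3) wave vectors."""
    phases = points.dot(ks.T)                  # (natoms, nk) phase per atom/wave
    return np.exp(1j * phases).sum(axis=0)    # order-independent complex sums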
# ==================== Testing the statistical properties of the plane-wave hash
nrep = 10
natoms = 100
Ns = range( 1, natoms )
dx = 0.5
k = 3.0
'''
xs = np.linspace(-10.0,10.0,1000)
ys = moltest.saw_sine( xs+100 )
plt.plot( xs, ys )
'''
xs_ref = np.random.rand( natoms ); #print "xs_ref = ", xs_ref
#xs = moltest.mutateN( xs_ref.copy(), 3, 0.1 ); print "xs = ", xs
coef_ref = moltest.hash_saw( xs_ref, k )
result = np.zeros((len(Ns)*nrep,2))
ires = 0
for N in Ns:
dx_ = dx/float(N)
for i in range(nrep):
xs = moltest.mutateN( xs_ref.copy(), N, dx_ )
coef = moltest.hash_saw( xs, k )
        result[ires, 0] = N
        result[ires, 1] = coef
ires+=1
plt.axhline(coef_ref)
plt.plot( result[:,0], result[:,1], '.' )
plt.show()
|
[
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"pyMolecular.testing.hash_saw",
"numpy.random.rand"
] |
[((3075, 3097), 'numpy.random.rand', 'np.random.rand', (['natoms'], {}), '(natoms)\n', (3089, 3097), True, 'import numpy as np\n'), ((3208, 3235), 'pyMolecular.testing.hash_saw', 'moltest.hash_saw', (['xs_ref', 'k'], {}), '(xs_ref, k)\n', (3224, 3235), True, 'import pyMolecular.testing as moltest\n'), ((3518, 3539), 'matplotlib.pyplot.axhline', 'plt.axhline', (['coef_ref'], {}), '(coef_ref)\n', (3529, 3539), True, 'import matplotlib.pyplot as plt\n'), ((3544, 3585), 'matplotlib.pyplot.plot', 'plt.plot', (['result[:, 0]', 'result[:, 1]', '"""."""'], {}), "(result[:, 0], result[:, 1], '.')\n", (3552, 3585), True, 'import matplotlib.pyplot as plt\n'), ((3587, 3597), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3595, 3597), True, 'import matplotlib.pyplot as plt\n'), ((3417, 3440), 'pyMolecular.testing.hash_saw', 'moltest.hash_saw', (['xs', 'k'], {}), '(xs, k)\n', (3433, 3440), True, 'import pyMolecular.testing as moltest\n')]
|
"""This module evaluates the forecasted trajectories against the ground truth."""
import argparse
from typing import Dict, List, Union
from collections import OrderedDict
import numpy as np
import pandas as pd
import pickle as pkl
from argoverse.evaluation.eval_forecasting import compute_forecasting_metrics
from argoverse.map_representation.map_api import ArgoverseMap
from utils.baseline_config import FEATURE_FORMAT
import matplotlib.pyplot as plt
def viz_predictions(
input_: np.ndarray,
output: np.ndarray,
target: np.ndarray,
centerlines: np.ndarray,
city_names: np.ndarray,
idx=None,
show: bool = False,
) -> None:
"""Visualize predicted trjectories.
Args:
input_ (numpy array): Input Trajectory with shape (num_tracks x obs_len x 2)
output (numpy array of list): Top-k predicted trajectories, each with shape (num_tracks x pred_len x 2)
target (numpy array): Ground Truth Trajectory with shape (num_tracks x pred_len x 2)
centerlines (numpy array of list of centerlines): Centerlines (Oracle/Top-k) for each trajectory
city_names (numpy array): city names for each trajectory
show (bool): if True, show
"""
num_tracks = input_.shape[0]
obs_len = input_.shape[1]
pred_len = target.shape[1]
plt.figure(0, figsize=(8, 7))
avm = ArgoverseMap()
for i in range(num_tracks):
plt.plot(
input_[i, :, 0],
input_[i, :, 1],
color="#ECA154",
label="Observed",
alpha=1,
linewidth=3,
zorder=15,
)
plt.plot(
input_[i, -1, 0],
input_[i, -1, 1],
"o",
color="#ECA154",
label="Observed",
alpha=1,
linewidth=3,
zorder=15,
markersize=9,
)
plt.plot(
target[i, :, 0],
target[i, :, 1],
color="#d33e4c",
label="Target",
alpha=1,
linewidth=3,
zorder=20,
)
plt.plot(
target[i, -1, 0],
target[i, -1, 1],
"o",
color="#d33e4c",
label="Target",
alpha=1,
linewidth=3,
zorder=20,
markersize=9,
)
for j in range(len(centerlines[i])):
plt.plot(
centerlines[i][j][:, 0],
centerlines[i][j][:, 1],
"--",
color="grey",
alpha=1,
linewidth=1,
zorder=0,
)
for j in range(len(output[i])):
plt.plot(
output[i][j][:, 0],
output[i][j][:, 1],
color="#007672",
label="Predicted",
alpha=1,
linewidth=3,
zorder=15,
)
plt.plot(
output[i][j][-1, 0],
output[i][j][-1, 1],
"o",
color="#007672",
label="Predicted",
alpha=1,
linewidth=3,
zorder=15,
markersize=9,
)
            for k in range(pred_len):
                lane_ids = avm.get_lane_ids_in_xy_bbox(
                    output[i][j][k, 0],
                    output[i][j][k, 1],
                    city_names[i],
                    query_search_range_manhattan=2.5,
                )
                # Draw the looked-up lanes, mirroring the observed/target loops below.
                [avm.draw_lane(lane_id, city_names[i]) for lane_id in lane_ids]
for j in range(obs_len):
lane_ids = avm.get_lane_ids_in_xy_bbox(
input_[i, j, 0],
input_[i, j, 1],
city_names[i],
query_search_range_manhattan=2.5,
)
[avm.draw_lane(lane_id, city_names[i]) for lane_id in lane_ids]
for j in range(pred_len):
lane_ids = avm.get_lane_ids_in_xy_bbox(
target[i, j, 0],
target[i, j, 1],
city_names[i],
query_search_range_manhattan=2.5,
)
[avm.draw_lane(lane_id, city_names[i]) for lane_id in lane_ids]
plt.axis("equal")
plt.xticks([])
plt.yticks([])
handles, labels = plt.gca().get_legend_handles_labels()
by_label = OrderedDict(zip(labels, handles))
if show:
plt.savefig('result_images/'+str(idx)+'.jpg')
plt.show()
def parse_arguments():
"""Parse command line arguments.
Returns:
parsed arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("--metrics",
action="store_true",
help="If true, compute metrics")
parser.add_argument("--gt", default="", type=str, help="path to gt file")
parser.add_argument("--forecast",
default="",
type=str,
help="path to forecast file")
parser.add_argument("--horizon",
default="",
type=int,
help="forecast horizon")
parser.add_argument("--obs_len",
default=20,
type=int,
help="Observed Length")
parser.add_argument("--miss_threshold",
default=2.0,
type=float,
help="Threshold for miss rate")
parser.add_argument("--features",
default="",
type=str,
help="path to test features pkl file")
parser.add_argument("--max_n_guesses",
default=0,
type=int,
help="Max number of guesses")
parser.add_argument(
"--prune_n_guesses",
default=0,
type=int,
help="Pruned number of guesses of non-map baseline using map",
)
parser.add_argument(
"--n_guesses_cl",
default=0,
type=int,
help="Number of guesses along each centerline",
)
parser.add_argument("--n_cl",
default=0,
type=int,
help="Number of centerlines to consider")
parser.add_argument("--viz",
action="store_true",
help="If true, visualize predictions")
parser.add_argument(
"--viz_seq_id",
default="",
type=str,
help="Sequence ids for the trajectories to be visualized",
)
parser.add_argument(
"--max_neighbors_cl",
default=3,
type=int,
help="Number of neighbors obtained for each centerline by the baseline",
)
return parser.parse_args()
def get_city_names_from_features(features_df: pd.DataFrame) -> Dict[int, str]:
"""Get sequence id to city name mapping from the features.
Args:
features_df: DataFrame containing the features
Returns:
city_names: Dict mapping sequence id to city name
"""
city_names = {}
for index, row in features_df.iterrows():
city_names[row["SEQUENCE"]] = row["FEATURES"][0][
FEATURE_FORMAT["CITY_NAME"]]
return city_names
def get_pruned_guesses(
forecasted_trajectories: Dict[int, List[np.ndarray]],
city_names: Dict[int, str],
gt_trajectories: Dict[int, np.ndarray],
) -> Dict[int, List[np.ndarray]]:
"""Prune the number of guesses using map.
Args:
forecasted_trajectories: Trajectories forecasted by the algorithm.
city_names: Dict mapping sequence id to city name.
gt_trajectories: Ground Truth trajectories.
Returns:
Pruned number of forecasted trajectories.
"""
args = parse_arguments()
avm = ArgoverseMap()
pruned_guesses = {}
for seq_id, trajectories in forecasted_trajectories.items():
city_name = city_names[seq_id]
da_points = []
for trajectory in trajectories:
raster_layer = avm.get_raster_layer_points_boolean(
trajectory, city_name, "driveable_area")
da_points.append(np.sum(raster_layer))
sorted_idx = np.argsort(da_points)[::-1]
pruned_guesses[seq_id] = [
trajectories[i] for i in sorted_idx[:args.prune_n_guesses]
]
return pruned_guesses
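# Worked example (an illustration): if three guesses have 35, 60 and 12
# trajectory points inside the driveable area, np.argsort(...)[::-1] yields
# the order [1, 0, 2], so with --prune_n_guesses 2 the guesses with 60 and
# 35 driveable-area points are kept.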
def get_m_trajectories_along_n_cl(
forecasted_trajectories: Dict[int, List[np.ndarray]]
) -> Dict[int, List[np.ndarray]]:
"""Given forecasted trajectories, get <args.n_guesses_cl> trajectories along each of <args.n_cl> centerlines.
Args:
forecasted_trajectories: Trajectories forecasted by the algorithm.
Returns:
<args.n_guesses_cl> trajectories along each of <args.n_cl> centerlines.
"""
args = parse_arguments()
selected_trajectories = {}
for seq_id, trajectories in forecasted_trajectories.items():
curr_selected_trajectories = []
max_predictions_along_cl = min(len(forecasted_trajectories[seq_id]),
args.n_cl * args.max_neighbors_cl)
for i in range(0, max_predictions_along_cl, args.max_neighbors_cl):
for j in range(i, i + args.n_guesses_cl):
curr_selected_trajectories.append(
forecasted_trajectories[seq_id][j])
selected_trajectories[seq_id] = curr_selected_trajectories
return selected_trajectories
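# Worked example (an illustration): with max_neighbors_cl=3, n_cl=2 and
# n_guesses_cl=2, the forecasts are assumed to come grouped per centerline in
# blocks of 3, so the loops above keep indices 0, 1 (first centerline) and
# 3, 4 (second centerline) out of the first 6 guesses.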
def viz_predictions_helper(
forecasted_trajectories: Dict[int, List[np.ndarray]],
gt_trajectories: Dict[int, np.ndarray],
features_df: pd.DataFrame,
viz_seq_id: Union[None, List[int]],
) -> None:
"""Visualize predictions.
Args:
forecasted_trajectories: Trajectories forecasted by the algorithm.
gt_trajectories: Ground Truth trajectories.
features_df: DataFrame containing the features
viz_seq_id: Sequence ids to be visualized
"""
args = parse_arguments()
seq_ids = gt_trajectories.keys() if viz_seq_id is None else viz_seq_id
for seq_id in seq_ids:
gt_trajectory = gt_trajectories[seq_id]
curr_features_df = features_df[features_df["SEQUENCE"] == seq_id]
input_trajectory = (
curr_features_df["FEATURES"].values[0]
[:args.obs_len, [FEATURE_FORMAT["X"], FEATURE_FORMAT["Y"]]].astype(
"float"))
output_trajectories = forecasted_trajectories[seq_id]
candidate_centerlines = curr_features_df[
"CANDIDATE_CENTERLINES"].values[0]
city_name = curr_features_df["FEATURES"].values[0][
0, FEATURE_FORMAT["CITY_NAME"]]
gt_trajectory = np.expand_dims(gt_trajectory, 0)
input_trajectory = np.expand_dims(input_trajectory, 0)
output_trajectories = np.expand_dims(np.array(output_trajectories), 0)
candidate_centerlines = np.expand_dims(np.array(candidate_centerlines),
0)
city_name = np.array([city_name])
viz_predictions(
input_trajectory,
output_trajectories,
gt_trajectory,
candidate_centerlines,
city_name,
idx=seq_id,
show=False,
)
if __name__ == "__main__":
args = parse_arguments()
with open(args.gt, "rb") as f:
gt_trajectories: Dict[int, np.ndarray] = pkl.load(f)
with open(args.forecast, "rb") as f:
forecasted_trajectories: Dict[int, List[np.ndarray]] = pkl.load(f)
with open(args.features, "rb") as f:
features_df: pd.DataFrame = pkl.load(f)
if args.metrics:
city_names = get_city_names_from_features(features_df)
# Get displacement error and dac on multiple guesses along each centerline
if not args.prune_n_guesses and args.n_cl:
forecasted_trajectories = get_m_trajectories_along_n_cl(
forecasted_trajectories)
num_trajectories = args.n_cl * args.n_guesses_cl
# Get displacement error and dac on pruned guesses
elif args.prune_n_guesses:
forecasted_trajectories = get_pruned_guesses(
forecasted_trajectories, city_names, gt_trajectories)
num_trajectories = args.prune_n_guesses
# Normal case
else:
num_trajectories = args.max_n_guesses
compute_forecasting_metrics(
forecasted_trajectories,
gt_trajectories,
city_names,
num_trajectories,
args.horizon,
args.miss_threshold,
)
if args.viz:
id_for_viz = None
if args.viz_seq_id:
with open(args.viz_seq_id, "rb") as f:
id_for_viz = pkl.load(f)
viz_predictions_helper(forecasted_trajectories, gt_trajectories,
features_df, id_for_viz)
|
[
"matplotlib.pyplot.show",
"argoverse.map_representation.map_api.ArgoverseMap",
"matplotlib.pyplot.plot",
"argparse.ArgumentParser",
"numpy.sum",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.axis",
"numpy.expand_dims",
"argoverse.evaluation.eval_forecasting.compute_forecasting_metrics",
"numpy.argsort",
"matplotlib.pyplot.figure",
"pickle.load",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.xticks"
] |
[((1335, 1364), 'matplotlib.pyplot.figure', 'plt.figure', (['(0)'], {'figsize': '(8, 7)'}), '(0, figsize=(8, 7))\n', (1345, 1364), True, 'import matplotlib.pyplot as plt\n'), ((1375, 1389), 'argoverse.map_representation.map_api.ArgoverseMap', 'ArgoverseMap', ([], {}), '()\n', (1387, 1389), False, 'from argoverse.map_representation.map_api import ArgoverseMap\n'), ((4626, 4651), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (4649, 4651), False, 'import argparse\n'), ((7889, 7903), 'argoverse.map_representation.map_api.ArgoverseMap', 'ArgoverseMap', ([], {}), '()\n', (7901, 7903), False, 'from argoverse.map_representation.map_api import ArgoverseMap\n'), ((1430, 1545), 'matplotlib.pyplot.plot', 'plt.plot', (['input_[i, :, 0]', 'input_[i, :, 1]'], {'color': '"""#ECA154"""', 'label': '"""Observed"""', 'alpha': '(1)', 'linewidth': '(3)', 'zorder': '(15)'}), "(input_[i, :, 0], input_[i, :, 1], color='#ECA154', label=\n 'Observed', alpha=1, linewidth=3, zorder=15)\n", (1438, 1545), True, 'import matplotlib.pyplot as plt\n'), ((1644, 1780), 'matplotlib.pyplot.plot', 'plt.plot', (['input_[i, -1, 0]', 'input_[i, -1, 1]', '"""o"""'], {'color': '"""#ECA154"""', 'label': '"""Observed"""', 'alpha': '(1)', 'linewidth': '(3)', 'zorder': '(15)', 'markersize': '(9)'}), "(input_[i, -1, 0], input_[i, -1, 1], 'o', color='#ECA154', label=\n 'Observed', alpha=1, linewidth=3, zorder=15, markersize=9)\n", (1652, 1780), True, 'import matplotlib.pyplot as plt\n'), ((1903, 2015), 'matplotlib.pyplot.plot', 'plt.plot', (['target[i, :, 0]', 'target[i, :, 1]'], {'color': '"""#d33e4c"""', 'label': '"""Target"""', 'alpha': '(1)', 'linewidth': '(3)', 'zorder': '(20)'}), "(target[i, :, 0], target[i, :, 1], color='#d33e4c', label='Target',\n alpha=1, linewidth=3, zorder=20)\n", (1911, 2015), True, 'import matplotlib.pyplot as plt\n'), ((2115, 2249), 'matplotlib.pyplot.plot', 'plt.plot', (['target[i, -1, 0]', 'target[i, -1, 1]', '"""o"""'], {'color': '"""#d33e4c"""', 'label': '"""Target"""', 'alpha': '(1)', 'linewidth': '(3)', 'zorder': '(20)', 'markersize': '(9)'}), "(target[i, -1, 0], target[i, -1, 1], 'o', color='#d33e4c', label=\n 'Target', alpha=1, linewidth=3, zorder=20, markersize=9)\n", (2123, 2249), True, 'import matplotlib.pyplot as plt\n'), ((4204, 4221), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (4212, 4221), True, 'import matplotlib.pyplot as plt\n'), ((4230, 4244), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (4240, 4244), True, 'import matplotlib.pyplot as plt\n'), ((4253, 4267), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (4263, 4267), True, 'import matplotlib.pyplot as plt\n'), ((10794, 10826), 'numpy.expand_dims', 'np.expand_dims', (['gt_trajectory', '(0)'], {}), '(gt_trajectory, 0)\n', (10808, 10826), True, 'import numpy as np\n'), ((10854, 10889), 'numpy.expand_dims', 'np.expand_dims', (['input_trajectory', '(0)'], {}), '(input_trajectory, 0)\n', (10868, 10889), True, 'import numpy as np\n'), ((11119, 11140), 'numpy.array', 'np.array', (['[city_name]'], {}), '([city_name])\n', (11127, 11140), True, 'import numpy as np\n'), ((11516, 11527), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (11524, 11527), True, 'import pickle as pkl\n'), ((11633, 11644), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (11641, 11644), True, 'import pickle as pkl\n'), ((11723, 11734), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (11731, 11734), True, 'import pickle as pkl\n'), ((12498, 12636), 
'argoverse.evaluation.eval_forecasting.compute_forecasting_metrics', 'compute_forecasting_metrics', (['forecasted_trajectories', 'gt_trajectories', 'city_names', 'num_trajectories', 'args.horizon', 'args.miss_threshold'], {}), '(forecasted_trajectories, gt_trajectories,\n city_names, num_trajectories, args.horizon, args.miss_threshold)\n', (12525, 12636), False, 'from argoverse.evaluation.eval_forecasting import compute_forecasting_metrics\n'), ((2422, 2537), 'matplotlib.pyplot.plot', 'plt.plot', (['centerlines[i][j][:, 0]', 'centerlines[i][j][:, 1]', '"""--"""'], {'color': '"""grey"""', 'alpha': '(1)', 'linewidth': '(1)', 'zorder': '(0)'}), "(centerlines[i][j][:, 0], centerlines[i][j][:, 1], '--', color=\n 'grey', alpha=1, linewidth=1, zorder=0)\n", (2430, 2537), True, 'import matplotlib.pyplot as plt\n'), ((2713, 2835), 'matplotlib.pyplot.plot', 'plt.plot', (['output[i][j][:, 0]', 'output[i][j][:, 1]'], {'color': '"""#007672"""', 'label': '"""Predicted"""', 'alpha': '(1)', 'linewidth': '(3)', 'zorder': '(15)'}), "(output[i][j][:, 0], output[i][j][:, 1], color='#007672', label=\n 'Predicted', alpha=1, linewidth=3, zorder=15)\n", (2721, 2835), True, 'import matplotlib.pyplot as plt\n'), ((2970, 3112), 'matplotlib.pyplot.plot', 'plt.plot', (['output[i][j][-1, 0]', 'output[i][j][-1, 1]', '"""o"""'], {'color': '"""#007672"""', 'label': '"""Predicted"""', 'alpha': '(1)', 'linewidth': '(3)', 'zorder': '(15)', 'markersize': '(9)'}), "(output[i][j][-1, 0], output[i][j][-1, 1], 'o', color='#007672',\n label='Predicted', alpha=1, linewidth=3, zorder=15, markersize=9)\n", (2978, 3112), True, 'import matplotlib.pyplot as plt\n'), ((4472, 4482), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4480, 4482), True, 'import matplotlib.pyplot as plt\n'), ((8292, 8313), 'numpy.argsort', 'np.argsort', (['da_points'], {}), '(da_points)\n', (8302, 8313), True, 'import numpy as np\n'), ((10935, 10964), 'numpy.array', 'np.array', (['output_trajectories'], {}), '(output_trajectories)\n', (10943, 10964), True, 'import numpy as np\n'), ((11016, 11047), 'numpy.array', 'np.array', (['candidate_centerlines'], {}), '(candidate_centerlines)\n', (11024, 11047), True, 'import numpy as np\n'), ((4294, 4303), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4301, 4303), True, 'import matplotlib.pyplot as plt\n'), ((8248, 8268), 'numpy.sum', 'np.sum', (['raster_layer'], {}), '(raster_layer)\n', (8254, 8268), True, 'import numpy as np\n'), ((12868, 12879), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (12876, 12879), True, 'import pickle as pkl\n')]
|
"""
Code modified from allen.
"""
import io
import logging
import itertools
from typing import Optional, Tuple, Iterator, Any
import numpy
import torch
from torch.nn.functional import embedding
from ..common import Vocabulary
from ..common.util import printf, get_file_extension
from ..modules import util
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
class Embedding(torch.nn.Module):
"""
A more featureful embedding module than the default in Pytorch. Adds the ability to:
1. embed higher-order inputs
2. pre-specify the weight matrix
3. use a non-trainable embedding
4. project the resultant embeddings to some other dimension (which only makes sense with
non-trainable embeddings).
Parameters
----------
num_embeddings : int
Size of the dictionary of embeddings (vocabulary size).
embedding_dim : int
The size of each embedding vector.
weight : torch.FloatTensor, (optional, default=None)
A pre-initialised weight matrix for the embedding lookup, allowing the use of
pretrained vectors.
padding_index : int, (optional, default=None)
If given, pads the output with zeros whenever it encounters the index.
trainable : bool, (optional, default=True)
Whether or not to optimize the embedding parameters.
max_norm : float, (optional, default=None)
If given, will renormalize the embeddings to always have a norm lesser than this
norm_type : float, (optional, default=2)
The p of the p-norm to compute for the max_norm option
scale_grad_by_freq : boolean, (optional, default=False)
If given, this will scale gradients by the frequency of the words in the mini-batch.
sparse : bool, (optional, default=False)
Whether or not the Pytorch backend should use a sparse representation of the embedding weight.
vocab_namespace : str, (optional, default=None)
In case of fine-tuning/transfer learning, the model's embedding matrix needs to be
extended according to the size of extended-vocabulary. To be able to know how much to
        extend the embedding-matrix, it's necessary to know which vocab_namespace was used to
construct it in the original training. We store vocab_namespace used during the original
training as an attribute, so that it can be retrieved during fine-tuning.
pretrained_file : str, (optional, default=None)
Used to keep track of what is the source of the weights and loading more embeddings at test time.
**It does not load the weights from this pretrained_file.** For that purpose, use
``Embedding.from_params``.
Returns
-------
An Embedding module.
"""
def __init__(self,
num_embeddings: int,
embedding_dim: int,
weight: torch.FloatTensor = None,
padding_index: int = 0,
trainable: bool = True,
max_norm: float = None,
norm_type: float = 2.,
scale_grad_by_freq: bool = False,
sparse: bool = False,
**kwargs: Any) -> None:
super(Embedding, self).__init__()
self.num_embeddings = num_embeddings
self.padding_index = padding_index
self.max_norm = max_norm
self.norm_type = norm_type
self.scale_grad_by_freq = scale_grad_by_freq
self.sparse = sparse
self.output_dim = embedding_dim
if weight is None:
weight = torch.FloatTensor(num_embeddings, embedding_dim)
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
torch.nn.init.xavier_uniform_(self.weight)
else:
if weight.size() != (num_embeddings, embedding_dim):
raise Exception("A weight matrix was passed with contradictory embedding shapes.")
self.weight = torch.nn.Parameter(weight, requires_grad=trainable)
if self.padding_index is not None:
self.weight.data[self.padding_index].fill_(0)
def forward(self, inputs, **kwargs): # pylint: disable=arguments-differ
# inputs may have extra dimensions (batch_size, d1, ..., dn, sequence_length),
# but embedding expects (batch_size, sequence_length), so pass inputs to
# util.combine_initial_dims (which is a no-op if there are no extra dimensions).
# Remember the original size.
original_size = inputs.size()
inputs = util.combine_initial_dims(inputs)
embedded = embedding(inputs, self.weight,
padding_idx=self.padding_index,
max_norm=self.max_norm,
norm_type=self.norm_type,
scale_grad_by_freq=self.scale_grad_by_freq,
sparse=self.sparse)
# Now (if necessary) add back in the extra dimensions.
embedded = util.uncombine_initial_dims(embedded, original_size)
return embedded
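    # Shape sketch for forward() (an illustration): an input of size
    # (batch, d1, ..., dn, seq_len) is flattened by combine_initial_dims to
    # (batch * d1 * ... * dn, seq_len), embedded to (..., seq_len,
    # embedding_dim), and restored by uncombine_initial_dims to
    # (batch, d1, ..., dn, seq_len, embedding_dim).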
@classmethod
def from_pretrain(cls,
vocab: Vocabulary,
pretrained_file: str,
vocab_namespace: str,
padding_index: int = 0,
trainable: bool = False,
max_norm: float = None,
norm_type: float = 2.,
scale_grad_by_freq: bool = False,
sparse: bool = False
) -> 'Embedding': # type: ignore
"""
We need the vocabulary here to know how many items we need to embed, and we look for a
``vocab_namespace`` key in the parameter dictionary to know which vocabulary to use. If
you know beforehand exactly how many embeddings you need, or aren't using a vocabulary
mapping for the things getting embedded here, then you can pass in the ``num_embeddings``
key directly, and the vocabulary will be ignored.
In the configuration file, a file containing pretrained embeddings can be specified
using the parameter ``"pretrained_file"``.
It can be the path to a local file.
Format:
* text file - an utf-8 encoded text file with space separated fields::
[word] [dim 1] [dim 2] ...
The text file can eventually be compressed with gzip, bz2, lzma or zip.
"""
# If we're loading a saved model, we don't want to actually read a pre-trained
# embedding file - the embeddings will just be in our saved weights, and we might not
# have the original embedding file anymore, anyway.
tokens_to_keep = set(vocab.get_index_to_token_vocabulary(vocab_namespace).values())
vocab_size = vocab.get_vocab_size(vocab_namespace)
embeddings = dict()
# First we read the embeddings from the file, only keeping vectors for the words we need.
printf("Reading pretrained embeddings from file")
with EmbeddingsTextFile(pretrained_file) as embeddings_file:
embedding_dim = embeddings_file.embedding_dim
for line in embeddings_file:
token = line.split(' ', 1)[0]
                if token in tokens_to_keep:
                    fields = line.rstrip().split(' ')
                    if embedding_dim is None:
                        # Headerless file: infer the dimension from the first vector line.
                        embedding_dim = len(fields) - 1
                    if len(fields) - 1 != embedding_dim:
# Sometimes there are funny unicode parsing problems that lead to different
# fields lengths (e.g., a word with a unicode space character that splits
# into more than one column). We skip those lines. Note that if you have
# some kind of long header, this could result in all of your lines getting
# skipped. It's hard to check for that here; you just have to look in the
# embedding_misses_file and at the model summary to make sure things look
# like they are supposed to.
logger.warning("Found line with wrong number of dimensions (expected: %d; actual: %d): %s",
embedding_dim, len(fields) - 1, line)
continue
vector = numpy.asarray(fields[1:], dtype='float32')
embeddings[token] = vector
if not embeddings:
raise Exception("No embeddings of correct dimension found; you probably "
"misspecified your embedding_dim parameter, or didn't "
"pre-populate your Vocabulary")
all_embeddings = numpy.asarray(list(embeddings.values()))
embeddings_mean = float(numpy.mean(all_embeddings))
embeddings_std = float(numpy.std(all_embeddings))
# Now we initialize the weight matrix for an embedding layer, starting with random vectors,
# then filling in the word vectors we just read.
printf("Initializing pre-trained embedding layer")
embedding_matrix = torch.FloatTensor(vocab_size, embedding_dim).normal_(embeddings_mean,
embeddings_std)
num_tokens_found = 0
index_to_token = vocab.get_index_to_token_vocabulary(vocab_namespace)
for i in range(vocab_size):
token = index_to_token[i]
# If we don't have a pre-trained vector for this word, we'll just leave this row alone,
# so the word has a random initialization.
if token in embeddings:
embedding_matrix[i] = torch.FloatTensor(embeddings[token])
num_tokens_found += 1
else:
logger.debug("Token %s was not found in the embedding file. Initialising randomly.", token)
printf(f"Pretrained embeddings were found for {num_tokens_found} out of {vocab_size} tokens")
return cls(num_embeddings=embedding_matrix.size(0),
embedding_dim=embedding_matrix.size(1),
weight=embedding_matrix,
padding_index=padding_index,
trainable=trainable,
max_norm=max_norm,
norm_type=norm_type,
scale_grad_by_freq=scale_grad_by_freq,
sparse=sparse)
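# Hedged configuration sketch (added; the key names follow the parameters
# described in the docstring above, the values are purely illustrative):
#
#     params = {"embedding_dim": 300,
#               "vocab_namespace": "tokens",
#               "pretrained_file": "/path/to/embeddings.txt.gz",
#               "trainable": True}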
class EmbeddingsTextFile(Iterator[str]):
"""
Utility class for opening embeddings text files. Handles various compression formats,
as well as context management.
Parameters
----------
    file_uri: a file system path or a URL of an optionally compressed text file
encoding: str
"""
DEFAULT_ENCODING = 'utf-8'
def __init__(self,
file_uri: str,
encoding: str = DEFAULT_ENCODING) -> None:
# All the python packages for compressed files share the same interface of io.open
extension = get_file_extension(file_uri)
# Some systems don't have support for all of these libraries, so we import them only
# when necessary.
package = None
if extension in ['.txt', '.vec']:
package = io
elif extension == '.gz':
import gzip
package = gzip
elif extension == ".bz2":
import bz2
package = bz2
elif extension == ".lzma":
import lzma
package = lzma
if package is None:
logger.warning('The embeddings file has an unknown file extension "%s". '
'We will assume the file is an (uncompressed) text file', extension)
package = io
self._handle = package.open(file_uri, 'rt', encoding=encoding) # type: ignore
# To use this with tqdm we'd like to know the number of tokens. It's possible that the
# first line of the embeddings file contains this: if it does, we want to start iteration
# from the 2nd line, otherwise we want to start from the 1st.
# Unfortunately, once we read the first line, we cannot move back the file iterator
# because the underlying file may be "not seekable"; we use itertools.chain instead.
first_line = next(self._handle) # this moves the iterator forward
self.num_tokens, self.embedding_dim = self._read_first_line(first_line)
if self.num_tokens:
# the first line is a header line: start iterating from the 2nd line
self._iterator = self._handle
else:
# the first line is not a header line: start iterating from the 1st line
self._iterator = itertools.chain([first_line], self._handle)
def read(self) -> str:
return ''.join(self._iterator)
def readline(self) -> str:
return next(self._iterator)
def __enter__(self) -> 'EmbeddingsTextFile':
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self._handle.close()
def __iter__(self) -> 'EmbeddingsTextFile':
return self
def __next__(self) -> str:
return next(self._iterator)
def __len__(self) -> Optional[int]:
""" Hack for tqdm: no need for explicitly passing ``total=file.num_tokens`` """
if self.num_tokens:
return self.num_tokens
        raise AttributeError('an object of type EmbeddingsTextFile has "len()" only if the underlying '
                             'text file declares the number of tokens (i.e. the number of lines '
                             'following) in the first line. That is not the case for this particular '
                             'instance.')
@staticmethod
def _read_first_line(line: str) -> Optional[Tuple]:
""" This function takes in input a string and if it contains 1 or 2 integers, it assumes the
largest one it the number of tokens. Returns None if the line doesn't match that pattern. """
fields = line.split(' ')
if 1 <= len(fields) <= 2:
try:
int_fields = [int(x) for x in fields]
except ValueError:
return None, None
else:
num_tokens, embedding_dim = max(int_fields), min(int_fields)
logger.info('Recognized a header line with number of tokens: %d',
num_tokens)
return num_tokens, embedding_dim
        else:
            # Not a header line (e.g. the file starts directly with a vector):
            # fall back to iterating from the first line.
            return None, None
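# Hedged usage sketch (added; 'glove.txt.gz' is a made-up path): the class picks
# a decompression package from the file extension and then behaves like an
# ordinary line iterator.
#
#     with EmbeddingsTextFile('glove.txt.gz') as embeddings_file:
#         for line in embeddings_file:
#             token = line.split(' ', 1)[0]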
|
[
"torch.nn.Parameter",
"numpy.std",
"torch.nn.init.xavier_uniform_",
"numpy.asarray",
"torch.FloatTensor",
"torch.nn.functional.embedding",
"numpy.mean",
"itertools.chain",
"logging.getLogger"
] |
[((319, 346), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (336, 346), False, 'import logging\n'), ((4593, 4779), 'torch.nn.functional.embedding', 'embedding', (['inputs', 'self.weight'], {'padding_idx': 'self.padding_index', 'max_norm': 'self.max_norm', 'norm_type': 'self.norm_type', 'scale_grad_by_freq': 'self.scale_grad_by_freq', 'sparse': 'self.sparse'}), '(inputs, self.weight, padding_idx=self.padding_index, max_norm=\n self.max_norm, norm_type=self.norm_type, scale_grad_by_freq=self.\n scale_grad_by_freq, sparse=self.sparse)\n', (4602, 4779), False, 'from torch.nn.functional import embedding\n'), ((3571, 3619), 'torch.FloatTensor', 'torch.FloatTensor', (['num_embeddings', 'embedding_dim'], {}), '(num_embeddings, embedding_dim)\n', (3588, 3619), False, 'import torch\n'), ((3646, 3697), 'torch.nn.Parameter', 'torch.nn.Parameter', (['weight'], {'requires_grad': 'trainable'}), '(weight, requires_grad=trainable)\n', (3664, 3697), False, 'import torch\n'), ((3710, 3752), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.weight'], {}), '(self.weight)\n', (3739, 3752), False, 'import torch\n'), ((3957, 4008), 'torch.nn.Parameter', 'torch.nn.Parameter', (['weight'], {'requires_grad': 'trainable'}), '(weight, requires_grad=trainable)\n', (3975, 4008), False, 'import torch\n'), ((8776, 8802), 'numpy.mean', 'numpy.mean', (['all_embeddings'], {}), '(all_embeddings)\n', (8786, 8802), False, 'import numpy\n'), ((8835, 8860), 'numpy.std', 'numpy.std', (['all_embeddings'], {}), '(all_embeddings)\n', (8844, 8860), False, 'import numpy\n'), ((12678, 12721), 'itertools.chain', 'itertools.chain', (['[first_line]', 'self._handle'], {}), '([first_line], self._handle)\n', (12693, 12721), False, 'import itertools\n'), ((9105, 9149), 'torch.FloatTensor', 'torch.FloatTensor', (['vocab_size', 'embedding_dim'], {}), '(vocab_size, embedding_dim)\n', (9122, 9149), False, 'import torch\n'), ((9682, 9718), 'torch.FloatTensor', 'torch.FloatTensor', (['embeddings[token]'], {}), '(embeddings[token])\n', (9699, 9718), False, 'import torch\n'), ((8329, 8371), 'numpy.asarray', 'numpy.asarray', (['fields[1:]'], {'dtype': '"""float32"""'}), "(fields[1:], dtype='float32')\n", (8342, 8371), False, 'import numpy\n')]
|
# Lint as: python3
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_client."""
from concurrent import futures
import time
import numpy as np
from reverb import client as reverb_client
from reverb import item_selectors
from reverb import rate_limiters
from reverb import server
from reverb import tf_client
import tensorflow.compat.v1 as tf
def make_tables_and_server():
tables = [
server.Table(
'dist',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1)),
server.Table(
'dist2',
sampler=item_selectors.Prioritized(priority_exponent=1),
remover=item_selectors.Fifo(),
max_size=1000000,
rate_limiter=rate_limiters.MinSize(1)),
]
return tables, server.Server(tables=tables)
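# Note (added): the helper returns both the table handles and a server bound to
# a freshly allocated port; the tests below connect to it with
# reverb_client.Client(f'localhost:{server.port}').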
class SampleOpTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tables, cls._server = make_tables_and_server()
cls._client = reverb_client.Client(f'localhost:{cls._server.port}')
def tearDown(self):
super().tearDown()
self._client.reset('dist')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._server.stop()
def test_sets_meta_data_fields(self):
input_data = [np.ones((81, 81), dtype=np.float64)]
self._client.insert(input_data, {'dist': 1})
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
sample = session.run(client.sample('dist', [tf.float64]))
np.testing.assert_equal(input_data, sample.data)
self.assertNotEqual(sample.info.key, 0)
self.assertEqual(sample.info.probability, 1)
self.assertEqual(sample.info.table_size, 1)
self.assertEqual(sample.info.priority, 1)
def test_dtype_mismatch_result_in_error_raised(self):
data = [np.zeros((81, 81))]
self._client.insert(data, {'dist': 1})
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
with self.assertRaises(tf.errors.InternalError):
session.run(client.sample('dist', [tf.float32]))
def test_forwards_server_error(self):
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
with self.assertRaises(tf.errors.NotFoundError):
session.run(client.sample('invalid', [tf.float64]))
def test_retries_until_success_or_fatal_error(self):
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
with futures.ThreadPoolExecutor(max_workers=1) as executor:
sample = executor.submit(session.run,
client.sample('dist', [tf.float64]))
input_data = [np.zeros((81, 81))]
self._client.insert(input_data, {'dist': 1})
np.testing.assert_equal(input_data, sample.result().data)
class UpdatePrioritiesOpTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tables, cls._server = make_tables_and_server()
cls._client = reverb_client.Client(f'localhost:{cls._server.port}')
def tearDown(self):
super().tearDown()
self._client.reset('dist')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._server.stop()
def test_shape_result_in_error_raised(self):
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
update_op = client.update_priorities(
tf.constant('dist'), tf.constant([1, 2], dtype=tf.uint64),
tf.constant([1], dtype=tf.float64))
with self.assertRaises(tf.errors.InvalidArgumentError):
session.run(update_op)
def test_priority_update_is_applied(self):
# Start with uniform distribution
for i in range(4):
self._client.insert([np.array([i], dtype=np.uint32)], {'dist': 1})
for _ in range(100):
if self._tables[0].info.current_size == 4:
break
time.sleep(0.01)
self.assertEqual(self._tables[0].info.current_size, 4)
    # Sample until we have received all 4 items.
items = {}
while len(items) < 4:
item = next(self._client.sample('dist'))[0]
items[item.info.key] = item.info.probability
self.assertEqual(item.info.probability, 0.25)
# Update the priority of one of the items.
update_key = next(iter(items.keys()))
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
update_op = client.update_priorities(
table=tf.constant('dist'),
keys=tf.constant([update_key], dtype=tf.uint64),
priorities=tf.constant([3], dtype=tf.float64))
self.assertIsNone(session.run(update_op))
# The updated item now has priority 3 and the other 3 items have priority 1
# each. The probability of sampling the new item should thus be 50%. We
# sample until the updated item is seen and check that the probability (and
# thus the priority) has been updated.
for _ in range(1000):
item = next(self._client.sample('dist'))[0]
if item.info.key == update_key:
self.assertEqual(item.info.probability, 0.5)
break
else:
self.fail('Updated item was not found')
class InsertOpTest(tf.test.TestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._tables, cls._server = make_tables_and_server()
cls._client = reverb_client.Client(f'localhost:{cls._server.port}')
def tearDown(self):
super().tearDown()
self._client.reset('dist')
self._client.reset('dist2')
@classmethod
def tearDownClass(cls):
super().tearDownClass()
cls._server.stop()
def setUp(self):
super().setUp()
self.data = [tf.constant([1, 2, 3], dtype=tf.int8)]
def test_checks_that_table_has_rank_1(self):
client = tf_client.TFClient(self._client.server_address)
priorities = tf.constant([1.0], dtype=tf.float64)
# Works for rank 1.
client.insert(self.data, tf.constant(['dist']), priorities)
# Does not work for rank > 1.
with self.assertRaises(ValueError):
client.insert(self.data, tf.constant([['dist']]), priorities)
# Does not work for rank < 1.
with self.assertRaises(ValueError):
client.insert(self.data, tf.constant('dist'), priorities)
def test_checks_dtype_of_table_argument(self):
client = tf_client.TFClient(self._client.server_address)
with self.assertRaises(ValueError):
client.insert(self.data, tf.constant([1]),
tf.constant([1.0], dtype=tf.float64))
def test_checks_that_priorities_argument_has_rank_1(self):
client = tf_client.TFClient(self._client.server_address)
data = [tf.constant([1, 2])]
tables = tf.constant(['dist'])
# Works for rank 1.
client.insert(data, tables, tf.constant([1.0], dtype=tf.float64))
# Does not work for rank > 1.
with self.assertRaises(ValueError):
client.insert(data, tables, tf.constant([[1.0]], dtype=tf.float64))
# Does not work for rank < 1.
with self.assertRaises(ValueError):
client.insert(data, tables, tf.constant(1.0, dtype=tf.float64))
def test_checks_that_priorities_argument_has_dtype_float64(self):
client = tf_client.TFClient(self._client.server_address)
with self.assertRaises(ValueError):
client.insert(self.data, tf.constant(['dist']),
tf.constant([1.0], dtype=tf.float32))
def test_checks_that_tables_and_priorities_arguments_have_same_shape(self):
client = tf_client.TFClient(self._client.server_address)
with self.assertRaises(ValueError):
client.insert(self.data, tf.constant(['dist', 'dist2']),
tf.constant([1.0], dtype=tf.float64))
def test_single_table_insert(self):
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
insert_op = client.insert(
data=[tf.constant([1, 2, 3], dtype=tf.int8)],
tables=tf.constant(['dist']),
priorities=tf.constant([1.0], dtype=tf.float64))
sample_op = client.sample('dist', [tf.int8])
# Check that insert op succeeds.
self.assertIsNone(session.run(insert_op))
# Check that the sampled data matches the inserted.
sample = session.run(sample_op)
self.assertLen(sample.data, 1)
np.testing.assert_equal(
np.array([1, 2, 3], dtype=np.int8), sample.data[0])
def test_multi_table_insert(self):
with self.session() as session:
client = tf_client.TFClient(self._client.server_address)
insert_op = client.insert(
data=[tf.constant([1, 2, 3], dtype=tf.int8)],
tables=tf.constant(['dist', 'dist2']),
priorities=tf.constant([1.0, 2.0], dtype=tf.float64))
sample_ops = [
client.sample('dist', [tf.int8]),
client.sample('dist2', [tf.int8])
]
# Check that insert op succeeds.
self.assertIsNone(session.run(insert_op))
# Check that the sampled data matches the inserted in all tables.
for sample_op in sample_ops:
sample = session.run(sample_op)
self.assertLen(sample.data, 1)
np.testing.assert_equal(
np.array([1, 2, 3], dtype=np.int8), sample.data[0])
if __name__ == '__main__':
tf.disable_eager_execution()
tf.test.main()
|
[
"reverb.item_selectors.Fifo",
"reverb.rate_limiters.MinSize",
"tensorflow.compat.v1.constant",
"numpy.zeros",
"numpy.ones",
"time.sleep",
"tensorflow.compat.v1.disable_eager_execution",
"tensorflow.compat.v1.test.main",
"numpy.array",
"reverb.client.Client",
"numpy.testing.assert_equal",
"concurrent.futures.ThreadPoolExecutor",
"reverb.tf_client.TFClient",
"reverb.server.Server",
"reverb.item_selectors.Prioritized"
] |
[((9890, 9918), 'tensorflow.compat.v1.disable_eager_execution', 'tf.disable_eager_execution', ([], {}), '()\n', (9916, 9918), True, 'import tensorflow.compat.v1 as tf\n'), ((9921, 9935), 'tensorflow.compat.v1.test.main', 'tf.test.main', ([], {}), '()\n', (9933, 9935), True, 'import tensorflow.compat.v1 as tf\n'), ((1419, 1447), 'reverb.server.Server', 'server.Server', ([], {'tables': 'tables'}), '(tables=tables)\n', (1432, 1447), False, 'from reverb import server\n'), ((1626, 1679), 'reverb.client.Client', 'reverb_client.Client', (['f"""localhost:{cls._server.port}"""'], {}), "(f'localhost:{cls._server.port}')\n", (1646, 1679), True, 'from reverb import client as reverb_client\n'), ((3692, 3745), 'reverb.client.Client', 'reverb_client.Client', (['f"""localhost:{cls._server.port}"""'], {}), "(f'localhost:{cls._server.port}')\n", (3712, 3745), True, 'from reverb import client as reverb_client\n'), ((6031, 6084), 'reverb.client.Client', 'reverb_client.Client', (['f"""localhost:{cls._server.port}"""'], {}), "(f'localhost:{cls._server.port}')\n", (6051, 6084), True, 'from reverb import client as reverb_client\n'), ((6444, 6491), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (6462, 6491), False, 'from reverb import tf_client\n'), ((6509, 6545), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float64'}), '([1.0], dtype=tf.float64)\n', (6520, 6545), True, 'import tensorflow.compat.v1 as tf\n'), ((6980, 7027), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (6998, 7027), False, 'from reverb import tf_client\n'), ((7250, 7297), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (7268, 7297), False, 'from reverb import tf_client\n'), ((7344, 7365), 'tensorflow.compat.v1.constant', 'tf.constant', (["['dist']"], {}), "(['dist'])\n", (7355, 7365), True, 'import tensorflow.compat.v1 as tf\n'), ((7837, 7884), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (7855, 7884), False, 'from reverb import tf_client\n'), ((8129, 8176), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (8147, 8176), False, 'from reverb import tf_client\n'), ((1909, 1944), 'numpy.ones', 'np.ones', (['(81, 81)'], {'dtype': 'np.float64'}), '((81, 81), dtype=np.float64)\n', (1916, 1944), True, 'import numpy as np\n'), ((2046, 2093), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (2064, 2093), False, 'from reverb import tf_client\n'), ((2164, 2212), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['input_data', 'sample.data'], {}), '(input_data, sample.data)\n', (2187, 2212), True, 'import numpy as np\n'), ((2477, 2495), 'numpy.zeros', 'np.zeros', (['(81, 81)'], {}), '((81, 81))\n', (2485, 2495), True, 'import numpy as np\n'), ((2591, 2638), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (2609, 2638), False, 'from reverb import tf_client\n'), ((2843, 2890), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (2861, 2890), False, 'from reverb import tf_client\n'), ((3113, 3160), 
'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (3131, 3160), False, 'from reverb import tf_client\n'), ((4015, 4062), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (4033, 4062), False, 'from reverb import tf_client\n'), ((4590, 4606), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (4600, 4606), False, 'import time\n'), ((5043, 5090), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (5061, 5090), False, 'from reverb import tf_client\n'), ((6344, 6381), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 2, 3]'], {'dtype': 'tf.int8'}), '([1, 2, 3], dtype=tf.int8)\n', (6355, 6381), True, 'import tensorflow.compat.v1 as tf\n'), ((6600, 6621), 'tensorflow.compat.v1.constant', 'tf.constant', (["['dist']"], {}), "(['dist'])\n", (6611, 6621), True, 'import tensorflow.compat.v1 as tf\n'), ((7310, 7329), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 2]'], {}), '([1, 2])\n', (7321, 7329), True, 'import tensorflow.compat.v1 as tf\n'), ((7423, 7459), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float64'}), '([1.0], dtype=tf.float64)\n', (7434, 7459), True, 'import tensorflow.compat.v1 as tf\n'), ((8428, 8475), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (8446, 8475), False, 'from reverb import tf_client\n'), ((9119, 9166), 'reverb.tf_client.TFClient', 'tf_client.TFClient', (['self._client.server_address'], {}), '(self._client.server_address)\n', (9137, 9166), False, 'from reverb import tf_client\n'), ((1005, 1052), 'reverb.item_selectors.Prioritized', 'item_selectors.Prioritized', ([], {'priority_exponent': '(1)'}), '(priority_exponent=1)\n', (1031, 1052), False, 'from reverb import item_selectors\n'), ((1072, 1093), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (1091, 1093), False, 'from reverb import item_selectors\n'), ((1146, 1170), 'reverb.rate_limiters.MinSize', 'rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (1167, 1170), False, 'from reverb import rate_limiters\n'), ((1230, 1277), 'reverb.item_selectors.Prioritized', 'item_selectors.Prioritized', ([], {'priority_exponent': '(1)'}), '(priority_exponent=1)\n', (1256, 1277), False, 'from reverb import item_selectors\n'), ((1297, 1318), 'reverb.item_selectors.Fifo', 'item_selectors.Fifo', ([], {}), '()\n', (1316, 1318), False, 'from reverb import item_selectors\n'), ((1371, 1395), 'reverb.rate_limiters.MinSize', 'rate_limiters.MinSize', (['(1)'], {}), '(1)\n', (1392, 1395), False, 'from reverb import rate_limiters\n'), ((3172, 3213), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (3198, 3213), False, 'from concurrent import futures\n'), ((4117, 4136), 'tensorflow.compat.v1.constant', 'tf.constant', (['"""dist"""'], {}), "('dist')\n", (4128, 4136), True, 'import tensorflow.compat.v1 as tf\n'), ((4138, 4174), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 2]'], {'dtype': 'tf.uint64'}), '([1, 2], dtype=tf.uint64)\n', (4149, 4174), True, 'import tensorflow.compat.v1 as tf\n'), ((4186, 4220), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1]'], {'dtype': 'tf.float64'}), '([1], dtype=tf.float64)\n', (4197, 4220), True, 'import tensorflow.compat.v1 as tf\n'), ((6741, 6764), 
'tensorflow.compat.v1.constant', 'tf.constant', (["[['dist']]"], {}), "([['dist']])\n", (6752, 6764), True, 'import tensorflow.compat.v1 as tf\n'), ((6884, 6903), 'tensorflow.compat.v1.constant', 'tf.constant', (['"""dist"""'], {}), "('dist')\n", (6895, 6903), True, 'import tensorflow.compat.v1 as tf\n'), ((7099, 7115), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1]'], {}), '([1])\n', (7110, 7115), True, 'import tensorflow.compat.v1 as tf\n'), ((7137, 7173), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float64'}), '([1.0], dtype=tf.float64)\n', (7148, 7173), True, 'import tensorflow.compat.v1 as tf\n'), ((7570, 7608), 'tensorflow.compat.v1.constant', 'tf.constant', (['[[1.0]]'], {'dtype': 'tf.float64'}), '([[1.0]], dtype=tf.float64)\n', (7581, 7608), True, 'import tensorflow.compat.v1 as tf\n'), ((7719, 7753), 'tensorflow.compat.v1.constant', 'tf.constant', (['(1.0)'], {'dtype': 'tf.float64'}), '(1.0, dtype=tf.float64)\n', (7730, 7753), True, 'import tensorflow.compat.v1 as tf\n'), ((7956, 7977), 'tensorflow.compat.v1.constant', 'tf.constant', (["['dist']"], {}), "(['dist'])\n", (7967, 7977), True, 'import tensorflow.compat.v1 as tf\n'), ((7999, 8035), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float32'}), '([1.0], dtype=tf.float32)\n', (8010, 8035), True, 'import tensorflow.compat.v1 as tf\n'), ((8248, 8278), 'tensorflow.compat.v1.constant', 'tf.constant', (["['dist', 'dist2']"], {}), "(['dist', 'dist2'])\n", (8259, 8278), True, 'import tensorflow.compat.v1 as tf\n'), ((8300, 8336), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float64'}), '([1.0], dtype=tf.float64)\n', (8311, 8336), True, 'import tensorflow.compat.v1 as tf\n'), ((8978, 9012), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int8'}), '([1, 2, 3], dtype=np.int8)\n', (8986, 9012), True, 'import numpy as np\n'), ((3365, 3383), 'numpy.zeros', 'np.zeros', (['(81, 81)'], {}), '((81, 81))\n', (3373, 3383), True, 'import numpy as np\n'), ((4449, 4479), 'numpy.array', 'np.array', (['[i]'], {'dtype': 'np.uint32'}), '([i], dtype=np.uint32)\n', (4457, 4479), True, 'import numpy as np\n'), ((5151, 5170), 'tensorflow.compat.v1.constant', 'tf.constant', (['"""dist"""'], {}), "('dist')\n", (5162, 5170), True, 'import tensorflow.compat.v1 as tf\n'), ((5187, 5229), 'tensorflow.compat.v1.constant', 'tf.constant', (['[update_key]'], {'dtype': 'tf.uint64'}), '([update_key], dtype=tf.uint64)\n', (5198, 5229), True, 'import tensorflow.compat.v1 as tf\n'), ((5252, 5286), 'tensorflow.compat.v1.constant', 'tf.constant', (['[3]'], {'dtype': 'tf.float64'}), '([3], dtype=tf.float64)\n', (5263, 5286), True, 'import tensorflow.compat.v1 as tf\n'), ((8582, 8603), 'tensorflow.compat.v1.constant', 'tf.constant', (["['dist']"], {}), "(['dist'])\n", (8593, 8603), True, 'import tensorflow.compat.v1 as tf\n'), ((8626, 8662), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0]'], {'dtype': 'tf.float64'}), '([1.0], dtype=tf.float64)\n', (8637, 8662), True, 'import tensorflow.compat.v1 as tf\n'), ((9273, 9303), 'tensorflow.compat.v1.constant', 'tf.constant', (["['dist', 'dist2']"], {}), "(['dist', 'dist2'])\n", (9284, 9303), True, 'import tensorflow.compat.v1 as tf\n'), ((9326, 9367), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1.0, 2.0]'], {'dtype': 'tf.float64'}), '([1.0, 2.0], dtype=tf.float64)\n', (9337, 9367), True, 'import tensorflow.compat.v1 as tf\n'), ((9807, 9841), 'numpy.array', 'np.array', (['[1, 2, 3]'], {'dtype': 'np.int8'}), '([1, 
2, 3], dtype=np.int8)\n', (9815, 9841), True, 'import numpy as np\n'), ((8525, 8562), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 2, 3]'], {'dtype': 'tf.int8'}), '([1, 2, 3], dtype=tf.int8)\n', (8536, 8562), True, 'import tensorflow.compat.v1 as tf\n'), ((9216, 9253), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 2, 3]'], {'dtype': 'tf.int8'}), '([1, 2, 3], dtype=tf.int8)\n', (9227, 9253), True, 'import tensorflow.compat.v1 as tf\n')]
|
from kernel_tuner import tune_kernel
import numpy
import argparse
import json
def generate_code(tuning_parameters):
code = \
"__global__ void fct_ale_c_horizontal(const int maxLevels, const int * __restrict__ nLevels, const int * __restrict__ nodesPerEdge, const int * __restrict__ elementsPerEdge, <%REAL_TYPE%> * __restrict__ del_ttf_advhoriz, const <%REAL_TYPE%> * __restrict__ fct_adf_h, const <%REAL_TYPE%> dt, const <%REAL_TYPE%> * __restrict__ area)\n" \
"{\n" \
"const <%INT_TYPE%> edge = blockIdx.x * 2;\n" \
"<%INT_TYPE%> levelBound = 0;\n" \
"const <%INT_TYPE%> nodeOne = (nodesPerEdge[edge] - 1) * maxLevels;\n" \
"const <%INT_TYPE%> nodeTwo = (nodesPerEdge[edge + 1] - 1) * maxLevels;\n" \
"\n" \
"/* Compute the upper bound for the level */\n" \
"levelBound = elementsPerEdge[edge + 1];\n" \
"if ( levelBound > 0 )\n" \
"{\n" \
"levelBound = max(nLevels[(elementsPerEdge[edge]) - 1], nLevels[levelBound - 1]);\n" \
"}\n" \
"else\n" \
"{\n" \
"levelBound = max(nLevels[(elementsPerEdge[edge]) - 1], 0);\n" \
"}\n" \
"\n" \
"for ( <%INT_TYPE%> level = threadIdx.x; level < levelBound - 1; level += <%BLOCK_SIZE%> )\n" \
"{\n" \
"<%REAL_TYPE%> fct_adf_h_item = 0;\n" \
"<%COMPUTE_BLOCK%>" \
"}\n" \
"}\n"
compute_block = \
"fct_adf_h_item = fct_adf_h[(blockIdx.x * maxLevels) + level + <%OFFSET%>];\n" \
"atomicAdd(&(del_ttf_advhoriz[nodeOne + level + <%OFFSET%>]), (fct_adf_h_item * (dt / area[nodeOne + level + <%OFFSET%>])));\n" \
"atomicAdd(&(del_ttf_advhoriz[nodeTwo + level + <%OFFSET%>]), -(fct_adf_h_item * (dt / area[nodeTwo + level + <%OFFSET%>])));\n"
if tuning_parameters["tiling_x"] > 1:
code = code.replace("<%BLOCK_SIZE%>", str(tuning_parameters["block_size_x"] * tuning_parameters["tiling_x"]))
else:
code = code.replace("<%BLOCK_SIZE%>", str(tuning_parameters["block_size_x"]))
compute = str()
for tile in range(0, tuning_parameters["tiling_x"]):
if tile == 0:
compute = compute + compute_block.replace(" + <%OFFSET%>", "")
else:
offset = tuning_parameters["block_size_x"] * tile
compute = compute + "if ( level + {} < (levelBound - 1) )\n{{\n{}}}\n".format(str(offset), compute_block.replace("<%OFFSET%>", str(offset)))
code = code.replace("<%COMPUTE_BLOCK%>", compute)
code = code.replace("<%INT_TYPE%>", tuning_parameters["int_type"].replace("_", " "))
code = code.replace("<%REAL_TYPE%>", tuning_parameters["real_type"])
return code
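# Hedged example (added; the parameter values are illustrative, not from the
# source): rendering the kernel source for one configuration.
#
#     params = {"block_size_x": 32, "tiling_x": 2,
#               "int_type": "unsigned_int", "real_type": "float"}
#     print(generate_code(params))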
def reference(edges, nodes_per_edge, elements_per_edge, levels, max_levels, del_ttf_advhoriz, fct_adf_h, dt, area, numpy_real_type):
memory_bytes = 0
for edge in range(0, edges):
memory_bytes = memory_bytes + (3 * 4)
node_one = nodes_per_edge[edge * 2] - 1
node_two = nodes_per_edge[(edge * 2) + 1] - 1
element_one = elements_per_edge[edge * 2] - 1
element_two = elements_per_edge[(edge * 2) + 1] - 1
if element_two < 0:
memory_bytes = memory_bytes + (4)
number_levels = max(levels[element_one], 0)
else:
memory_bytes = memory_bytes + (2 * 4)
number_levels = max(levels[element_one], levels[element_two])
for level in range(0, number_levels - 1):
memory_bytes = memory_bytes + (7 * numpy.dtype(numpy_real_type).itemsize)
del_ttf_advhoriz[(node_one * max_levels) + level] = del_ttf_advhoriz[(node_one * max_levels) + level] + (fct_adf_h[(edge * max_levels) + level] * (dt / area[(node_one * max_levels) + level]))
del_ttf_advhoriz[(node_two * max_levels) + level] = del_ttf_advhoriz[(node_two * max_levels) + level] - (fct_adf_h[(edge * max_levels) + level] * (dt / area[(node_two * max_levels) + level]))
return memory_bytes
def tune(nodes, edges, elements, max_levels, max_tile, real_type, quiet=True):
numpy_real_type = None
if real_type == "float":
numpy_real_type = numpy.float32
elif real_type == "double":
numpy_real_type = numpy.float64
else:
        raise ValueError("real_type must be 'float' or 'double'")
# Tuning and code generation parameters
tuning_parameters = dict()
tuning_parameters["int_type"] = ["unsigned_int", "int"]
tuning_parameters["real_type"] = [real_type]
tuning_parameters["max_levels"] = [str(max_levels)]
tuning_parameters["block_size_x"] = [32 * i for i in range(1, 33)]
tuning_parameters["tiling_x"] = [i for i in range(1, max_tile)]
constraints = list()
constraints.append("block_size_x * tiling_x <= max_levels")
# Memory allocation and initialization
del_ttf_advhoriz = numpy.random.randn(nodes * max_levels).astype(numpy_real_type)
del_ttf_advhoriz_control = numpy.copy(del_ttf_advhoriz)
fct_adf_h = numpy.random.randn(edges * max_levels).astype(numpy_real_type)
area = numpy.random.randn(nodes * max_levels).astype(numpy_real_type)
dt = numpy.random.random()
levels = numpy.zeros(elements).astype(numpy.int32)
for element in range(0, elements):
levels[element] = numpy.random.randint(3, max_levels)
nodes_per_edge = numpy.zeros(edges * 2).astype(numpy.int32)
elements_per_edge = numpy.zeros(edges * 2).astype(numpy.int32)
for edge in range(0, edges):
nodes_per_edge[edge * 2] = numpy.random.randint(1, nodes + 1)
nodes_per_edge[(edge * 2) + 1] = numpy.random.randint(1, nodes + 1)
elements_per_edge[edge * 2] = numpy.random.randint(1, elements + 1)
elements_per_edge[(edge * 2) + 1] = numpy.random.randint(0, elements + 1)
if real_type == "float":
arguments = [numpy.int32(max_levels), levels, nodes_per_edge, elements_per_edge, del_ttf_advhoriz, fct_adf_h, numpy.float32(dt), area]
elif real_type == "double":
arguments = [numpy.int32(max_levels), levels, nodes_per_edge, elements_per_edge, del_ttf_advhoriz, fct_adf_h, numpy.float64(dt), area]
else:
        raise ValueError("real_type must be 'float' or 'double'")
# Reference
memory_bytes = reference(edges, nodes_per_edge, elements_per_edge, levels, max_levels, del_ttf_advhoriz_control, fct_adf_h, dt, area, numpy_real_type)
arguments_control = [None, None, None, None, del_ttf_advhoriz_control, None, None, None]
# Tuning
results, _ = tune_kernel("fct_ale_c_horizontal", generate_code, "{} * block_size_x".format(edges), arguments, tuning_parameters, lang="CUDA", answer=arguments_control, restrictions=constraints, quiet=quiet, atol=1e-03)
# Memory bandwidth
for result in results:
result["memory_bandwidth"] = memory_bytes / (result["time"] / 10**3)
return results
def parse_command_line():
parser = argparse.ArgumentParser(description="FESOM2 FCT ALE C HORIZONTAL")
parser.add_argument("--nodes", help="The number of nodes.", type=int, required=True)
parser.add_argument("--edges", help="The number of edges.", type=int, required=True)
parser.add_argument("--elements", help="The number of elements.", type=int, required=True)
parser.add_argument("--max_levels", help="The maximum number of horizontal levels per node.", type=int, required=True)
parser.add_argument("--max_tile", help="The maximum tiling factor.", type=int, default=2)
parser.add_argument("--real_type", help="The floating point type to use.", choices=["float", "double"], type=str, required=True)
parser.add_argument("--verbose", help="Print all kernel configurations.", default=True, action="store_false")
parser.add_argument("--store", help="Store performance results in a JSON file.", default=False, action="store_true")
return parser.parse_args()
if __name__ == "__main__":
command_line = parse_command_line()
results = tune(command_line.nodes, command_line.edges, command_line.elements, command_line.max_levels, command_line.max_tile, command_line.real_type, command_line.verbose)
best_configuration = min(results, key=lambda x : x["time"])
print("/* Memory bandwidth: {:.2f} GB/s */".format(best_configuration["memory_bandwidth"] / 10**9))
print("/* Block size X: {} */".format(best_configuration["block_size_x"]))
print(generate_code(best_configuration))
if command_line.store:
try:
with open("fct_ale_c_horizontal_{}_{}_{}_{}_{}.json".format(command_line.nodes, command_line.elements, command_line.edges, command_line.max_levels, command_line.real_type), "x") as fp:
json.dump(results, fp)
except FileExistsError:
print("Impossible to save the results, a results file already exists for a similar experiment.")
|
[
"json.dump",
"argparse.ArgumentParser",
"numpy.copy",
"numpy.random.randn",
"numpy.float32",
"numpy.dtype",
"numpy.zeros",
"numpy.random.random",
"numpy.random.randint",
"numpy.int32",
"numpy.float64"
] |
[((4888, 4916), 'numpy.copy', 'numpy.copy', (['del_ttf_advhoriz'], {}), '(del_ttf_advhoriz)\n', (4898, 4916), False, 'import numpy\n'), ((5079, 5100), 'numpy.random.random', 'numpy.random.random', ([], {}), '()\n', (5098, 5100), False, 'import numpy\n'), ((6793, 6859), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""FESOM2 FCT ALE C HORIZONTAL"""'}), "(description='FESOM2 FCT ALE C HORIZONTAL')\n", (6816, 6859), False, 'import argparse\n'), ((5221, 5256), 'numpy.random.randint', 'numpy.random.randint', (['(3)', 'max_levels'], {}), '(3, max_levels)\n', (5241, 5256), False, 'import numpy\n'), ((5456, 5490), 'numpy.random.randint', 'numpy.random.randint', (['(1)', '(nodes + 1)'], {}), '(1, nodes + 1)\n', (5476, 5490), False, 'import numpy\n'), ((5532, 5566), 'numpy.random.randint', 'numpy.random.randint', (['(1)', '(nodes + 1)'], {}), '(1, nodes + 1)\n', (5552, 5566), False, 'import numpy\n'), ((5605, 5642), 'numpy.random.randint', 'numpy.random.randint', (['(1)', '(elements + 1)'], {}), '(1, elements + 1)\n', (5625, 5642), False, 'import numpy\n'), ((5687, 5724), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(elements + 1)'], {}), '(0, elements + 1)\n', (5707, 5724), False, 'import numpy\n'), ((4794, 4832), 'numpy.random.randn', 'numpy.random.randn', (['(nodes * max_levels)'], {}), '(nodes * max_levels)\n', (4812, 4832), False, 'import numpy\n'), ((4933, 4971), 'numpy.random.randn', 'numpy.random.randn', (['(edges * max_levels)'], {}), '(edges * max_levels)\n', (4951, 4971), False, 'import numpy\n'), ((5007, 5045), 'numpy.random.randn', 'numpy.random.randn', (['(nodes * max_levels)'], {}), '(nodes * max_levels)\n', (5025, 5045), False, 'import numpy\n'), ((5114, 5135), 'numpy.zeros', 'numpy.zeros', (['elements'], {}), '(elements)\n', (5125, 5135), False, 'import numpy\n'), ((5278, 5300), 'numpy.zeros', 'numpy.zeros', (['(edges * 2)'], {}), '(edges * 2)\n', (5289, 5300), False, 'import numpy\n'), ((5345, 5367), 'numpy.zeros', 'numpy.zeros', (['(edges * 2)'], {}), '(edges * 2)\n', (5356, 5367), False, 'import numpy\n'), ((5775, 5798), 'numpy.int32', 'numpy.int32', (['max_levels'], {}), '(max_levels)\n', (5786, 5798), False, 'import numpy\n'), ((5872, 5889), 'numpy.float32', 'numpy.float32', (['dt'], {}), '(dt)\n', (5885, 5889), False, 'import numpy\n'), ((5950, 5973), 'numpy.int32', 'numpy.int32', (['max_levels'], {}), '(max_levels)\n', (5961, 5973), False, 'import numpy\n'), ((6047, 6064), 'numpy.float64', 'numpy.float64', (['dt'], {}), '(dt)\n', (6060, 6064), False, 'import numpy\n'), ((8538, 8560), 'json.dump', 'json.dump', (['results', 'fp'], {}), '(results, fp)\n', (8547, 8560), False, 'import json\n'), ((3506, 3534), 'numpy.dtype', 'numpy.dtype', (['numpy_real_type'], {}), '(numpy_real_type)\n', (3517, 3534), False, 'import numpy\n')]
|
import numpy as np
import random
import matplotlib.pyplot as plt
n = 10
s = 0.5
S = 2
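# (s, S) inventory policy (comment added): whenever the inventory x drops below
# the reorder point s, it is replenished up to the order-up-to level S.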
demand = []
replenish = []
x = [0]
y = [-s]
lambdas = np.array([1,2])
p = np.array([0.5,0.5])
for i in range(n):
demand.append(random.uniform(0,1))
if x[-1] < s:
y.append(S - s)
replenish.append(S - x[-1])
x.append(max(S - demand[-1],0))
else:
y.append(x[-1] - s)
replenish.append(0)
x.append(max(x[-1] - demand[-1],0))
plt.plot(x)
plt.plot(y)
plt.plot(replenish)
plt.legend(['inventory','excess','replenish'])
plt.show()
|
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"random.uniform",
"matplotlib.pyplot.legend",
"numpy.array"
] |
[((141, 157), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (149, 157), True, 'import numpy as np\n'), ((161, 181), 'numpy.array', 'np.array', (['[0.5, 0.5]'], {}), '([0.5, 0.5])\n', (169, 181), True, 'import numpy as np\n'), ((467, 478), 'matplotlib.pyplot.plot', 'plt.plot', (['x'], {}), '(x)\n', (475, 478), True, 'import matplotlib.pyplot as plt\n'), ((479, 490), 'matplotlib.pyplot.plot', 'plt.plot', (['y'], {}), '(y)\n', (487, 490), True, 'import matplotlib.pyplot as plt\n'), ((491, 510), 'matplotlib.pyplot.plot', 'plt.plot', (['replenish'], {}), '(replenish)\n', (499, 510), True, 'import matplotlib.pyplot as plt\n'), ((511, 559), 'matplotlib.pyplot.legend', 'plt.legend', (["['inventory', 'excess', 'replenish']"], {}), "(['inventory', 'excess', 'replenish'])\n", (521, 559), True, 'import matplotlib.pyplot as plt\n'), ((558, 568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (566, 568), True, 'import matplotlib.pyplot as plt\n'), ((218, 238), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (232, 238), False, 'import random\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from numpy.testing import assert_allclose
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.data import get_pkg_data_filename
from astropy.io import fits
from astropy.wcs import WCS
from ...tests.helpers import make_simple_wcs
from ...core import PixCoord
from ..line import LinePixelRegion, LineSkyRegion
from .utils import ASTROPY_LT_13, HAS_MATPLOTLIB # noqa
from .test_common import BaseTestPixelRegion, BaseTestSkyRegion
@pytest.fixture(scope='session')
def wcs():
filename = get_pkg_data_filename('data/example_header.fits')
header = fits.getheader(filename)
return WCS(header)
class TestLinePixelRegion(BaseTestPixelRegion):
reg = LinePixelRegion(PixCoord(3, 4), PixCoord(4, 4))
sample_box = [-2, 8, -1, 9]
inside = []
outside = [(3.1, 4.2), (5, 4)]
expected_area = 0
expected_repr = '<LinePixelRegion(start=PixCoord(x=3, y=4), end=PixCoord(x=4, y=4))>'
expected_str = 'Region: LinePixelRegion\nstart: PixCoord(x=3, y=4)\nend: PixCoord(x=4, y=4)'
def test_pix_sky_roundtrip(self):
wcs = make_simple_wcs(SkyCoord(2 * u.deg, 3 * u.deg), 0.1 * u.deg, 20)
reg_new = self.reg.to_sky(wcs).to_pixel(wcs)
assert_allclose(reg_new.start.x, self.reg.start.x)
assert_allclose(reg_new.start.y, self.reg.start.y)
assert_allclose(reg_new.end.x, self.reg.end.x)
assert_allclose(reg_new.end.y, self.reg.end.y)
@pytest.mark.skipif('not HAS_MATPLOTLIB')
def test_as_patch(self):
patch = self.reg.as_patch()
assert 'Arrow' in str(patch)
class TestLineSkyRegion(BaseTestSkyRegion):
start = SkyCoord(3 * u.deg, 4 * u.deg, frame='galactic')
end = SkyCoord(3 * u.deg, 5 * u.deg, frame='galactic')
reg = LineSkyRegion(start, end)
if ASTROPY_LT_13:
expected_repr = ('<LineSkyRegion(start=<SkyCoord (Galactic): (l, b) in deg\n'
' (3.0, 4.0)>, end=<SkyCoord (Galactic): (l, b) in deg\n'
' (3.0, 5.0)>)>')
expected_str = ('Region: LineSkyRegion\nstart: <SkyCoord (Galactic): (l, b) in deg\n'
' (3.0, 4.0)>\nend: <SkyCoord (Galactic): (l, b) in deg\n'
' (3.0, 5.0)>')
else:
expected_repr = ('<LineSkyRegion(start=<SkyCoord (Galactic): (l, b) in deg\n'
' ( 3., 4.)>, end=<SkyCoord (Galactic): (l, b) in deg\n'
' ( 3., 5.)>)>')
expected_str = ('Region: LineSkyRegion\nstart: <SkyCoord (Galactic): (l, b) in deg\n'
' ( 3., 4.)>\nend: <SkyCoord (Galactic): (l, b) in deg\n'
' ( 3., 5.)>')
def test_transformation(self, wcs):
pixline = self.reg.to_pixel(wcs)
assert_allclose(pixline.start.x, -50.5)
assert_allclose(pixline.start.y, 299.5)
assert_allclose(pixline.end.x, -50.5)
assert_allclose(pixline.end.y, 349.5)
skyline = pixline.to_sky(wcs)
assert_quantity_allclose(skyline.start.data.lon, self.reg.start.data.lon)
assert_quantity_allclose(skyline.start.data.lat, self.reg.start.data.lat)
assert_quantity_allclose(skyline.end.data.lon, self.reg.end.data.lon)
assert_quantity_allclose(skyline.end.data.lat, self.reg.end.data.lat)
|
[
"astropy.tests.helper.assert_quantity_allclose",
"pytest.fixture",
"astropy.utils.data.get_pkg_data_filename",
"astropy.io.fits.getheader",
"astropy.wcs.WCS",
"pytest.mark.skipif",
"numpy.testing.assert_allclose",
"astropy.coordinates.SkyCoord"
] |
[((694, 725), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (708, 725), False, 'import pytest\n'), ((752, 801), 'astropy.utils.data.get_pkg_data_filename', 'get_pkg_data_filename', (['"""data/example_header.fits"""'], {}), "('data/example_header.fits')\n", (773, 801), False, 'from astropy.utils.data import get_pkg_data_filename\n'), ((815, 839), 'astropy.io.fits.getheader', 'fits.getheader', (['filename'], {}), '(filename)\n', (829, 839), False, 'from astropy.io import fits\n'), ((851, 862), 'astropy.wcs.WCS', 'WCS', (['header'], {}), '(header)\n', (854, 862), False, 'from astropy.wcs import WCS\n'), ((1669, 1709), 'pytest.mark.skipif', 'pytest.mark.skipif', (['"""not HAS_MATPLOTLIB"""'], {}), "('not HAS_MATPLOTLIB')\n", (1687, 1709), False, 'import pytest\n'), ((1871, 1919), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(3 * u.deg)', '(4 * u.deg)'], {'frame': '"""galactic"""'}), "(3 * u.deg, 4 * u.deg, frame='galactic')\n", (1879, 1919), False, 'from astropy.coordinates import SkyCoord\n'), ((1930, 1978), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(3 * u.deg)', '(5 * u.deg)'], {'frame': '"""galactic"""'}), "(3 * u.deg, 5 * u.deg, frame='galactic')\n", (1938, 1978), False, 'from astropy.coordinates import SkyCoord\n'), ((1443, 1493), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg_new.start.x', 'self.reg.start.x'], {}), '(reg_new.start.x, self.reg.start.x)\n', (1458, 1493), False, 'from numpy.testing import assert_allclose\n'), ((1502, 1552), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg_new.start.y', 'self.reg.start.y'], {}), '(reg_new.start.y, self.reg.start.y)\n', (1517, 1552), False, 'from numpy.testing import assert_allclose\n'), ((1561, 1607), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg_new.end.x', 'self.reg.end.x'], {}), '(reg_new.end.x, self.reg.end.x)\n', (1576, 1607), False, 'from numpy.testing import assert_allclose\n'), ((1616, 1662), 'numpy.testing.assert_allclose', 'assert_allclose', (['reg_new.end.y', 'self.reg.end.y'], {}), '(reg_new.end.y, self.reg.end.y)\n', (1631, 1662), False, 'from numpy.testing import assert_allclose\n'), ((3025, 3064), 'numpy.testing.assert_allclose', 'assert_allclose', (['pixline.start.x', '(-50.5)'], {}), '(pixline.start.x, -50.5)\n', (3040, 3064), False, 'from numpy.testing import assert_allclose\n'), ((3073, 3112), 'numpy.testing.assert_allclose', 'assert_allclose', (['pixline.start.y', '(299.5)'], {}), '(pixline.start.y, 299.5)\n', (3088, 3112), False, 'from numpy.testing import assert_allclose\n'), ((3121, 3158), 'numpy.testing.assert_allclose', 'assert_allclose', (['pixline.end.x', '(-50.5)'], {}), '(pixline.end.x, -50.5)\n', (3136, 3158), False, 'from numpy.testing import assert_allclose\n'), ((3167, 3204), 'numpy.testing.assert_allclose', 'assert_allclose', (['pixline.end.y', '(349.5)'], {}), '(pixline.end.y, 349.5)\n', (3182, 3204), False, 'from numpy.testing import assert_allclose\n'), ((3253, 3326), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['skyline.start.data.lon', 'self.reg.start.data.lon'], {}), '(skyline.start.data.lon, self.reg.start.data.lon)\n', (3277, 3326), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((3335, 3408), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['skyline.start.data.lat', 'self.reg.start.data.lat'], {}), '(skyline.start.data.lat, self.reg.start.data.lat)\n', (3359, 3408), False, 'from astropy.tests.helper import 
assert_quantity_allclose\n'), ((3417, 3486), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['skyline.end.data.lon', 'self.reg.end.data.lon'], {}), '(skyline.end.data.lon, self.reg.end.data.lon)\n', (3441, 3486), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((3495, 3564), 'astropy.tests.helper.assert_quantity_allclose', 'assert_quantity_allclose', (['skyline.end.data.lat', 'self.reg.end.data.lat'], {}), '(skyline.end.data.lat, self.reg.end.data.lat)\n', (3519, 3564), False, 'from astropy.tests.helper import assert_quantity_allclose\n'), ((1333, 1363), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['(2 * u.deg)', '(3 * u.deg)'], {}), '(2 * u.deg, 3 * u.deg)\n', (1341, 1363), False, 'from astropy.coordinates import SkyCoord\n')]
|
from __future__ import division, absolute_import, print_function
import sys
import numpy as np
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_array_equal
)
class TestTake(TestCase):
def test_simple(self):
a = [[1, 2], [3, 4]]
a_str = [[b'1', b'2'], [b'3', b'4']]
modes = ['raise', 'wrap', 'clip']
indices = [-1, 4]
index_arrays = [np.empty(0, dtype=np.intp),
np.empty(tuple(), dtype=np.intp),
np.empty((1, 1), dtype=np.intp)]
real_indices = {}
real_indices['raise'] = {-1:1, 4:IndexError}
real_indices['wrap'] = {-1:1, 4:0}
real_indices['clip'] = {-1:0, 4:1}
        # Currently all types but object use the same function generation, so it
        # should not be necessary to test all of them. However, also test a
        # non-refcounted struct on top of object.
types = np.int, np.object, np.dtype([('', 'i', 2)])
for t in types:
# ta works, even if the array may be odd if buffer interface is used
ta = np.array(a if np.issubdtype(t, np.number) else a_str, dtype=t)
tresult = list(ta.T.copy())
for index_array in index_arrays:
if index_array.size != 0:
tresult[0].shape = (2,) + index_array.shape
tresult[1].shape = (2,) + index_array.shape
for mode in modes:
for index in indices:
real_index = real_indices[mode][index]
if real_index is IndexError and index_array.size != 0:
index_array.put(0, index)
assert_raises(IndexError, ta.take, index_array,
mode=mode, axis=1)
elif index_array.size != 0:
index_array.put(0, index)
res = ta.take(index_array, mode=mode, axis=1)
assert_array_equal(res, tresult[real_index])
else:
res = ta.take(index_array, mode=mode, axis=1)
assert_(res.shape == (2,) + index_array.shape)
def test_refcounting(self):
objects = [object() for i in range(10)]
for mode in ('raise', 'clip', 'wrap'):
a = np.array(objects)
b = np.array([2, 2, 4, 5, 3, 5])
a.take(b, out=a[:6])
del a
if hasattr(sys, 'getrefcount'):
assert_(all(sys.getrefcount(o) == 3 for o in objects))
# not contiguous, example:
a = np.array(objects * 2)[::2]
a.take(b, out=a[:6])
del a
if hasattr(sys, 'getrefcount'):
assert_(all(sys.getrefcount(o) == 3 for o in objects))
def test_unicode_mode(self):
d = np.arange(10)
k = b'\xc3\xa4'.decode("UTF8")
assert_raises(ValueError, d.take, 5, mode=k)
def test_empty_partition(self):
# In reference to github issue #6530
a_original = np.array([0, 2, 4, 6, 8, 10])
a = a_original.copy()
# An empty partition should be a successful no-op
a.partition(np.array([], dtype=np.int16))
assert_array_equal(a, a_original)
def test_empty_argpartition(self):
# In reference to github issue #6530
a = np.array([0, 2, 4, 6, 8, 10])
a = a.argpartition(np.array([], dtype=np.int16))
b = np.array([0, 1, 2, 3, 4, 5])
assert_array_equal(a, b)
if __name__ == "__main__":
run_module_suite()
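# Quick illustration (added) of the out-of-range handling exercised above,
# matching the real_indices table for a length-2 axis:
#
#     np.take([1, 2], [4], mode='wrap')  # -> array([1]) (index 4 wraps to 0)
#     np.take([1, 2], [4], mode='clip')  # -> array([2]) (index 4 clips to 1)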
|
[
"numpy.testing.run_module_suite",
"numpy.testing.assert_raises",
"numpy.testing.assert_array_equal",
"numpy.empty",
"numpy.dtype",
"sys.getrefcount",
"numpy.testing.assert_",
"numpy.arange",
"numpy.array",
"numpy.issubdtype"
] |
[((3676, 3694), 'numpy.testing.run_module_suite', 'run_module_suite', ([], {}), '()\n', (3692, 3694), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((2943, 2956), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (2952, 2956), True, 'import numpy as np\n'), ((3004, 3048), 'numpy.testing.assert_raises', 'assert_raises', (['ValueError', 'd.take', '(5)'], {'mode': 'k'}), '(ValueError, d.take, 5, mode=k)\n', (3017, 3048), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((3152, 3181), 'numpy.array', 'np.array', (['[0, 2, 4, 6, 8, 10]'], {}), '([0, 2, 4, 6, 8, 10])\n', (3160, 3181), True, 'import numpy as np\n'), ((3330, 3363), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['a', 'a_original'], {}), '(a, a_original)\n', (3348, 3363), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((3469, 3498), 'numpy.array', 'np.array', (['[0, 2, 4, 6, 8, 10]'], {}), '([0, 2, 4, 6, 8, 10])\n', (3477, 3498), True, 'import numpy as np\n'), ((3577, 3605), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5]'], {}), '([0, 1, 2, 3, 4, 5])\n', (3585, 3605), True, 'import numpy as np\n'), ((3618, 3642), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['a', 'b'], {}), '(a, b)\n', (3636, 3642), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((427, 453), 'numpy.empty', 'np.empty', (['(0)'], {'dtype': 'np.intp'}), '(0, dtype=np.intp)\n', (435, 453), True, 'import numpy as np\n'), ((537, 568), 'numpy.empty', 'np.empty', (['(1, 1)'], {'dtype': 'np.intp'}), '((1, 1), dtype=np.intp)\n', (545, 568), True, 'import numpy as np\n'), ((969, 993), 'numpy.dtype', 'np.dtype', (["[('', 'i', 2)]"], {}), "([('', 'i', 2)])\n", (977, 993), True, 'import numpy as np\n'), ((2420, 2437), 'numpy.array', 'np.array', (['objects'], {}), '(objects)\n', (2428, 2437), True, 'import numpy as np\n'), ((2454, 2482), 'numpy.array', 'np.array', (['[2, 2, 4, 5, 3, 5]'], {}), '([2, 2, 4, 5, 3, 5])\n', (2462, 2482), True, 'import numpy as np\n'), ((3291, 3319), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int16'}), '([], dtype=np.int16)\n', (3299, 3319), True, 'import numpy as np\n'), ((3530, 3558), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.int16'}), '([], dtype=np.int16)\n', (3538, 3558), True, 'import numpy as np\n'), ((2704, 2725), 'numpy.array', 'np.array', (['(objects * 2)'], {}), '(objects * 2)\n', (2712, 2725), True, 'import numpy as np\n'), ((1130, 1157), 'numpy.issubdtype', 'np.issubdtype', (['t', 'np.number'], {}), '(t, np.number)\n', (1143, 1157), True, 'import numpy as np\n'), ((1735, 1801), 'numpy.testing.assert_raises', 'assert_raises', (['IndexError', 'ta.take', 'index_array'], {'mode': 'mode', 'axis': '(1)'}), '(IndexError, ta.take, index_array, mode=mode, axis=1)\n', (1748, 1801), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((2052, 2096), 'numpy.testing.assert_array_equal', 'assert_array_equal', (['res', 'tresult[real_index]'], {}), '(res, tresult[real_index])\n', (2070, 2096), False, 'from numpy.testing import TestCase, run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((2229, 2275), 'numpy.testing.assert_', 'assert_', (['(res.shape == (2,) + index_array.shape)'], {}), '(res.shape == (2,) + index_array.shape)\n', (2236, 2275), False, 'from numpy.testing import TestCase, 
run_module_suite, assert_, assert_raises, assert_array_equal\n'), ((2606, 2624), 'sys.getrefcount', 'sys.getrefcount', (['o'], {}), '(o)\n', (2621, 2624), False, 'import sys\n'), ((2854, 2872), 'sys.getrefcount', 'sys.getrefcount', (['o'], {}), '(o)\n', (2869, 2872), False, 'import sys\n')]
|
import tensorflow as tf
import numpy as np, h5py
import scipy.io as sio
import sys
import random
import kNN
import re
import os
from numpy import *
def weight_variable(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def compute_accuracy(test_att, test_visual, test_id, test_label):
global left_a2
att_pre = sess.run(left_a2, feed_dict={att_features: test_att})
test_id = np.squeeze(np.asarray(test_id))
outpre = [0]*2933
test_label = np.squeeze(np.asarray(test_label))
test_label = test_label.astype("float32")
for i in range(2933):
outputLabel = kNN.kNNClassify(test_visual[i,:], att_pre, test_id, 1)
outpre[i] = outputLabel
correct_prediction = tf.equal(outpre, test_label)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
result = sess.run(accuracy, feed_dict={att_features: test_att, visual_features: test_visual})
return result
f=sio.loadmat('./data/CUB_data/train_attr.mat')
att=np.array(f['train_attr'])
att.shape
f=sio.loadmat('./data/CUB_data/train_cub_googlenet_bn.mat')
x=np.array(f['train_cub_googlenet_bn'])
x.shape
f=sio.loadmat('./data/CUB_data/test_cub_googlenet_bn.mat')
x_test=np.array(f['test_cub_googlenet_bn'])
x_test.shape
f=sio.loadmat('./data/CUB_data/test_labels_cub.mat')
test_label=np.array(f['test_labels_cub'])
test_label.shape
f=sio.loadmat('./data/CUB_data/testclasses_id.mat')
test_id=np.array(f['testclasses_id'])
f=sio.loadmat('./data/CUB_data/test_proto.mat')
att_pro=np.array(f['test_proto'])
# # data shuffle
def data_iterator():
""" A simple data iterator """
batch_idx = 0
while True:
# shuffle labels and features
idxs = np.arange(0, len(x))
np.random.shuffle(idxs)
shuf_visual = x[idxs]
shuf_att = att[idxs]
batch_size = 100
for batch_idx in range(0, len(x), batch_size):
visual_batch = shuf_visual[batch_idx:batch_idx+batch_size]
visual_batch = visual_batch.astype("float32")
att_batch = shuf_att[batch_idx:batch_idx+batch_size]
yield att_batch, visual_batch
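# Example (added): data_iterator() is an infinite generator; the training loop
# below draws one batch per step with next(), e.g.
#
#     it = data_iterator()
#     att_batch, visual_batch = next(it)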
# # Placeholder
# define placeholder for inputs to network
att_features = tf.placeholder(tf.float32, [None, 312])
visual_features = tf.placeholder(tf.float32, [None, 1024])
# # Network
# CUB 312 700 1024 ReLu, 1e-2 * regularisers, 100 batch, 0.00001 Adam
W_left_a1 = weight_variable([312, 700])
b_left_a1 = bias_variable([700])
left_a1 = tf.nn.relu(tf.matmul(att_features, W_left_a1) + b_left_a1)
W_left_a2 = weight_variable([700, 1024])
b_left_a2 = bias_variable([1024])
left_a2 = tf.nn.relu(tf.matmul(left_a1, W_left_a2) + b_left_a2)
# # loss
loss_a = tf.reduce_mean(tf.square(left_a2 - visual_features))
# L2 regularisation for the fully connected parameters.
regularizers_a = (tf.nn.l2_loss(W_left_a1) + tf.nn.l2_loss(b_left_a1)
+ tf.nn.l2_loss(W_left_a2) + tf.nn.l2_loss(b_left_a2))
# Add the regularization term to the loss.
loss_a += 1e-2 * regularizers_a
train_step = tf.train.AdamOptimizer(0.00001).minimize(loss_a)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# # Run
iter_ = data_iterator()
for i in range(1000000):
    att_batch_val, visual_batch_val = next(iter_)
sess.run(train_step, feed_dict={att_features: att_batch_val, visual_features: visual_batch_val})
if i % 1000 == 0:
print(compute_accuracy(att_pro, x_test, test_id, test_label))
|
[
"numpy.random.shuffle",
"scipy.io.loadmat",
"tensorflow.global_variables_initializer",
"numpy.asarray",
"tensorflow.Session",
"tensorflow.constant",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.Variable",
"numpy.array",
"tensorflow.matmul",
"tensorflow.square",
"tensorflow.nn.l2_loss",
"kNN.kNNClassify",
"tensorflow.train.AdamOptimizer",
"tensorflow.truncated_normal",
"tensorflow.equal"
] |
[((1074, 1119), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/CUB_data/train_attr.mat"""'], {}), "('./data/CUB_data/train_attr.mat')\n", (1085, 1119), True, 'import scipy.io as sio\n'), ((1124, 1149), 'numpy.array', 'np.array', (["f['train_attr']"], {}), "(f['train_attr'])\n", (1132, 1149), True, 'import numpy as np, h5py\n'), ((1163, 1220), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/CUB_data/train_cub_googlenet_bn.mat"""'], {}), "('./data/CUB_data/train_cub_googlenet_bn.mat')\n", (1174, 1220), True, 'import scipy.io as sio\n'), ((1223, 1260), 'numpy.array', 'np.array', (["f['train_cub_googlenet_bn']"], {}), "(f['train_cub_googlenet_bn'])\n", (1231, 1260), True, 'import numpy as np, h5py\n'), ((1272, 1328), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/CUB_data/test_cub_googlenet_bn.mat"""'], {}), "('./data/CUB_data/test_cub_googlenet_bn.mat')\n", (1283, 1328), True, 'import scipy.io as sio\n'), ((1336, 1372), 'numpy.array', 'np.array', (["f['test_cub_googlenet_bn']"], {}), "(f['test_cub_googlenet_bn'])\n", (1344, 1372), True, 'import numpy as np, h5py\n'), ((1389, 1439), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/CUB_data/test_labels_cub.mat"""'], {}), "('./data/CUB_data/test_labels_cub.mat')\n", (1400, 1439), True, 'import scipy.io as sio\n'), ((1451, 1481), 'numpy.array', 'np.array', (["f['test_labels_cub']"], {}), "(f['test_labels_cub'])\n", (1459, 1481), True, 'import numpy as np, h5py\n'), ((1502, 1551), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/CUB_data/testclasses_id.mat"""'], {}), "('./data/CUB_data/testclasses_id.mat')\n", (1513, 1551), True, 'import scipy.io as sio\n'), ((1560, 1589), 'numpy.array', 'np.array', (["f['testclasses_id']"], {}), "(f['testclasses_id'])\n", (1568, 1589), True, 'import numpy as np, h5py\n'), ((1593, 1638), 'scipy.io.loadmat', 'sio.loadmat', (['"""./data/CUB_data/test_proto.mat"""'], {}), "('./data/CUB_data/test_proto.mat')\n", (1604, 1638), True, 'import scipy.io as sio\n'), ((1647, 1672), 'numpy.array', 'np.array', (["f['test_proto']"], {}), "(f['test_proto'])\n", (1655, 1672), True, 'import numpy as np, h5py\n'), ((2353, 2392), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 312]'], {}), '(tf.float32, [None, 312])\n', (2367, 2392), True, 'import tensorflow as tf\n'), ((2411, 2451), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1024]'], {}), '(tf.float32, [None, 1024])\n', (2425, 2451), True, 'import tensorflow as tf\n'), ((3275, 3287), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (3285, 3287), True, 'import tensorflow as tf\n'), ((195, 233), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'stddev': '(0.1)'}), '(shape, stddev=0.1)\n', (214, 233), True, 'import tensorflow as tf\n'), ((245, 265), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (256, 265), True, 'import tensorflow as tf\n'), ((307, 336), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {'shape': 'shape'}), '(0.1, shape=shape)\n', (318, 336), True, 'import tensorflow as tf\n'), ((348, 368), 'tensorflow.Variable', 'tf.Variable', (['initial'], {}), '(initial)\n', (359, 368), True, 'import tensorflow as tf\n'), ((854, 882), 'tensorflow.equal', 'tf.equal', (['outpre', 'test_label'], {}), '(outpre, test_label)\n', (862, 882), True, 'import tensorflow as tf\n'), ((2855, 2891), 'tensorflow.square', 'tf.square', (['(left_a2 - visual_features)'], {}), '(left_a2 - visual_features)\n', (2864, 2891), True, 'import tensorflow as tf\n'), ((3065, 3089), 'tensorflow.nn.l2_loss', 
'tf.nn.l2_loss', (['b_left_a2'], {}), '(b_left_a2)\n', (3078, 3089), True, 'import tensorflow as tf\n'), ((3297, 3330), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3328, 3330), True, 'import tensorflow as tf\n'), ((550, 569), 'numpy.asarray', 'np.asarray', (['test_id'], {}), '(test_id)\n', (560, 569), True, 'import numpy as np, h5py\n'), ((622, 644), 'numpy.asarray', 'np.asarray', (['test_label'], {}), '(test_label)\n', (632, 644), True, 'import numpy as np, h5py\n'), ((741, 796), 'kNN.kNNClassify', 'kNN.kNNClassify', (['test_visual[i, :]', 'att_pre', 'test_id', '(1)'], {}), '(test_visual[i, :], att_pre, test_id, 1)\n', (756, 796), False, 'import kNN\n'), ((913, 952), 'tensorflow.cast', 'tf.cast', (['correct_prediction', 'tf.float32'], {}), '(correct_prediction, tf.float32)\n', (920, 952), True, 'import tensorflow as tf\n'), ((1864, 1887), 'numpy.random.shuffle', 'np.random.shuffle', (['idxs'], {}), '(idxs)\n', (1881, 1887), True, 'import numpy as np, h5py\n'), ((2631, 2665), 'tensorflow.matmul', 'tf.matmul', (['att_features', 'W_left_a1'], {}), '(att_features, W_left_a1)\n', (2640, 2665), True, 'import tensorflow as tf\n'), ((2776, 2805), 'tensorflow.matmul', 'tf.matmul', (['left_a1', 'W_left_a2'], {}), '(left_a1, W_left_a2)\n', (2785, 2805), True, 'import tensorflow as tf\n'), ((3038, 3062), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W_left_a2'], {}), '(W_left_a2)\n', (3051, 3062), True, 'import tensorflow as tf\n'), ((3218, 3247), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['(1e-05)'], {}), '(1e-05)\n', (3240, 3247), True, 'import tensorflow as tf\n'), ((2968, 2992), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['W_left_a1'], {}), '(W_left_a1)\n', (2981, 2992), True, 'import tensorflow as tf\n'), ((2995, 3019), 'tensorflow.nn.l2_loss', 'tf.nn.l2_loss', (['b_left_a1'], {}), '(b_left_a1)\n', (3008, 3019), True, 'import tensorflow as tf\n')]
|
import os
import random
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from utils.cartoongan import smooth_image_edges
class CartoonDataset(Dataset):
def __init__(self, data_dir, src_style='real', tar_style='gongqijun', src_transform=None, tar_transform=None):
self.data_dir = data_dir
self.src_data, self.tar_data = self._load_data(data_dir, src_style, tar_style)
print("total {} {} images for training".format(len(self.src_data), src_style))
print("total {} {} images for training".format(len(self.tar_data), tar_style))
self.src_transform = src_transform
self.tar_transform = tar_transform
def _load_data(self, data_dir, src_style, tar_style):
src_data = []
with open(os.path.join(data_dir, '{}_train.txt'.format(src_style)), 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip()
src_data.append(path)
tar_data = []
with open(os.path.join(data_dir, '{}_train.txt'.format(tar_style)), 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip()
tar_data.append(path)
return src_data, tar_data
def _shuffle_data(self):
np.random.shuffle(self.src_data)
np.random.shuffle(self.tar_data)
def __len__(self):
return len(self.src_data)
def __getitem__(self, index):
src_path = self.src_data[index]
tar_path = self.tar_data[index]
src_img = Image.open(os.path.join(self.data_dir, src_path))
tar_img = Image.open(os.path.join(self.data_dir, tar_path))
src_img = src_img.convert('RGB')
tar_img = tar_img.convert('RGB')
# transform src img
if self.src_transform is not None:
src_img = self.src_transform(src_img)
# transform tar img
if self.tar_transform is not None:
tar_img = self.tar_transform(tar_img)
return src_img, tar_img
class CartoonDefaultDataset(Dataset):
def __init__(self, data_dir, style='real', transform=None):
self.data_dir = data_dir
self.data = self._load_data(data_dir, style)
print("total {} {} images for testing".format(len(self.data), style))
self.transform = transform
def _load_data(self, data_dir, style):
data = []
with open(os.path.join(data_dir, '{}_test.txt'.format(style)), 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip()
data.append(path)
return data
def __len__(self):
return len(self.data)
def __getitem__(self, index):
path = self.data[index]
img = Image.open(os.path.join(self.data_dir, path))
img = img.convert('RGB')
        # apply transform if provided
if self.transform is not None:
img = self.transform(img)
return img
class CartoonGANDataset(CartoonDataset):
def __init__(self, data_dir, src_style='real', tar_style='gongqijun', src_transform=None, tar_transform=None):
super(CartoonGANDataset, self).__init__(data_dir, src_style, tar_style, src_transform, tar_transform)
def __getitem__(self, index):
src_path = self.src_data[index]
tar_path = self.tar_data[index]
src_img = Image.open(os.path.join(self.data_dir, src_path))
tar_img = Image.open(os.path.join(self.data_dir, tar_path))
src_img = src_img.convert('RGB')
tar_img = tar_img.convert('RGB')
# get edge smoothed transform
smooth_tar_img = smooth_image_edges(np.asarray(tar_img))
smooth_tar_img = Image.fromarray(smooth_tar_img)
# transform src img
if self.src_transform is not None:
src_img = self.src_transform(src_img)
# transform tar img
if self.tar_transform is not None:
tar_img = self.tar_transform(tar_img)
smooth_tar_img = self.tar_transform(smooth_tar_img)
return src_img, tar_img, smooth_tar_img
class StarCartoonDataset(Dataset):
def __init__(self, data_dir, src_transform=None, tar_transform=None):
self.data_dir = data_dir
self.src_data, self.tar_data = self._load_data(data_dir)
self.src_transform = src_transform
self.tar_transform = tar_transform
def _load_data(self, data_dir):
src_data = []
with open(os.path.join(data_dir, 'real_train.txt'), 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip()
src_data.append(path)
styles = ['gongqijun', 'xinhaicheng', 'disney', 'tangqian']
tar_data = {}
for i, style in enumerate(styles):
tar_data[i] = []
with open(os.path.join(data_dir, '{}_train.txt'.format(style)), 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip()
tar_data[i].append(path)
return src_data, tar_data
def _shuffle_data(self):
for key, item in self.tar_data.items():
np.random.shuffle(item)
self.tar_data[key] = item
def __len__(self):
return len(self.src_data)
def __getitem__(self, index):
        # sample a target style label uniformly from the four styles
        tar_label = random.randint(0, 3)
src_path = self.src_data[index]
tar_path = self.tar_data[tar_label][index]
src_img = Image.open(os.path.join(self.data_dir, src_path))
tar_img = Image.open(os.path.join(self.data_dir, tar_path))
src_img = src_img.convert('RGB')
tar_img = tar_img.convert('RGB')
if self.src_transform:
src_img = self.src_transform(src_img)
if self.tar_transform:
tar_img = self.tar_transform(tar_img)
return src_img, tar_img, tar_label
class ClassifierDataset(Dataset):
def __init__(self, data_dir, split, transform=None):
self.data_dir = data_dir
self.data, self.labels = self._load_data(data_dir, split)
self.transform = transform
def _load_data(self, data_dir, split):
styles = ['disney', 'gongqijun','tangqian','xinhaicheng']
class_dict = {
"disney": 0,
"gongqijun": 1,
"tangqian": 2,
"xinhaicheng": 3,
}
data = []
labels = []
for i, style in enumerate(styles):
cls = class_dict[style]
with open(os.path.join(data_dir, '{}_{}.txt'.format(style, split)), 'r') as f:
lines = f.readlines()
for line in lines:
path = line.strip()
data.append(path)
labels.append(int(cls))
return data, labels
def __len__(self):
return len(self.data)
def __getitem__(self, index):
path = self.data[index]
label = np.asarray(self.labels[index], dtype=np.int64)
img = Image.open(os.path.join(self.data_dir, path))
img = img.convert('RGB')
if self.transform:
img = self.transform(img)
return img, label
if __name__ == '__main__':
from tqdm import tqdm
data_dir = '/home/zhaobin/cartoon/'
style = 'gongqijun'
dataset = StarCartoonDataset(data_dir)
import matplotlib.pyplot as plt
for i in tqdm(range(len(dataset)), total=len(dataset)):
src_img, tar_img, tar_label = dataset.__getitem__(i)
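    # --- Illustrative usage sketch (added; not part of the original script) ---
    # Wiring CartoonDataset into a DataLoader with a torchvision transform
    # pipeline. The resize/normalization values and batch size are assumptions,
    # not values taken from this project, and the sketch assumes the source and
    # target file lists are the same length.
    import torchvision.transforms as T
    from torch.utils.data import DataLoader
    transform = T.Compose([
        T.Resize((256, 256)),
        T.ToTensor(),
        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ])
    pair_dataset = CartoonDataset(data_dir, src_transform=transform,
                                 tar_transform=transform)
    loader = DataLoader(pair_dataset, batch_size=8, shuffle=True)
    src_batch, tar_batch = next(iter(loader))
    print(src_batch.shape, tar_batch.shape)  # expected: [8, 3, 256, 256] twice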
|
[
"random.randint",
"numpy.asarray",
"PIL.Image.fromarray",
"os.path.join",
"numpy.random.shuffle"
] |
[((1307, 1339), 'numpy.random.shuffle', 'np.random.shuffle', (['self.src_data'], {}), '(self.src_data)\n', (1324, 1339), True, 'import numpy as np\n'), ((1348, 1380), 'numpy.random.shuffle', 'np.random.shuffle', (['self.tar_data'], {}), '(self.tar_data)\n', (1365, 1380), True, 'import numpy as np\n'), ((3718, 3749), 'PIL.Image.fromarray', 'Image.fromarray', (['smooth_tar_img'], {}), '(smooth_tar_img)\n', (3733, 3749), False, 'from PIL import Image\n'), ((5402, 5422), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (5416, 5422), False, 'import random\n'), ((6988, 7034), 'numpy.asarray', 'np.asarray', (['self.labels[index]'], {'dtype': 'np.int64'}), '(self.labels[index], dtype=np.int64)\n', (6998, 7034), True, 'import numpy as np\n'), ((1583, 1620), 'os.path.join', 'os.path.join', (['self.data_dir', 'src_path'], {}), '(self.data_dir, src_path)\n', (1595, 1620), False, 'import os\n'), ((1651, 1688), 'os.path.join', 'os.path.join', (['self.data_dir', 'tar_path'], {}), '(self.data_dir, tar_path)\n', (1663, 1688), False, 'import os\n'), ((2795, 2828), 'os.path.join', 'os.path.join', (['self.data_dir', 'path'], {}), '(self.data_dir, path)\n', (2807, 2828), False, 'import os\n'), ((3400, 3437), 'os.path.join', 'os.path.join', (['self.data_dir', 'src_path'], {}), '(self.data_dir, src_path)\n', (3412, 3437), False, 'import os\n'), ((3468, 3505), 'os.path.join', 'os.path.join', (['self.data_dir', 'tar_path'], {}), '(self.data_dir, tar_path)\n', (3480, 3505), False, 'import os\n'), ((3672, 3691), 'numpy.asarray', 'np.asarray', (['tar_img'], {}), '(tar_img)\n', (3682, 3691), True, 'import numpy as np\n'), ((5201, 5224), 'numpy.random.shuffle', 'np.random.shuffle', (['item'], {}), '(item)\n', (5218, 5224), True, 'import numpy as np\n'), ((5543, 5580), 'os.path.join', 'os.path.join', (['self.data_dir', 'src_path'], {}), '(self.data_dir, src_path)\n', (5555, 5580), False, 'import os\n'), ((5611, 5648), 'os.path.join', 'os.path.join', (['self.data_dir', 'tar_path'], {}), '(self.data_dir, tar_path)\n', (5623, 5648), False, 'import os\n'), ((7060, 7093), 'os.path.join', 'os.path.join', (['self.data_dir', 'path'], {}), '(self.data_dir, path)\n', (7072, 7093), False, 'import os\n'), ((4477, 4517), 'os.path.join', 'os.path.join', (['data_dir', '"""real_train.txt"""'], {}), "(data_dir, 'real_train.txt')\n", (4489, 4517), False, 'import os\n')]
|
"""
Script calculates the mean January-April sea ice extent for the Bering Sea
over the 1850-2018 and 1979-2018 periods.
Notes
-----
Author : <NAME>
Date : 24 March 2019
"""
### Import modules
import numpy as np
import matplotlib.pyplot as plt
import datetime
import scipy.stats as sts
### Define directories
directorydata = '/home/zlabe/Documents/Projects/BeringSeaIce2018/BAMS/Data/'
### Define time
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
titletime = currentmn + '/' + currentdy + '/' + currentyr
print('\n' '----Calculating Bering SIE - %s----' % titletime)
### Define years
years = np.arange(1850,2018+1,1)
yearsat = np.arange(1979,2018+1,1)
###############################################################################
###############################################################################
###############################################################################
### Retrieve data from NSIDC regional extent in Bering Sea
beringjan = np.genfromtxt(directorydata + \
'Bering_SIE_NSIDC_01_1979-2018.txt')/1e6
beringfeb = np.genfromtxt(directorydata + \
'Bering_SIE_NSIDC_02_1979-2018.txt')/1e6
beringmar = np.genfromtxt(directorydata + \
'Bering_SIE_NSIDC_03_1979-2018.txt')/1e6
beringapr = np.genfromtxt(directorydata + \
'Bering_SIE_NSIDC_04_1979-2018.txt')/1e6
meansat = (beringjan + beringfeb + beringmar + beringapr)/4.
### Save sea ice extent data from NSIDC
np.savetxt(directorydata + 'Bering_SIE_NSIDC_Jan-Apr_1979-2018.txt',meansat,
delimiter=',',header='File contains mean Jan-Apr SIE from NSIDC' \
'\n Sea Ice Index v3 for years 1979-2018 \n')
###############################################################################
###############################################################################
###############################################################################
### Retrieve data from Sea Ice Atlas
atlasjan = np.genfromtxt(directorydata + 'Bering_SIE85_iceatlas_' \
'01_1850-2018.txt',skip_header=1)
atlasfeb = np.genfromtxt(directorydata + 'Bering_SIE85_iceatlas_' \
'02_1850-2018.txt',skip_header=1)
atlasmar = np.genfromtxt(directorydata + 'Bering_SIE85_iceatlas_' \
'03_1850-2018.txt',skip_header=1)
atlasapr = np.genfromtxt(directorydata + 'Bering_SIE85_iceatlas_' \
'04_1850-2018.txt',skip_header=1)
meanatlas = (atlasjan + atlasfeb + atlasmar + atlasapr)/4.
### Save sea ice extent data from the ice atlas
np.savetxt(directorydata + 'Bering_SIE85_iceatlas_Jan-Apr_1850-2018.txt',meanatlas,
delimiter=',',header='File contains mean Jan-Apr SIE from historical' \
'\n ice atlas (University of Alaska) for years' \
'\n 1850-2018 \n')
###############################################################################
###############################################################################
###############################################################################
### Compute correlations
satperiod = meanatlas[-40:]
### Mask any nans before correlation
mask = ~np.logical_or(np.isnan(satperiod),np.isnan(meansat))
corr,p = sts.pearsonr(satperiod[mask],meansat[mask])
print('\n>>> Correlation between ice atlas and NSIDC is --> %s' % np.round(corr,3))
print('\n>>> P-value between ice atlas and NSIDC is --> %s' % p)
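### --- Illustrative sketch (added): the nan-masking pattern above on toy data
### The toy arrays are hypothetical; this only shows that masking keeps
### pearsonr defined when either series has missing years
toy_a = np.array([1.0, 2.0, np.nan, 4.0, 5.0])
toy_b = np.array([1.1, np.nan, 3.0, 3.9, 5.2])
toy_mask = ~np.logical_or(np.isnan(toy_a),np.isnan(toy_b))
toy_corr,toy_p = sts.pearsonr(toy_a[toy_mask],toy_b[toy_mask])
print('\n>>> Toy masked correlation --> %s (p-value %s)' % (np.round(toy_corr,3),
                                                            np.round(toy_p,3)))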
|
[
"numpy.savetxt",
"numpy.genfromtxt",
"scipy.stats.pearsonr",
"numpy.isnan",
"numpy.arange",
"numpy.round",
"datetime.datetime.now"
] |
[((441, 464), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (462, 464), False, 'import datetime\n'), ((749, 777), 'numpy.arange', 'np.arange', (['(1850)', '(2018 + 1)', '(1)'], {}), '(1850, 2018 + 1, 1)\n', (758, 777), True, 'import numpy as np\n'), ((784, 812), 'numpy.arange', 'np.arange', (['(1979)', '(2018 + 1)', '(1)'], {}), '(1979, 2018 + 1, 1)\n', (793, 812), True, 'import numpy as np\n'), ((1781, 1983), 'numpy.savetxt', 'np.savetxt', (["(directorydata + 'Bering_SIE_NSIDC_Jan-Apr_1979-2018.txt')", 'meansat'], {'delimiter': '""","""', 'header': '"""File contains mean Jan-Apr SIE from NSIDC\n Sea Ice Index v3 for years 1979-2018 \n"""'}), '(directorydata + \'Bering_SIE_NSIDC_Jan-Apr_1979-2018.txt\',\n meansat, delimiter=\',\', header=\n """File contains mean Jan-Apr SIE from NSIDC\n Sea Ice Index v3 for years 1979-2018 \n"""\n )\n', (1791, 1983), True, 'import numpy as np\n'), ((2275, 2365), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE85_iceatlas_01_1850-2018.txt')"], {'skip_header': '(1)'}), "(directorydata + 'Bering_SIE85_iceatlas_01_1850-2018.txt',\n skip_header=1)\n", (2288, 2365), True, 'import numpy as np\n'), ((2403, 2493), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE85_iceatlas_02_1850-2018.txt')"], {'skip_header': '(1)'}), "(directorydata + 'Bering_SIE85_iceatlas_02_1850-2018.txt',\n skip_header=1)\n", (2416, 2493), True, 'import numpy as np\n'), ((2531, 2621), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE85_iceatlas_03_1850-2018.txt')"], {'skip_header': '(1)'}), "(directorydata + 'Bering_SIE85_iceatlas_03_1850-2018.txt',\n skip_header=1)\n", (2544, 2621), True, 'import numpy as np\n'), ((2659, 2749), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE85_iceatlas_04_1850-2018.txt')"], {'skip_header': '(1)'}), "(directorydata + 'Bering_SIE85_iceatlas_04_1850-2018.txt',\n skip_header=1)\n", (2672, 2749), True, 'import numpy as np\n'), ((2904, 3135), 'numpy.savetxt', 'np.savetxt', (["(directorydata + 'Bering_SIE85_iceatlas_Jan-Apr_1850-2018.txt')", 'meanatlas'], {'delimiter': '""","""', 'header': '"""File contains mean Jan-Apr SIE from historical\n ice atlas (University of Alaska) for years\n 1850-2018 \n"""'}), '(directorydata + \'Bering_SIE85_iceatlas_Jan-Apr_1850-2018.txt\',\n meanatlas, delimiter=\',\', header=\n """File contains mean Jan-Apr SIE from historical\n ice atlas (University of Alaska) for years\n 1850-2018 \n"""\n )\n', (2914, 3135), True, 'import numpy as np\n'), ((3553, 3597), 'scipy.stats.pearsonr', 'sts.pearsonr', (['satperiod[mask]', 'meansat[mask]'], {}), '(satperiod[mask], meansat[mask])\n', (3565, 3597), True, 'import scipy.stats as sts\n'), ((1182, 1248), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE_NSIDC_01_1979-2018.txt')"], {}), "(directorydata + 'Bering_SIE_NSIDC_01_1979-2018.txt')\n", (1195, 1248), True, 'import numpy as np\n'), ((1294, 1360), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE_NSIDC_02_1979-2018.txt')"], {}), "(directorydata + 'Bering_SIE_NSIDC_02_1979-2018.txt')\n", (1307, 1360), True, 'import numpy as np\n'), ((1406, 1472), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE_NSIDC_03_1979-2018.txt')"], {}), "(directorydata + 'Bering_SIE_NSIDC_03_1979-2018.txt')\n", (1419, 1472), True, 'import numpy as np\n'), ((1518, 1584), 'numpy.genfromtxt', 'np.genfromtxt', (["(directorydata + 'Bering_SIE_NSIDC_04_1979-2018.txt')"], {}), "(directorydata + 
'Bering_SIE_NSIDC_04_1979-2018.txt')\n", (1531, 1584), True, 'import numpy as np\n'), ((3505, 3524), 'numpy.isnan', 'np.isnan', (['satperiod'], {}), '(satperiod)\n', (3513, 3524), True, 'import numpy as np\n'), ((3525, 3542), 'numpy.isnan', 'np.isnan', (['meansat'], {}), '(meansat)\n', (3533, 3542), True, 'import numpy as np\n'), ((3663, 3680), 'numpy.round', 'np.round', (['corr', '(3)'], {}), '(corr, 3)\n', (3671, 3680), True, 'import numpy as np\n')]
|
import os
import time
import numpy as np
# from IPython import embed
print("perform experiments on amazoncat 13K (multilabel)")
leaf_example_multiplier = 2
lr = 1
bits = 30
alpha = 0.1 # 0.3
passes = 4
learn_at_leaf = True
use_oas = True
# num_queries = 1  # not actually used
dream_at_update = 1
# hal_version = 1  # not actually used
loss = "squared"
dream_repeats = 3
# Precision_at_K = 5
num_examples = 1186239
max_num_labels = 13330
tree_node = int(
num_examples / (np.log(num_examples) / np.log(2) * leaf_example_multiplier)
)
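# Worked example of the sizing formula above (a rough check, values rounded):
# log2(1186239) ~ 20.18, so each leaf targets ~2 * 20.18 ~ 40.4 examples,
# giving tree_node = int(1186239 / 40.4) ~ 29394 memory-tree nodes.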
train_data = "amazoncat_train.mat.mult_label.vw.txt"
test_data = "amazoncat_test.mat.mult_label.vw.txt"
if not os.path.exists(train_data):
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(train_data))
if not os.path.exists(test_data):
os.system("wget http://kalman.ml.cmu.edu/wen_datasets/{}".format(test_data))
saved_model = "{}.vw".format(train_data)
print("## Training...")
start = time.time()
# train_data = 'tmp_rcv1x.vw.txt'
command_line = f"../../build/vowpalwabbit/vw -d {train_data} --memory_tree {tree_node} {'--learn_at_leaf' if learn_at_leaf else ''} --dream_at_update {dream_at_update}\
--max_number_of_labels {max_num_labels} --dream_repeats {dream_repeats} {'--oas' if use_oas else ''} \
--leaf_example_multiplier {leaf_example_multiplier} --alpha {alpha} -l {lr} -b {bits} -c --passes {passes} --loss_function {loss} --holdout_off -f {saved_model}"
os.system(command_line)
train_time = time.time() - start
print("## Testing...")
start = time.time()
os.system(
"../../build/vowpalwabbit/vw {} --oas {} -i {}".format(
test_data, use_oas, saved_model
)
)
test_time = time.time() - start
print("## train time {}, and test time {}".format(train_time, test_time))
|
[
"numpy.log",
"os.path.exists",
"os.system",
"time.time"
] |
[((971, 982), 'time.time', 'time.time', ([], {}), '()\n', (980, 982), False, 'import time\n'), ((1471, 1494), 'os.system', 'os.system', (['command_line'], {}), '(command_line)\n', (1480, 1494), False, 'import os\n'), ((1560, 1571), 'time.time', 'time.time', ([], {}), '()\n', (1569, 1571), False, 'import time\n'), ((651, 677), 'os.path.exists', 'os.path.exists', (['train_data'], {}), '(train_data)\n', (665, 677), False, 'import os\n'), ((776, 801), 'os.path.exists', 'os.path.exists', (['test_data'], {}), '(test_data)\n', (790, 801), False, 'import os\n'), ((1508, 1519), 'time.time', 'time.time', ([], {}), '()\n', (1517, 1519), False, 'import time\n'), ((1703, 1714), 'time.time', 'time.time', ([], {}), '()\n', (1712, 1714), False, 'import time\n'), ((481, 501), 'numpy.log', 'np.log', (['num_examples'], {}), '(num_examples)\n', (487, 501), True, 'import numpy as np\n'), ((504, 513), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (510, 513), True, 'import numpy as np\n')]
|
import random
import sys
import heapq
from typing import Callable, Iterator, List, Tuple, Any, Optional, TYPE_CHECKING
import numpy as np
if TYPE_CHECKING:
import pandas
import pyarrow
from ray.data.impl.sort import SortKeyT
from ray.data.aggregate import AggregateFn
from ray.data.block import (
Block,
BlockAccessor,
BlockMetadata,
T,
U,
KeyType,
AggType,
BlockExecStats,
KeyFn,
)
from ray.data.impl.block_builder import BlockBuilder
from ray.data.impl.size_estimator import SizeEstimator
class SimpleBlockBuilder(BlockBuilder[T]):
def __init__(self):
self._items = []
self._size_estimator = SizeEstimator()
def add(self, item: T) -> None:
self._items.append(item)
self._size_estimator.add(item)
def add_block(self, block: List[T]) -> None:
assert isinstance(block, list), block
self._items.extend(block)
for item in block:
self._size_estimator.add(item)
def num_rows(self) -> int:
return len(self._items)
def build(self) -> Block:
return list(self._items)
def get_estimated_memory_usage(self) -> int:
return self._size_estimator.size_bytes()
class SimpleBlockAccessor(BlockAccessor):
def __init__(self, items: List[T]):
self._items = items
def num_rows(self) -> int:
return len(self._items)
def iter_rows(self) -> Iterator[T]:
return iter(self._items)
    def slice(self, start: int, end: int, copy: bool) -> List[T]:
view = self._items[start:end]
if copy:
view = view.copy()
return view
def random_shuffle(self, random_seed: Optional[int]) -> List[T]:
random = np.random.RandomState(random_seed)
items = self._items.copy()
random.shuffle(items)
return items
def to_pandas(self) -> "pandas.DataFrame":
import pandas
return pandas.DataFrame({"value": self._items})
def to_numpy(self, column: str = None) -> np.ndarray:
if column:
raise ValueError("`column` arg not supported for list block")
return np.array(self._items)
def to_arrow(self) -> "pyarrow.Table":
import pyarrow
return pyarrow.Table.from_pandas(self.to_pandas())
def to_block(self) -> List[T]:
return self._items
def size_bytes(self) -> int:
return sys.getsizeof(self._items)
def schema(self) -> Any:
if self._items:
return type(self._items[0])
else:
return None
def zip(self, other: "Block[T]") -> "Block[T]":
if not isinstance(other, list):
raise ValueError(
"Cannot zip {} with block of type {}".format(type(self), type(other))
)
if len(other) != len(self._items):
raise ValueError(
"Cannot zip self (length {}) with block of length {}".format(
len(self), len(other)
)
)
return list(zip(self._items, other))
@staticmethod
def builder() -> SimpleBlockBuilder[T]:
return SimpleBlockBuilder()
def sample(self, n_samples: int = 1, key: "SortKeyT" = None) -> List[T]:
if not callable(key) and key is not None:
raise NotImplementedError(
"Python sort key must be either None or a callable "
"function, was: {}".format(key)
)
k = min(n_samples, len(self._items))
ret = random.sample(self._items, k)
if key is None:
return ret
return [key(x) for x in ret]
def count(self, on: KeyFn) -> Optional[U]:
if on is not None and not callable(on):
raise ValueError(
"on must be a callable or None when aggregating on Simple blocks, but "
f"got: {type(on)}."
)
if self.num_rows() == 0:
return None
count = 0
for r in self.iter_rows():
if on is not None:
r = on(r)
if r is not None:
count += 1
return count
def _apply_accum(
self,
init: AggType,
accum: Callable[[AggType, T], AggType],
on: KeyFn,
ignore_nulls: bool,
) -> Optional[U]:
"""Helper providing null handling around applying an aggregation."""
if on is not None and not callable(on):
raise ValueError(
"on must be a callable or None when aggregating on Simple blocks, but "
f"got: {type(on)}."
)
if self.num_rows() == 0:
return None
has_data = False
a = init
for r in self.iter_rows():
if on is not None:
r = on(r)
if r is None:
if ignore_nulls:
continue
else:
return None
else:
has_data = True
a = accum(a, r)
return a if has_data else None
def sum(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]:
return self._apply_accum(0, lambda a, r: a + r, on, ignore_nulls)
def min(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]:
return self._apply_accum(float("inf"), min, on, ignore_nulls)
def max(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]:
return self._apply_accum(float("-inf"), max, on, ignore_nulls)
def mean(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]:
return self._apply_accum(
[0, 0],
lambda a, r: [a[0] + r, a[1] + 1],
on,
ignore_nulls,
)
def std(self, on: KeyFn, ignore_nulls: bool) -> Optional[U]:
def accum(a: List[float], r: float) -> List[float]:
# Accumulates the current count, the current mean, and the sum of
# squared differences from the current mean (M2).
M2, mean, count = a
count += 1
delta = r - mean
mean += delta / count
delta2 = r - mean
M2 += delta * delta2
return [M2, mean, count]
return self._apply_accum([0, 0, 0], accum, on, ignore_nulls)
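    # Note (added): the accumulator above is Welford's online algorithm; the
    # final standard deviation is presumably produced by the aggregation's
    # finalizer elsewhere, e.g. sqrt(M2 / (count - ddof)), from the
    # [M2, mean, count] triple returned here.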
def sum_of_squared_diffs_from_mean(
self,
on: KeyFn,
ignore_nulls: bool,
mean: Optional[U] = None,
) -> Optional[U]:
if mean is None:
# If precomputed mean not given, we compute it ourselves.
mean = self.mean(on, ignore_nulls)
return self._apply_accum(
0,
lambda a, r: a + (r - mean) ** 2,
on,
ignore_nulls,
)
def sort_and_partition(
self, boundaries: List[T], key: "SortKeyT", descending: bool
) -> List["Block[T]"]:
items = sorted(self._items, key=key, reverse=descending)
if len(boundaries) == 0:
return [items]
# For each boundary value, count the number of items that are less
# than it. Since the block is sorted, these counts partition the items
# such that boundaries[i] <= x < boundaries[i + 1] for each x in
# partition[i]. If `descending` is true, `boundaries` would also be
# in descending order and we only need to count the number of items
# *greater than* the boundary value instead.
key_fn = key if key else lambda x: x
comp_fn = (
(lambda x, b: key_fn(x) > b) if descending else (lambda x, b: key_fn(x) < b)
) # noqa E731
# Compute the boundary indices in O(n) time via scan.
boundary_indices = []
remaining = boundaries.copy()
for i, x in enumerate(items):
while remaining and not comp_fn(x, remaining[0]):
remaining.pop(0)
boundary_indices.append(i)
for _ in remaining:
boundary_indices.append(len(items))
assert len(boundary_indices) == len(boundaries)
ret = []
prev_i = 0
for i in boundary_indices:
ret.append(items[prev_i:i])
prev_i = i
ret.append(items[prev_i:])
return ret
def combine(
self, key: KeyFn, aggs: Tuple[AggregateFn]
) -> Block[Tuple[KeyType, AggType]]:
"""Combine rows with the same key into an accumulator.
This assumes the block is already sorted by key in ascending order.
Args:
key: The key function that returns the key from the row
or None for global aggregation.
agg: The aggregations to do.
Returns:
A sorted block of (k, v_1, ..., v_n) tuples where k is the groupby
key and v_i is the partially combined accumulator for the ith given
aggregation.
If key is None then the k element of tuple is omitted.
"""
if key is not None and not callable(key):
raise ValueError(
"key must be a callable or None when aggregating on Simple blocks, but "
f"got: {type(key)}."
)
def iter_groups() -> Iterator[Tuple[KeyType, Block]]:
"""Creates an iterator over zero-copy group views."""
if key is None:
# Global aggregation consists of a single "group", so we short-circuit.
yield None, self.to_block()
return
start = end = 0
iter = self.iter_rows()
next_row = None
# Use a bool to indicate if next_row is valid
# instead of checking if next_row is None
# since a row can have None value.
has_next_row = False
while True:
try:
if not has_next_row:
next_row = next(iter)
has_next_row = True
next_key = key(next_row)
while key(next_row) == next_key:
end += 1
try:
next_row = next(iter)
except StopIteration:
has_next_row = False
next_row = None
break
yield next_key, self.slice(start, end, copy=False)
start = end
except StopIteration:
break
ret = []
for group_key, group_view in iter_groups():
# Aggregate.
accumulators = [agg.init(group_key) for agg in aggs]
for i in range(len(aggs)):
accumulators[i] = aggs[i].accumulate_block(accumulators[i], group_view)
# Build the row.
if key is None:
ret.append(tuple(accumulators))
else:
ret.append((group_key,) + tuple(accumulators))
return ret
@staticmethod
def merge_sorted_blocks(
blocks: List[Block[T]], key: "SortKeyT", descending: bool
) -> Tuple[Block[T], BlockMetadata]:
stats = BlockExecStats.builder()
ret = [x for block in blocks for x in block]
ret.sort(key=key, reverse=descending)
return ret, SimpleBlockAccessor(ret).get_metadata(
None, exec_stats=stats.build()
)
@staticmethod
def aggregate_combined_blocks(
blocks: List[Block[Tuple[KeyType, AggType]]],
key: KeyFn,
aggs: Tuple[AggregateFn],
) -> Tuple[Block[Tuple[KeyType, U]], BlockMetadata]:
"""Aggregate sorted, partially combined blocks with the same key range.
This assumes blocks are already sorted by key in ascending order,
so we can do merge sort to get all the rows with the same key.
Args:
blocks: A list of partially combined and sorted blocks.
key: The key function that returns the key from the row
or None for global aggregation.
aggs: The aggregations to do.
Returns:
A block of (k, v_1, ..., v_n) tuples and its metadata where k is
the groupby key and v_i is the corresponding aggregation result for
the ith given aggregation.
If key is None then the k element of tuple is omitted.
"""
stats = BlockExecStats.builder()
key_fn = (lambda r: r[0]) if key else (lambda r: 0)
iter = heapq.merge(
*[SimpleBlockAccessor(block).iter_rows() for block in blocks], key=key_fn
)
next_row = None
ret = []
while True:
try:
if next_row is None:
next_row = next(iter)
next_key = key_fn(next_row)
def gen():
nonlocal iter
nonlocal next_row
while key_fn(next_row) == next_key:
yield next_row
try:
next_row = next(iter)
except StopIteration:
next_row = None
break
first = True
accumulators = [None] * len(aggs)
for r in gen():
if first:
for i in range(len(aggs)):
accumulators[i] = r[i + 1] if key else r[i]
first = False
else:
for i in range(len(aggs)):
accumulators[i] = aggs[i].merge(
accumulators[i], r[i + 1] if key else r[i]
)
if key is None:
ret.append(
tuple(
agg.finalize(accumulator)
for agg, accumulator in zip(aggs, accumulators)
)
)
else:
ret.append(
(next_key,)
+ tuple(
agg.finalize(accumulator)
for agg, accumulator in zip(aggs, accumulators)
)
)
except StopIteration:
break
return ret, SimpleBlockAccessor(ret).get_metadata(
None, exec_stats=stats.build()
)
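if __name__ == "__main__":
    # Illustrative sketch (added): exercising SimpleBlockAccessor directly.
    # Note that mean/std return *partial accumulators* ([sum, count] and
    # [M2, mean, count]) that an AggregateFn would normally finalize.
    acc = SimpleBlockAccessor([3, 1, 2])
    print(acc.sum(on=None, ignore_nulls=True))   # 6
    print(acc.min(on=None, ignore_nulls=True))   # 1
    print(acc.mean(on=None, ignore_nulls=True))  # [6, 3], unfinalized
    print(acc.sort_and_partition([2], key=None, descending=False))  # [[1], [2, 3]]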
|
[
"pandas.DataFrame",
"random.sample",
"random.shuffle",
"numpy.random.RandomState",
"ray.data.impl.size_estimator.SizeEstimator",
"numpy.array",
"sys.getsizeof",
"ray.data.block.BlockExecStats.builder"
] |
[((667, 682), 'ray.data.impl.size_estimator.SizeEstimator', 'SizeEstimator', ([], {}), '()\n', (680, 682), False, 'from ray.data.impl.size_estimator import SizeEstimator\n'), ((1746, 1780), 'numpy.random.RandomState', 'np.random.RandomState', (['random_seed'], {}), '(random_seed)\n', (1767, 1780), True, 'import numpy as np\n'), ((1824, 1845), 'random.shuffle', 'random.shuffle', (['items'], {}), '(items)\n', (1838, 1845), False, 'import random\n'), ((1953, 1993), 'pandas.DataFrame', 'pandas.DataFrame', (["{'value': self._items}"], {}), "({'value': self._items})\n", (1969, 1993), False, 'import pandas\n'), ((2161, 2182), 'numpy.array', 'np.array', (['self._items'], {}), '(self._items)\n', (2169, 2182), True, 'import numpy as np\n'), ((2422, 2448), 'sys.getsizeof', 'sys.getsizeof', (['self._items'], {}), '(self._items)\n', (2435, 2448), False, 'import sys\n'), ((3530, 3559), 'random.sample', 'random.sample', (['self._items', 'k'], {}), '(self._items, k)\n', (3543, 3559), False, 'import random\n'), ((11113, 11137), 'ray.data.block.BlockExecStats.builder', 'BlockExecStats.builder', ([], {}), '()\n', (11135, 11137), False, 'from ray.data.block import Block, BlockAccessor, BlockMetadata, T, U, KeyType, AggType, BlockExecStats, KeyFn\n'), ((12345, 12369), 'ray.data.block.BlockExecStats.builder', 'BlockExecStats.builder', ([], {}), '()\n', (12367, 12369), False, 'from ray.data.block import Block, BlockAccessor, BlockMetadata, T, U, KeyType, AggType, BlockExecStats, KeyFn\n')]
|
# -*- coding:utf-8 -*-
from preprocessing import Tokenizer
import random
import csv
import json
import numpy as np
import sentencepiece as spm
from konlpy.tag import Okt
import torch
from torch.utils.data import Dataset, DataLoader
class BertLMDataset(Dataset):
def __init__(self, dataset, tokenizer: Tokenizer, vocab_size=5000):
self.tokenizer = tokenizer
        # load the data
with open(dataset, 'r', encoding='utf-8') as f:
self.data = json.load(f)
        # preprocess the data (str tokens -> int ids)
for i, d in enumerate(self.data):
self.data[i]['content'] = tokenizer.tokens_to_ids(d['content'])
        # load the token vocabulary used for random masking
self.total_tokens = tokenizer.get_tokens(vocab_prefix=f'vocab_{vocab_size}', for_masking=True)
def __getitem__(self, item):
tokens = self.data[item]['content']
masked_tokens, candi_index, answers = self._masking(tokens)
masked_tokens = torch.LongTensor(masked_tokens)
mask = np.zeros_like(masked_tokens)
mask[candi_index] = 1 # ex) [0, 1, 1, 0, 0, 1, ...]
mask = torch.from_numpy(mask).long()
sparse_answers = np.zeros_like(masked_tokens)
sparse_answers[candi_index] = answers # ex) [0, 32, 5, 0, 0, 12, ...]
sparse_answers = torch.from_numpy(sparse_answers).long()
return masked_tokens, mask, sparse_answers
def _masking(self, tokens):
sep_idx = tokens.index(self.tokenizer.token_to_id('[SEP]'))
t_tokens = tokens[1:sep_idx]
k = int(len(t_tokens) * 0.15)
        candi_index = list(range(1, len(t_tokens)+1))  # +1 because [CLS] was excluded
random.shuffle(candi_index)
candi_index = candi_index[:k]
        random_token_index = candi_index[:int(k * 0.1)]  # random-token masking
        # correct_token_index = candi_index[int(k * 0.1):int(k * 0.2)]  # keep-original masking
        mask_token_index = candi_index[int(k * 0.2):]  # [MASK]-token masking
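        # Net effect (standard BERT-style split of the 15% candidates): the
        # first 10% get a random token, the next 10% keep the original token,
        # and the remaining 80% are replaced with [MASK].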
masked_tokens = np.array(tokens)
        answers = masked_tokens[candi_index]  # label tokens at the masked positions
for idx in random_token_index:
masked_tokens[idx] = self.tokenizer.token_to_id(random.choice(self.total_tokens))
masked_tokens[mask_token_index] = self.tokenizer.token_to_id('[MASK]')
return masked_tokens, candi_index, answers
def __len__(self):
return len(self.data)
class BertClsDataset(Dataset):
def __init__(self, dataset, tokenizer: Tokenizer, max_num_seq=20, inference=False, vocab_size=5000, is_train=True):
self.max_num_seq = max_num_seq
self.inference = inference
self.is_train = is_train
self.tokenizer = tokenizer
self.total_tokens = tokenizer.get_tokens(vocab_prefix=f'vocab_{vocab_size}', for_masking=True)
        # load the data
with open(dataset, 'r', encoding='utf-8') as f:
self.data = json.load(f)
        # preprocess the data (str tokens -> int ids)
for i, d in enumerate(self.data):
doc = d['content']
n_doc = []
for sub_doc in doc:
n_doc.append(self.tokenizer.tokens_to_ids(sub_doc))
# n_doc.append(list(map(self.tokenizer.PieceToId, sub_doc.split())))
self.data[i]['content'] = n_doc
def __getitem__(self, item):
doc = self.data[item]['content']
        if not self.inference and len(doc) > self.max_num_seq:  # too many sentences: pick a random window
sp = random.choice(list(range(len(doc) - self.max_num_seq)))
doc = doc[sp:sp + self.max_num_seq]
if self.is_train:
            for i, sub_doc in enumerate(doc):
doc[i] = self._masking(sub_doc, mask_rate=0.3)
doc = torch.LongTensor(doc)
label = self.data[item]['label']
return doc, label
def _masking(self, tokens, mask_rate=0.1):
sep_idx = list(tokens).index(self.tokenizer.token_to_id('[SEP]'))
t_tokens = tokens[1:sep_idx]
k = int(len(t_tokens) * mask_rate)
        candi_index = list(range(1, len(t_tokens)+1))  # +1 because [CLS] was excluded
random.shuffle(candi_index)
candi_index = candi_index[:k]
        random_token_index = candi_index[:int(k * 0.2)]  # random-token masking
        mask_token_index = candi_index[int(k * 0.8):]  # [UNK]-token masking
masked_tokens = np.array(tokens)
for idx in random_token_index:
masked_tokens[idx] = self.tokenizer.token_to_id(random.choice(self.total_tokens))
masked_tokens[mask_token_index] = self.tokenizer.token_to_id('[UNK]')
return masked_tokens
def __len__(self):
return len(self.data)
if __name__ == '__main__':
    # NOTE: BertClsDataset also requires a Tokenizer; constructing one with
    # defaults here is an assumption about preprocessing.Tokenizer's signature.
    dataset = BertClsDataset('bertcls_val_v5000_t128.json', Tokenizer())
data_loader = DataLoader(dataset, batch_size=1, shuffle=False)
for i, (doc, label) in enumerate(data_loader):
print(doc.shape)
print(doc)
print(label)
if i > 0:
break
|
[
"numpy.zeros_like",
"json.load",
"torch.utils.data.DataLoader",
"torch.LongTensor",
"random.shuffle",
"random.choice",
"numpy.array",
"torch.from_numpy"
] |
[((4787, 4835), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': '(1)', 'shuffle': '(False)'}), '(dataset, batch_size=1, shuffle=False)\n', (4797, 4835), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((974, 1005), 'torch.LongTensor', 'torch.LongTensor', (['masked_tokens'], {}), '(masked_tokens)\n', (990, 1005), False, 'import torch\n'), ((1024, 1052), 'numpy.zeros_like', 'np.zeros_like', (['masked_tokens'], {}), '(masked_tokens)\n', (1037, 1052), True, 'import numpy as np\n'), ((1189, 1217), 'numpy.zeros_like', 'np.zeros_like', (['masked_tokens'], {}), '(masked_tokens)\n', (1202, 1217), True, 'import numpy as np\n'), ((1685, 1712), 'random.shuffle', 'random.shuffle', (['candi_index'], {}), '(candi_index)\n', (1699, 1712), False, 'import random\n'), ((1998, 2014), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (2006, 2014), True, 'import numpy as np\n'), ((3740, 3761), 'torch.LongTensor', 'torch.LongTensor', (['doc'], {}), '(doc)\n', (3756, 3761), False, 'import torch\n'), ((4126, 4153), 'random.shuffle', 'random.shuffle', (['candi_index'], {}), '(candi_index)\n', (4140, 4153), False, 'import random\n'), ((4355, 4371), 'numpy.array', 'np.array', (['tokens'], {}), '(tokens)\n', (4363, 4371), True, 'import numpy as np\n'), ((493, 505), 'json.load', 'json.load', (['f'], {}), '(f)\n', (502, 505), False, 'import json\n'), ((2916, 2928), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2925, 2928), False, 'import json\n'), ((1131, 1153), 'torch.from_numpy', 'torch.from_numpy', (['mask'], {}), '(mask)\n', (1147, 1153), False, 'import torch\n'), ((1324, 1356), 'torch.from_numpy', 'torch.from_numpy', (['sparse_answers'], {}), '(sparse_answers)\n', (1340, 1356), False, 'import torch\n'), ((2182, 2214), 'random.choice', 'random.choice', (['self.total_tokens'], {}), '(self.total_tokens)\n', (2195, 2214), False, 'import random\n'), ((4473, 4505), 'random.choice', 'random.choice', (['self.total_tokens'], {}), '(self.total_tokens)\n', (4486, 4505), False, 'import random\n')]
|
#!/usr/bin/env python
"""
setup the disperion database file structure and configuration file
"""
import os
import tempfile
import numpy as np
from dispersion import Material, Writer, Interpolation, Catalogue
from dispersion.config import default_config, write_config
def get_root_dir(conf):
"""
get the root dir from the user
"""
question = ("path for root directory of the catalogue file" +
" system [default: {}]> ")
default = conf['Path']
validator = os.path.isabs
data_name = "root directory"
return ask_and_confirm(question, default, validator, data_name)
def get_catalogue_name(conf):
"""
get the catalogue file name from the user
"""
question = ("name of the catalogue file" +
" [default: {}]> ")
default = conf['File']
validator = valid_file_name
data_name = "catalogue file name"
return ask_and_confirm(question, default, validator, data_name)
def ask_and_confirm(question, default, validator, data_name, confirm=True):
"""
Returns
-------
user_input: str
the data from the user
confirmed_input: bool
true if the input was confirmed by the user
Parameters
----------
question: str
the question to prompt the user input
default: str
the default value of this value
validator: function
function to validate the input
data_name: str
name of the data that is being input
"""
user_input = ask(question, default, validator)
confirmation_question = ("confirm {} as ".format(data_name) +
"{}? [y/n]> ".format(user_input))
return [user_input, get_confirmation(confirmation_question)]
def ask(question, default, validator):
"""
ask for user input with default value and then validate
"""
valid_input = False
while not valid_input:
user_input = input(question.format(default))
if user_input == "":
user_input = default
if validator(user_input):
valid_input = True
else:
print("input is not valid")
return user_input
def get_confirmation(question):
"""
get a yes/no answer to a question
"""
confirmed_input = False
while not confirmed_input:
confirmation1 = input(question)
if confirmation1 in {'y', 'yes'}:
confirmed_input = True
elif confirmation1 in {'n', 'no'}:
confirmed_input = False
break
else:
print("input invalid")
return confirmed_input
def valid_file_name(filename):
"""
test if filename is valid
create a file with the filename in a temporary directory and delete the
directory afterwards.
"""
with tempfile.TemporaryDirectory() as temp_dir:
file_path = os.path.join(temp_dir, filename)
        try:
            # attempt to create (and implicitly close) a file with this name
            with open(file_path, 'w'):
                pass
            return True
        except IOError:
            return False
def install_modules(conf):
"""
make a subfolder for each module and ask to download files
"""
install_funcs = {"UserData":install_userdata,
"RefractiveIndexInfo":install_rii}
for module in conf['Modules']:
if module == "UserData":
install = True
else:
question = "install module {}? [y/n]> ".format(module)
install = get_confirmation(question)
conf['Modules'][module] = install
if install:
module_dir = os.path.join(conf['Path'], module)
if not os.path.isdir(module_dir):
os.mkdir(module_dir)
install_funcs[module](module_dir, conf)
return conf
def install_userdata(module_dir, conf):
make_example_txt(module_dir)
make_example_yaml(module_dir)
def make_example_txt(dir_path):
test_data = np.array([[400., 1.7, 0.1],
[500., 1.6, 0.05],
[600., 1.5, 0.0],
[700., 1.4, 0.0]])
mat = Material(tabulated_nk=test_data,
spectrum_type='wavelength', unit='nanometer')
mat.meta_data['Reference'] = "Literature reference to the data"
mat.meta_data['Comment'] = "Any additional information goes here"
mat.meta_data['Name'] = "Short name of the material"
mat.meta_data['FullName'] = "Full name of the material"
mat.meta_data['Author'] = "The author of this data file"
mat.meta_data['MetaComment'] = " This is a multiline meta-comment\n" + \
" which provides information not\n" + \
" in metadata"
filepath = os.path.join(dir_path, "example_file.txt")
write = Writer(filepath, mat)
write.write_file()
def make_example_yaml(dir_path):
model_params = {'name': 'Sellmeier',
                    'spectrum_type':'wavelength',
'unit':'micrometer',
'valid_range':np.array([0.350, 2.0]),
'parameters': np.array([0, 1.0, 0.05,
2.0, 0.1,
10., 25.])}
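    # Assumed reading of the parameter vector above: a Sellmeier model has the
    # form n^2(lambda) = 1 + sum_i B_i*lambda^2 / (lambda^2 - C_i), so the
    # array is presumably a leading constant followed by (B_i, C_i) pairs;
    # consult the dispersion package docs for the exact convention.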
mat = Material(model_kw=model_params, spectrum_type='wavelength', unit='micrometer')
mat.meta_data['Reference'] = "Literature reference to the data"
mat.meta_data['Comment'] = "Any additional information goes here"
mat.meta_data['Name'] = "Short name of the material"
mat.meta_data['FullName'] = "Full name of the material"
mat.meta_data['Author'] = "The author of this data file"
mat.meta_data['MetaComment'] = " This is a multiline meta-comment\n" + \
" which provides information not\n" + \
" in metadata"
k_data = np.array([[400., 0.1],
[500., 0.05],
[600., 0.0],
[700., 0.0]])
interp = Interpolation(k_data, unit='nm')
mat.data['imag'] = interp
filepath = os.path.join(dir_path, "example_file2.yml")
write = Writer(filepath, mat)
write.write_file()
def install_rii(module_dir, conf):
"""
download the refractive index info database from github
"""
question = ("download the refractive index info database from github?" +
" (required python package <GitPython>)" +
" [y/n]> ")
install = get_confirmation(question)
if install:
from git import Repo
git_url = "https://github.com/polyanskiy/refractiveindex.info-database.git"
#install_dir = os.path.join(conf['Path'], "RefractiveIndexInfo")
Repo.clone_from(git_url, module_dir)
def maybe_rebuild_catalogue(conf):
question = "rebuild catalogue? [y/n]> "
rebuild = get_confirmation(question)
if rebuild:
cat = Catalogue(config=conf, rebuild= 'All')
cat.save_to_file()
def main():
conf = default_config()
print("This script will provide a default configuration for the \n"+
"dispersion package")
confirmed_valid_path = False
while not confirmed_valid_path:
[path, confirmed_valid_path] = get_root_dir(conf)
conf['Path'] = path
#print("Path will be se to: {}".format(path))
    confirmed_db_name = False
    while not confirmed_db_name:
        [name, confirmed_db_name] = get_catalogue_name(conf)
conf['File'] = name
#print("Filename will be set to {}".format(name))
conf = install_modules(conf)
write_config(conf)
maybe_rebuild_catalogue(conf)
if __name__ == "__main__":
main()
|
[
"os.mkdir",
"tempfile.TemporaryDirectory",
"dispersion.Material",
"dispersion.Writer",
"os.path.isdir",
"dispersion.config.default_config",
"dispersion.config.write_config",
"dispersion.Interpolation",
"numpy.array",
"git.Repo.clone_from",
"dispersion.Catalogue",
"os.path.join"
] |
[((3971, 4062), 'numpy.array', 'np.array', (['[[400.0, 1.7, 0.1], [500.0, 1.6, 0.05], [600.0, 1.5, 0.0], [700.0, 1.4, 0.0]]'], {}), '([[400.0, 1.7, 0.1], [500.0, 1.6, 0.05], [600.0, 1.5, 0.0], [700.0,\n 1.4, 0.0]])\n', (3979, 4062), True, 'import numpy as np\n'), ((4143, 4221), 'dispersion.Material', 'Material', ([], {'tabulated_nk': 'test_data', 'spectrum_type': '"""wavelength"""', 'unit': '"""nanometer"""'}), "(tabulated_nk=test_data, spectrum_type='wavelength', unit='nanometer')\n", (4151, 4221), False, 'from dispersion import Material, Writer, Interpolation, Catalogue\n'), ((4774, 4816), 'os.path.join', 'os.path.join', (['dir_path', '"""example_file.txt"""'], {}), "(dir_path, 'example_file.txt')\n", (4786, 4816), False, 'import os\n'), ((4829, 4850), 'dispersion.Writer', 'Writer', (['filepath', 'mat'], {}), '(filepath, mat)\n', (4835, 4850), False, 'from dispersion import Material, Writer, Interpolation, Catalogue\n'), ((5277, 5355), 'dispersion.Material', 'Material', ([], {'model_kw': 'model_params', 'spectrum_type': '"""wavelength"""', 'unit': '"""micrometer"""'}), "(model_kw=model_params, spectrum_type='wavelength', unit='micrometer')\n", (5285, 5355), False, 'from dispersion import Material, Writer, Interpolation, Catalogue\n'), ((5887, 5954), 'numpy.array', 'np.array', (['[[400.0, 0.1], [500.0, 0.05], [600.0, 0.0], [700.0, 0.0]]'], {}), '([[400.0, 0.1], [500.0, 0.05], [600.0, 0.0], [700.0, 0.0]])\n', (5895, 5954), True, 'import numpy as np\n'), ((6033, 6065), 'dispersion.Interpolation', 'Interpolation', (['k_data'], {'unit': '"""nm"""'}), "(k_data, unit='nm')\n", (6046, 6065), False, 'from dispersion import Material, Writer, Interpolation, Catalogue\n'), ((6111, 6154), 'os.path.join', 'os.path.join', (['dir_path', '"""example_file2.yml"""'], {}), "(dir_path, 'example_file2.yml')\n", (6123, 6154), False, 'import os\n'), ((6167, 6188), 'dispersion.Writer', 'Writer', (['filepath', 'mat'], {}), '(filepath, mat)\n', (6173, 6188), False, 'from dispersion import Material, Writer, Interpolation, Catalogue\n'), ((7017, 7033), 'dispersion.config.default_config', 'default_config', ([], {}), '()\n', (7031, 7033), False, 'from dispersion.config import default_config, write_config\n'), ((7580, 7598), 'dispersion.config.write_config', 'write_config', (['conf'], {}), '(conf)\n', (7592, 7598), False, 'from dispersion.config import default_config, write_config\n'), ((2771, 2800), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2798, 2800), False, 'import tempfile\n'), ((2834, 2866), 'os.path.join', 'os.path.join', (['temp_dir', 'filename'], {}), '(temp_dir, filename)\n', (2846, 2866), False, 'import os\n'), ((5075, 5096), 'numpy.array', 'np.array', (['[0.35, 2.0]'], {}), '([0.35, 2.0])\n', (5083, 5096), True, 'import numpy as np\n'), ((5133, 5179), 'numpy.array', 'np.array', (['[0, 1.0, 0.05, 2.0, 0.1, 10.0, 25.0]'], {}), '([0, 1.0, 0.05, 2.0, 0.1, 10.0, 25.0])\n', (5141, 5179), True, 'import numpy as np\n'), ((6739, 6775), 'git.Repo.clone_from', 'Repo.clone_from', (['git_url', 'module_dir'], {}), '(git_url, module_dir)\n', (6754, 6775), False, 'from git import Repo\n'), ((6927, 6964), 'dispersion.Catalogue', 'Catalogue', ([], {'config': 'conf', 'rebuild': '"""All"""'}), "(config=conf, rebuild='All')\n", (6936, 6964), False, 'from dispersion import Material, Writer, Interpolation, Catalogue\n'), ((3628, 3662), 'os.path.join', 'os.path.join', (["conf['Path']", 'module'], {}), "(conf['Path'], module)\n", (3640, 3662), False, 'import os\n'), ((3682, 3707), 
'os.path.isdir', 'os.path.isdir', (['module_dir'], {}), '(module_dir)\n', (3695, 3707), False, 'import os\n'), ((3725, 3745), 'os.mkdir', 'os.mkdir', (['module_dir'], {}), '(module_dir)\n', (3733, 3745), False, 'import os\n')]
|
import sys
sys.path.append("utils")
sys.path.append("models")
from file_io import *
from train_utils import *
import numpy as np
import pandas as pd
import matplotlib as mp
import matplotlib.pyplot as plt
import time
from test import init_test
from pathlib import Path
import torch
from torch.utils.data import Dataset, DataLoader, sampler
from torch import nn
from random_word import RandomWords
from dataloaders import get_double_scan_v1_loader
#from Unet2D import Unet2D
import torch.optim as optim
import torchvision
from tqdm import tqdm
import torch.nn.functional as F
import os
from importlib import import_module
from torch.utils.tensorboard import SummaryWriter
import torchgeometry
from logger_utils import *
np.random.seed(int(time.time()))
PRINT_DEBUG = False
def train_step(X_batch, Y_batch, optimizer, model, loss_fn, acc_fn):
X_batch = X_batch.cuda()
Y_batch = Y_batch.cuda()
optimizer.zero_grad()
outputs = model(X_batch)
loss = loss_fn(outputs, Y_batch)
loss.backward()
optimizer.step()
acc = acc_fn(outputs, Y_batch)
return loss, acc, outputs
def check_accuracy(valid_dl, model, loss_fn, acc_fn, classes, tb_writer, seen_train_ex, other_logdir):
model.eval()
running_loss = 0.0
running_acc = 0.0
running_dice = 0.0
running_class_dice = np.zeros(classes)
save_batch = True
with torch.no_grad():
for X_batch, Y_batch in valid_dl:
X_batch = X_batch.cuda()
Y_batch = Y_batch.cuda()
cur_batch_sz = X_batch.size(0)
outputs = model(X_batch)
loss = loss_fn(outputs, Y_batch.long())
acc = acc_fn(outputs, Y_batch)
dice_score, dice_class_scores = mean_dice_score(outputs, Y_batch, classes)
running_acc += acc * cur_batch_sz
running_loss += loss * cur_batch_sz
running_dice += dice_score * cur_batch_sz
running_class_dice += dice_class_scores * cur_batch_sz
average_loss = running_loss / len(valid_dl.dataset)
average_acc = running_acc / len(valid_dl.dataset)
average_dice_sc = running_dice / len(valid_dl.dataset)
average_dice_class_sc = running_class_dice / len(valid_dl.dataset)
tb_writer.add_scalar("Val CE loss", average_loss, seen_train_ex)
tb_writer.add_scalar("Val dice acc", average_dice_sc, seen_train_ex)
tb_writer.add_scalar("Val px acc", average_acc, seen_train_ex)
#tb_writer.add_custom_scalars("Val class dice acc", numpy_to_class_dict(average_dice_class_sc), seen_train_ex)
for i,value in enumerate(average_dice_class_sc):
tb_writer.add_scalar(f'Val dice class_{i+1}', value, seen_train_ex)
print('{} Loss: {:.4f} PxAcc: {} Dice: {}'.format("Validation", average_loss, average_acc, average_dice_sc))
return average_dice_sc, average_dice_class_sc
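# For reference (added): mean_dice_score (imported from train_utils) presumably
# computes the per-class Dice coefficient, dice = 2*|A & B| / (|A| + |B|),
# averaged over the batch. A minimal per-class sketch under that assumption:
def _dice_sketch(pred_labels, target, num_classes, eps=1e-7):
    # pred_labels/target: integer label maps of identical shape (torch tensors)
    scores = []
    for c in range(num_classes):
        p = (pred_labels == c)
        t = (target == c)
        inter = (p & t).sum().item()
        denom = p.sum().item() + t.sum().item()
        scores.append((2.0 * inter + eps) / (denom + eps))
    return scores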
def numpy_to_class_dict(np_arr):
ret_dict = {}
for val in np_arr:
ret_dict[f'Class {val+1}'] = val
return ret_dict
def train(model, classes, train_dl, valid_dl, loss_fn, optimizer, scheduler, acc_fn, epochs, tb_writer, hparam_log, other_logdir):
print(other_logdir)
start = time.time()
model.cuda()
len_train_ds = len(train_dl.dataset)
print("Len train ds")
print(len_train_ds)
seen_train_ex = 0
avg_dice = 0.0
avg_train_loss = 0.0
best_acc = 0.0
runs_without_improved_dice = 0
highest_dice = 0.0
seen_train_ex_highest_dice = 0
hparam_log["hgst dice"] = 0.0
hparam_log["hgst dice step"] = 0.0
hparam_log["hgst dice tr CE loss"] = 0.0
for epoch in range(epochs):
save_batch = True
model.train()
weight = epoch/epochs
print("weight", weight)
#loss_fn = weighted_combined_loss(nn.CrossEntropyLoss(), dice_loss, weight)
print('Epoch {}/{}'.format(epoch, epochs - 1))
print('-' * 10)
running_loss = 0.0
running_acc = 0.0
step = 0
# iterate over data
for X_batch, Y_batch in train_dl:
#print("x batch shape",X_batch.shape)
#print("y batch shape",Y_batch.shape)
loss, acc, outputs = train_step(X_batch, Y_batch, optimizer, model, loss_fn, acc_fn)
running_acc += acc*X_batch.size(0)
running_loss += loss*X_batch.size(0)
step += 1
seen_train_ex += X_batch.size(0)
tb_writer.add_scalar("Train CE loss", loss, seen_train_ex)
tb_writer.add_scalar("Train px acc", acc, seen_train_ex)
if step % 25 == 0:
print('Current step: {} Loss: {} Acc: {} '.format(step, loss, acc))
avg_dice, avg_dice_cl = check_accuracy(valid_dl, model, loss_fn, acc_fn, classes, tb_writer, seen_train_ex, other_logdir)
if avg_dice > highest_dice:
print("highest_dice", highest_dice)
highest_dice = avg_dice
highest_dice_cl = avg_dice_cl
hparam_log["hgst dice"] = highest_dice
for i,dice in enumerate(avg_dice_cl):
hparam_log[f'Class {i+1}'] = dice
hparam_log["hgst dice step"] = seen_train_ex
hparam_log["hgst dice tr CE loss"] = loss.item()
runs_without_improved_dice = 0
torch.save(model.state_dict(), os.path.join(other_logdir, "state_dict.pth"))
else:
runs_without_improved_dice +=1
avg_train_loss = running_loss / len_train_ds
avg_train_acc = running_acc / len_train_ds
scheduler.step(avg_train_loss)
print_epoch_stats(epoch, epochs, avg_train_loss, avg_train_acc)
        if runs_without_improved_dice > 20:
            print("Dice not improving for 20 epochs, aborting training")
            break
hparam_log["last step"] = seen_train_ex
hparam_log["last dice"] = avg_dice
hparam_log["last train loss"] = avg_train_loss
time_elapsed = time.time() - start
print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
def dict_to_numpy(hparam_dict):
    """Convert tensor/array values in the dict to plain Python scalars/arrays."""
    for key in hparam_dict:
        try:
            hparam_dict[key] = hparam_dict[key].item()
        except (AttributeError, ValueError, RuntimeError):
            pass
        try:
            hparam_dict[key] = hparam_dict[key].detach().cpu().numpy()
        except AttributeError:
            pass
def init_train(cfg):  # renamed from init_test to avoid shadowing the imported test.init_test
hparam_log = {}
bs = cfg["batch_size"]
epochs_val = cfg["epochs"]
learn_rate = cfg["learning_rate"]
lr_patience = cfg["lr_patience"]
train_transforms = cfg["train_transforms"]
val_transforms = cfg["val_transforms"]
model_file = cfg["model"]
dataset = cfg["dataset"]
channel_ratio = cfg["channel_ratio"]
cross_entr_weights = cfg["cross_entr_weights"]
continue_training = False
if "custom_logdir" in cfg:
cust_logdir = cfg["custom_logdir"]
else:
cust_logdir = ""
tb_logdir = os.path.join("logdir", "tensorboard", dataset, cust_logdir, model_file)
other_logdir = os.path.join("logdir", "other", dataset, cust_logdir, model_file)
print("other_logdir", other_logdir)
    try:
        try_number = len(os.listdir(tb_logdir))
    except FileNotFoundError:
        try_number = 0
r = RandomWords()
if continue_training:
logdir_folder = "N1_None"
else:
random_word = r.get_random_word()
logdir_folder = f'N{try_number}_{random_word}'
tb_logdir = os.path.join(tb_logdir, logdir_folder)
other_logdir = os.path.join(other_logdir, logdir_folder)
os.makedirs(other_logdir, exist_ok=True)
print("other_logdir:", other_logdir)
print("tb_logdir:", tb_logdir)
tb_writer = SummaryWriter(tb_logdir)
train_loader, val_loader = get_double_scan_v1_loader(bs, train_transforms)
classes = 1
model_path = os.path.join("models",dataset)
model_import = import_model_from_path(model_file, model_path)
unet = model_import.Unet2D(6,2, channel_ratio)
if continue_training:
unet.load_state_dict(torch.load(os.path.join(other_logdir, "state_dict.pth")))
unet.cuda()
    # loss_fn = torchgeometry.losses.dice_loss  # alternative loss, superseded below
    loss_fn = torch.nn.CrossEntropyLoss(weight=torch.tensor(cross_entr_weights).cuda())
#loss_fn2 = dice_loss
#loss_fn3 = weighted_combined_loss(loss_fn, loss_fn2)
opt = torch.optim.Adam(unet.parameters(), lr=learn_rate)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(opt, patience=3, verbose=True)
train(unet, classes, train_loader, val_loader, loss_fn, opt, scheduler, mean_pixel_accuracy, epochs_val, tb_writer, hparam_log, other_logdir)
    init_test(cfg, logdir_folder, cust_logdir)  # evaluation entry point from test.py
if __name__ == "__main__":
args = add_config_parser()
cfg = get_dict(args, print_config=True)
    init_train(cfg)
|
[
"sys.path.append",
"torch.no_grad",
"os.makedirs",
"numpy.zeros",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"time.time",
"dataloaders.get_double_scan_v1_loader",
"random_word.RandomWords",
"torch.utils.tensorboard.SummaryWriter",
"test.init_test",
"os.path.join",
"os.listdir",
"torch.tensor"
] |
[((11, 35), 'sys.path.append', 'sys.path.append', (['"""utils"""'], {}), "('utils')\n", (26, 35), False, 'import sys\n'), ((36, 61), 'sys.path.append', 'sys.path.append', (['"""models"""'], {}), "('models')\n", (51, 61), False, 'import sys\n'), ((1356, 1373), 'numpy.zeros', 'np.zeros', (['classes'], {}), '(classes)\n', (1364, 1373), True, 'import numpy as np\n'), ((3180, 3191), 'time.time', 'time.time', ([], {}), '()\n', (3189, 3191), False, 'import time\n'), ((6984, 7055), 'os.path.join', 'os.path.join', (['"""logdir"""', '"""tensorboard"""', 'dataset', 'cust_logdir', 'model_file'], {}), "('logdir', 'tensorboard', dataset, cust_logdir, model_file)\n", (6996, 7055), False, 'import os\n'), ((7075, 7140), 'os.path.join', 'os.path.join', (['"""logdir"""', '"""other"""', 'dataset', 'cust_logdir', 'model_file'], {}), "('logdir', 'other', dataset, cust_logdir, model_file)\n", (7087, 7140), False, 'import os\n'), ((7284, 7297), 'random_word.RandomWords', 'RandomWords', ([], {}), '()\n', (7295, 7297), False, 'from random_word import RandomWords\n'), ((7482, 7520), 'os.path.join', 'os.path.join', (['tb_logdir', 'logdir_folder'], {}), '(tb_logdir, logdir_folder)\n', (7494, 7520), False, 'import os\n'), ((7540, 7581), 'os.path.join', 'os.path.join', (['other_logdir', 'logdir_folder'], {}), '(other_logdir, logdir_folder)\n', (7552, 7581), False, 'import os\n'), ((7586, 7626), 'os.makedirs', 'os.makedirs', (['other_logdir'], {'exist_ok': '(True)'}), '(other_logdir, exist_ok=True)\n', (7597, 7626), False, 'import os\n'), ((7720, 7744), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['tb_logdir'], {}), '(tb_logdir)\n', (7733, 7744), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((7778, 7825), 'dataloaders.get_double_scan_v1_loader', 'get_double_scan_v1_loader', (['bs', 'train_transforms'], {}), '(bs, train_transforms)\n', (7803, 7825), False, 'from dataloaders import get_double_scan_v1_loader\n'), ((7860, 7891), 'os.path.join', 'os.path.join', (['"""models"""', 'dataset'], {}), "('models', dataset)\n", (7872, 7891), False, 'import os\n'), ((8435, 8508), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['opt'], {'patience': '(3)', 'verbose': '(True)'}), '(opt, patience=3, verbose=True)\n', (8477, 8508), False, 'import torch\n'), ((8662, 8704), 'test.init_test', 'init_test', (['cfg', 'logdir_folder', 'cust_logdir'], {}), '(cfg, logdir_folder, cust_logdir)\n', (8671, 8704), False, 'from test import init_test\n'), ((8815, 8829), 'test.init_test', 'init_test', (['cfg'], {}), '(cfg)\n', (8824, 8829), False, 'from test import init_test\n'), ((776, 787), 'time.time', 'time.time', ([], {}), '()\n', (785, 787), False, 'import time\n'), ((1405, 1420), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1418, 1420), False, 'import torch\n'), ((5921, 5932), 'time.time', 'time.time', ([], {}), '()\n', (5930, 5932), False, 'import time\n'), ((7216, 7237), 'os.listdir', 'os.listdir', (['tb_logdir'], {}), '(tb_logdir)\n', (7226, 7237), False, 'import os\n'), ((5314, 5358), 'os.path.join', 'os.path.join', (['other_logdir', '"""state_dict.pth"""'], {}), "(other_logdir, 'state_dict.pth')\n", (5326, 5358), False, 'import os\n'), ((8076, 8120), 'os.path.join', 'os.path.join', (['other_logdir', '"""state_dict.pth"""'], {}), "(other_logdir, 'state_dict.pth')\n", (8088, 8120), False, 'import os\n'), ((8233, 8265), 'torch.tensor', 'torch.tensor', (['cross_entr_weights'], {}), '(cross_entr_weights)\n', (8245, 8265), False, 'import torch\n')]
|
import argparse
from timeit import default_timer as timer
import numpy as np
import tensorflow as tf
import tbpf_tf
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--degree", help="Degree of polynomial features", default=2, type=int)
parser.add_argument("-i", "--iterations", help="Number of iterations over one number of inputs",
default=10000, type=int)
parser.add_argument("--start", help="Number of inputs start", default=1, type=int)
parser.add_argument("--stop", help="Number of inputs stop", default=1001, type=int)
parser.add_argument("--step", help="Number of inputs step", default=100, type=int)
args = parser.parse_args()
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
order = args.degree
inputs = range(args.start, args.stop, args.step)
iterations = args.iterations
times = []
pfgs = [tbpf_tf.PFG2, tbpf_tf.PFG3, tbpf_tf.PFG4, tbpf_tf.PFG5]
sub_iterations = 100
# Time in batches of `sub_iterations` runs to amortize timer overhead; the
# total work stays iterations * sub_iterations. Integer division is needed
# here: with true division any positive count passed the test, turning e.g.
# -i 10 into iterations = 0.1 and zero timed loops.
if iterations // 100 > 0:
    iterations //= 100
else:
    sub_iterations = 1
for i in inputs:
times_in = []
input_tensor = tf.random.normal([i], 0, 1, tf.float32)
x = tf.reshape(tf.concat([[1], input_tensor], 0), [1, -1])
M = []
for it in range(2, order + 1):
M.append(tf.convert_to_tensor(tbpf_tf.mask_matrix(i, it, True)))
for idx in range(int(iterations)):
start = timer()
for idx2 in range(sub_iterations):
pfg = pfgs[order-2](M, x)
end = timer()
times_in.append((end-start)/sub_iterations)
times.append([order, i, np.sum(times_in), np.min(times_in), np.mean(times_in), np.max(times_in),
pfgs[order-2](M, x).numpy().size])
print('Order: {}, Inputs: {}, Overall test time: {}, Test mean time: {}, # of PF: {}'.format(
times[-1][0], times[-1][1], times[-1][2], times[-1][4], times[-1][6]))
np.savetxt(f'res_tbpf_tf_gpu_d{order}_it{iterations*sub_iterations}.csv', times,
header='order,inputs,overall time,min time,mean time,max time,num of PF',
delimiter=',')
|
[
"numpy.sum",
"argparse.ArgumentParser",
"tensorflow.random.normal",
"timeit.default_timer",
"numpy.savetxt",
"tensorflow.concat",
"tensorflow.config.experimental.set_memory_growth",
"numpy.min",
"numpy.mean",
"numpy.max",
"tbpf_tf.mask_matrix",
"tensorflow.config.experimental.list_logical_devices",
"tensorflow.config.experimental.list_physical_devices"
] |
[((128, 153), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (151, 153), False, 'import argparse\n'), ((679, 730), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (723, 730), True, 'import tensorflow as tf\n'), ((2243, 2427), 'numpy.savetxt', 'np.savetxt', (['f"""res_tbpf_tf_gpu_d{order}_it{iterations * sub_iterations}.csv"""', 'times'], {'header': '"""order,inputs,overall time,min time,mean time,max time,num of PF"""', 'delimiter': '""","""'}), "(f'res_tbpf_tf_gpu_d{order}_it{iterations * sub_iterations}.csv',\n times, header=\n 'order,inputs,overall time,min time,mean time,max time,num of PF',\n delimiter=',')\n", (2253, 2427), True, 'import numpy as np\n'), ((1468, 1507), 'tensorflow.random.normal', 'tf.random.normal', (['[i]', '(0)', '(1)', 'tf.float32'], {}), '([i], 0, 1, tf.float32)\n', (1484, 1507), True, 'import tensorflow as tf\n'), ((909, 959), 'tensorflow.config.experimental.list_logical_devices', 'tf.config.experimental.list_logical_devices', (['"""GPU"""'], {}), "('GPU')\n", (952, 959), True, 'import tensorflow as tf\n'), ((1527, 1560), 'tensorflow.concat', 'tf.concat', (['[[1], input_tensor]', '(0)'], {}), '([[1], input_tensor], 0)\n', (1536, 1560), True, 'import tensorflow as tf\n'), ((1748, 1755), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1753, 1755), True, 'from timeit import default_timer as timer\n'), ((1851, 1858), 'timeit.default_timer', 'timer', ([], {}), '()\n', (1856, 1858), True, 'from timeit import default_timer as timer\n'), ((838, 889), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (878, 889), True, 'import tensorflow as tf\n'), ((1939, 1955), 'numpy.sum', 'np.sum', (['times_in'], {}), '(times_in)\n', (1945, 1955), True, 'import numpy as np\n'), ((1957, 1973), 'numpy.min', 'np.min', (['times_in'], {}), '(times_in)\n', (1963, 1973), True, 'import numpy as np\n'), ((1975, 1992), 'numpy.mean', 'np.mean', (['times_in'], {}), '(times_in)\n', (1982, 1992), True, 'import numpy as np\n'), ((1994, 2010), 'numpy.max', 'np.max', (['times_in'], {}), '(times_in)\n', (2000, 2010), True, 'import numpy as np\n'), ((1657, 1689), 'tbpf_tf.mask_matrix', 'tbpf_tf.mask_matrix', (['i', 'it', '(True)'], {}), '(i, it, True)\n', (1676, 1689), False, 'import tbpf_tf\n')]
|
import numpy as np
def pdist(source_mtx, target_mtx):
distance_matrix = -2 * source_mtx.dot(target_mtx.transpose()) \
+ (source_mtx ** 2).sum(axis=1).reshape(-1, 1) \
+ (target_mtx ** 2).sum(axis=1).reshape(1, -1)
return distance_matrix
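# Illustrative sanity check (not in the original module; assumes scipy is
# available): pdist expands ||s - t||^2 = ||s||^2 + ||t||^2 - 2 s.t, so it
# returns *squared* Euclidean distances.
def _pdist_sanity_check():
    from scipy.spatial.distance import cdist
    source = np.random.rand(4, 8)
    target = np.random.rand(5, 8)
    assert np.allclose(pdist(source, target), cdist(source, target) ** 2)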
def get_acc(query_emb, query_idx, gall_emb, gall_idx, labels, except_self=False):
dist = pdist(query_emb, gall_emb)
if except_self:
sort_idx = np.argsort(dist, axis=1)[:, 1:21]
else:
sort_idx = np.argsort(dist, axis=1)[:, :20]
match = np.zeros((len(query_idx), 20))
for i, idx in enumerate(query_idx):
        match[i] = labels[gall_idx[sort_idx[i].astype(int)]] == labels[idx]
acc_val = []
for k in [1, 5, 10, 20]:
acc = np.sum(np.sum(match[:, :k], axis=1) > 0) / match.shape[0]
acc_val.append(acc)
return acc_val
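# Illustrative usage sketch (toy sizes, not in the original file): with the
# gallery used as its own query set, except_self=True drops the trivial
# self-match before computing top-k (k = 1, 5, 10, 20) retrieval accuracy.
def _get_acc_example():
    emb = np.random.rand(30, 8)
    labels = np.random.randint(0, 3, size=30)
    idx = np.arange(30)
    return get_acc(emb, idx, emb, idx, labels, except_self=True)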
class Metric:
def __init__(self):
pass
def __call__(self, outputs, target, loss):
raise NotImplementedError
def reset(self):
raise NotImplementedError
def value(self):
raise NotImplementedError
def name(self):
raise NotImplementedError
class AccumulatedAccuracyMetric(Metric):
"""
Works with classification model
"""
def __init__(self):
self.correct = 0
self.total = 0
def __call__(self, outputs, target, loss):
pred = outputs[0].data.max(1, keepdim=True)[1]
self.correct += pred.eq(target[0].data.view_as(pred)).cpu().sum()
self.total += target[0].size(0)
return self.value()
def reset(self):
self.correct = 0
self.total = 0
def value(self):
return 100 * float(self.correct) / self.total
def name(self):
return 'Accuracy'
class AverageNonzeroTripletsMetric(Metric):
'''
Counts average number of nonzero triplets found in minibatches
'''
def __init__(self):
self.values = []
def __call__(self, outputs, target, loss):
self.values.append(loss[1])
return self.value()
def reset(self):
self.values = []
def value(self):
return np.mean(self.values)
def name(self):
return 'Average nonzero triplets'
class RetrivalAccMetric(Metric):
def __init__(self, data_num, vec_dim=128):
self.data_num = data_num
self.vec_dim = vec_dim
self.emb = np.zeros((self.data_num, self.vec_dim), dtype=np.float16)
self.label = np.zeros(self.data_num)
self.source = np.zeros(self.data_num)
self.cnt = 0
def __call__(self, outputs, target, source):
self.emb[self.cnt:self.cnt + outputs.shape[0]] = outputs.detach().cpu().numpy().astype(np.float16)
self.label[self.cnt:self.cnt + outputs.shape[0]] = target.detach().cpu().numpy()
self.source[self.cnt:self.cnt + outputs.shape[0]] = source.detach().cpu().numpy()
self.cnt += outputs.shape[0]
def reset(self):
self.emb = np.zeros((self.data_num, self.vec_dim))
self.label = np.zeros(self.data_num)
self.source = np.zeros(self.data_num)
self.cnt = 0
def value(self):
user_idx = np.where(self.source == 0)[0]
shop_idx = np.where(self.source == 1)[0]
user_emb_mtx = self.emb[user_idx]
shop_emb_mtx = self.emb[shop_idx]
inshop_acc = get_acc(shop_emb_mtx, shop_idx, shop_emb_mtx, shop_idx, self.label, True)
u2shop_acc = get_acc(user_emb_mtx, user_idx, shop_emb_mtx, shop_idx, self.label)
return inshop_acc, u2shop_acc
def name(self):
return 'Retrieval Accuracy'
|
[
"numpy.sum",
"numpy.zeros",
"numpy.argsort",
"numpy.mean",
"numpy.where"
] |
[((2160, 2180), 'numpy.mean', 'np.mean', (['self.values'], {}), '(self.values)\n', (2167, 2180), True, 'import numpy as np\n'), ((2409, 2466), 'numpy.zeros', 'np.zeros', (['(self.data_num, self.vec_dim)'], {'dtype': 'np.float16'}), '((self.data_num, self.vec_dim), dtype=np.float16)\n', (2417, 2466), True, 'import numpy as np\n'), ((2488, 2511), 'numpy.zeros', 'np.zeros', (['self.data_num'], {}), '(self.data_num)\n', (2496, 2511), True, 'import numpy as np\n'), ((2534, 2557), 'numpy.zeros', 'np.zeros', (['self.data_num'], {}), '(self.data_num)\n', (2542, 2557), True, 'import numpy as np\n'), ((2993, 3032), 'numpy.zeros', 'np.zeros', (['(self.data_num, self.vec_dim)'], {}), '((self.data_num, self.vec_dim))\n', (3001, 3032), True, 'import numpy as np\n'), ((3054, 3077), 'numpy.zeros', 'np.zeros', (['self.data_num'], {}), '(self.data_num)\n', (3062, 3077), True, 'import numpy as np\n'), ((3100, 3123), 'numpy.zeros', 'np.zeros', (['self.data_num'], {}), '(self.data_num)\n', (3108, 3123), True, 'import numpy as np\n'), ((452, 476), 'numpy.argsort', 'np.argsort', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (462, 476), True, 'import numpy as np\n'), ((515, 539), 'numpy.argsort', 'np.argsort', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (525, 539), True, 'import numpy as np\n'), ((3186, 3212), 'numpy.where', 'np.where', (['(self.source == 0)'], {}), '(self.source == 0)\n', (3194, 3212), True, 'import numpy as np\n'), ((3235, 3261), 'numpy.where', 'np.where', (['(self.source == 1)'], {}), '(self.source == 1)\n', (3243, 3261), True, 'import numpy as np\n'), ((778, 806), 'numpy.sum', 'np.sum', (['match[:, :k]'], {'axis': '(1)'}), '(match[:, :k], axis=1)\n', (784, 806), True, 'import numpy as np\n')]
|
import logging
import numpy as np
from bokeh import plotting
from bokeh.layouts import gridplot
L = logging.getLogger(__name__)
def bokeh_plot(time, data, var_name, results, title, module, test_name):
    plot = bokeh_plot_var(time, data, var_name, results, title, module, test_name)
    return gridplot([[plot]], sizing_mode='fixed')
def bokeh_plot_var(time, data, var_name, results, title, module, test_name):
""" Method to plot QC results using Bokeh """
if module not in results or test_name not in results[module]:
L.warning(f'No results for test {module}.{test_name} found')
return
qc_test = results[module][test_name]
qc_pass = np.ma.masked_where(qc_test != 1, data)
qc_suspect = np.ma.masked_where(qc_test != 3, data)
qc_fail = np.ma.masked_where(qc_test != 4, data)
qc_notrun = np.ma.masked_where(qc_test != 2, data)
p1 = plotting.figure(x_axis_type="datetime", title=test_name + ' : ' + title)
p1.grid.grid_line_alpha = 0.3
p1.xaxis.axis_label = 'Time'
p1.yaxis.axis_label = 'Data'
p1.line(time, data, legend_label='data', color='#A6CEE3')
p1.circle(time, qc_notrun, size=2, legend_label='qc not run', color='gray', alpha=0.2)
p1.circle(time, qc_pass, size=4, legend_label='qc pass', color='green', alpha=0.5)
p1.circle(time, qc_suspect, size=4, legend_label='qc suspect', color='orange', alpha=0.7)
p1.circle(time, qc_fail, size=6, legend_label='qc fail', color='red', alpha=1.0)
p1.circle(time, qc_notrun, size=6, legend_label='qc not eval', color='gray', alpha=1.0)
return p1
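# Illustrative sketch (hypothetical data, not part of the original module):
# `results` is expected to be a nested {module: {test_name: flag_array}}
# mapping with QARTOD-style flags (1 pass, 2 not evaluated, 3 suspect, 4 fail).
def _bokeh_plot_var_example():
    import pandas as pd
    times = pd.date_range('2020-01-01', periods=4, freq='D')
    data = np.array([1.0, 2.0, 50.0, 3.0])
    results = {'qartod': {'gross_range_test': np.array([1, 1, 4, 1])}}
    return bokeh_plot_var(times, data, 'sea_water_temperature', results,
                          'demo', 'qartod', 'gross_range_test')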
def bokeh_multi_plot(stream, results, title, **kwargs):
kwargs = {
**{
'merge_tools': True,
'toolbar_location': 'below',
'sizing_mode': 'scale_width',
'plot_width': 600,
'plot_height': 200,
'ncols': 2
},
**kwargs
}
plots = list(bokeh_multi_var(stream, results, title))
return gridplot(plots, **kwargs)
def bokeh_multi_var(stream, results, title):
for vname, qcobj in results.items():
for modu, tests in qcobj.items():
for testname, testresults in tests.items():
plt = bokeh_plot_var(stream.time(), stream.data(vname), vname, qcobj, title, modu, testname)
yield plt
def bokeh_plot_collected_results(results, **kwargs):
kwargs = {
**{
'merge_tools': True,
'toolbar_location': 'below',
'sizing_mode': 'scale_width',
'plot_width': 600,
'plot_height': 200,
'ncols': 2
},
**kwargs
}
plots = []
for r in results:
if r.data.any() and r.results.any():
plots.append(bokeh_plot_collected_result(r))
return gridplot(plots, **kwargs)
def bokeh_plot_collected_result(cr):
title = f'{cr.stream_id}: {cr.package}.{cr.test}'
p1 = plotting.figure(x_axis_type="datetime", title=title)
p1.grid.grid_line_alpha = 0.3
p1.xaxis.axis_label = 'Time'
p1.yaxis.axis_label = 'Data'
qc_pass = np.ma.masked_where(cr.results != 1, cr.data)
qc_suspect = np.ma.masked_where(cr.results != 3, cr.data)
qc_fail = np.ma.masked_where(cr.results != 4, cr.data)
qc_notrun = np.ma.masked_where(cr.results != 2, cr.data)
p1.line(cr.tinp, cr.data, legend_label='data', color='#A6CEE3')
p1.circle(cr.tinp, qc_notrun, size=3, legend_label='qc not run', color='gray', alpha=0.2)
p1.circle(cr.tinp, qc_pass, size=4, legend_label='qc pass', color='green', alpha=0.5)
p1.circle(cr.tinp, qc_suspect, size=4, legend_label='qc suspect', color='orange', alpha=0.7)
p1.circle(cr.tinp, qc_fail, size=6, legend_label='qc fail', color='red', alpha=1.0)
p1.circle(cr.tinp, qc_notrun, size=3, legend_label='qc not eval', color='gray', alpha=1.0)
return p1
|
[
"bokeh.plotting.figure",
"numpy.ma.masked_where",
"logging.getLogger",
"bokeh.layouts.gridplot"
] |
[((102, 129), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (119, 129), False, 'import logging\n'), ((287, 326), 'bokeh.layouts.gridplot', 'gridplot', (['[[plot]]'], {'sizing_mode': '"""fixed"""'}), "([[plot]], sizing_mode='fixed')\n", (295, 326), False, 'from bokeh.layouts import gridplot\n'), ((664, 702), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qc_test != 1)', 'data'], {}), '(qc_test != 1, data)\n', (682, 702), True, 'import numpy as np\n'), ((720, 758), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qc_test != 3)', 'data'], {}), '(qc_test != 3, data)\n', (738, 758), True, 'import numpy as np\n'), ((773, 811), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qc_test != 4)', 'data'], {}), '(qc_test != 4, data)\n', (791, 811), True, 'import numpy as np\n'), ((828, 866), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(qc_test != 2)', 'data'], {}), '(qc_test != 2, data)\n', (846, 866), True, 'import numpy as np\n'), ((877, 949), 'bokeh.plotting.figure', 'plotting.figure', ([], {'x_axis_type': '"""datetime"""', 'title': "(test_name + ' : ' + title)"}), "(x_axis_type='datetime', title=test_name + ' : ' + title)\n", (892, 949), False, 'from bokeh import plotting\n'), ((1970, 1995), 'bokeh.layouts.gridplot', 'gridplot', (['plots'], {}), '(plots, **kwargs)\n', (1978, 1995), False, 'from bokeh.layouts import gridplot\n'), ((2786, 2811), 'bokeh.layouts.gridplot', 'gridplot', (['plots'], {}), '(plots, **kwargs)\n', (2794, 2811), False, 'from bokeh.layouts import gridplot\n'), ((2914, 2966), 'bokeh.plotting.figure', 'plotting.figure', ([], {'x_axis_type': '"""datetime"""', 'title': 'title'}), "(x_axis_type='datetime', title=title)\n", (2929, 2966), False, 'from bokeh import plotting\n'), ((3081, 3125), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(cr.results != 1)', 'cr.data'], {}), '(cr.results != 1, cr.data)\n', (3099, 3125), True, 'import numpy as np\n'), ((3143, 3187), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(cr.results != 3)', 'cr.data'], {}), '(cr.results != 3, cr.data)\n', (3161, 3187), True, 'import numpy as np\n'), ((3202, 3246), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(cr.results != 4)', 'cr.data'], {}), '(cr.results != 4, cr.data)\n', (3220, 3246), True, 'import numpy as np\n'), ((3263, 3307), 'numpy.ma.masked_where', 'np.ma.masked_where', (['(cr.results != 2)', 'cr.data'], {}), '(cr.results != 2, cr.data)\n', (3281, 3307), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import h5py
import numpy
from numpy import sin, cos, pi, degrees
from ext import hdf5handler
from matplotlib import pyplot as plt
#MKS
G = 6.67384e-11 # m^3 kg^-1 s^-2
MSun = 1.9891e30 # kg^1
AU = 149597870700 # m^1
DAY = 3600*24 # s^1
YEAR = DAY*365.25 # s^1
def rk4(h, f, t, z):
k1 = h * f(t, z)
k2 = h * f(t + 0.5*h, z + 0.5*k1)
k3 = h * f(t + 0.5*h, z + 0.5*k2)
k4 = h * f(t + h, z + k3)
return z + (k1 + 2*(k2 + k3) + k4)/6.0
def forward_euler(h, f, t, z):
return z + h*f(t, z)
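# Quick illustrative check (not part of the original script): both steppers
# share the signature step(h, f, t, z). On dz/dt = -z with h = 0.1 a single
# RK4 step matches exp(-0.1) to about 1e-7, while forward Euler gives 0.9.
def _stepper_demo():
    decay = lambda t, z: -z
    z0 = numpy.array([1.0])
    return rk4(0.1, decay, 0.0, z0), forward_euler(0.1, decay, 0.0, z0)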
def twobody_vmass(t, state):
a, e, f, w, M = state
n = (G*M/a**3)**(1/2) #mean motion
dM = -1e-5*MSun/YEAR #implement here?
da = -a*(1 + e**2 + 2*e*cos(f)) / (1-e**2) * dM/M
de = -(e+cos(f)) * dM/M
dw = -sin(f) / e * dM/M
df = -dw + n*(1+e*cos(f))**2 / ((1 - e**2)**(3/2))
return numpy.array([da, de, df, dw, dM])
def main():
#todo:energy, ang_mom, position, etc.
dt = 1*YEAR
M0 = 1*MSun
a0 = 100*AU
e0 = 0.9
f0 = 0
w0 = 0
state = numpy.array([a0, e0, f0, w0, M0])
with hdf5handler.HDF5Handler('test.hdf5') as handle:
for method in [rk4]:
handle.prefix = method.__name__
for t in numpy.arange(0, 5e4*YEAR, dt):
print("t={:.3f} year".format(t/YEAR))
state = method(dt, twobody_vmass, t, state)
handle.put(t, '/time')
handle.put(state[0], '/a')
handle.put(state[1], '/e')
handle.put(state[2], '/f')
handle.put(state[3], '/w')
handle.put(state[4], '/mass')
    f = h5py.File('test.hdf5', 'r')
fig = plt.figure()
ax1 = fig.add_subplot(221)
ax2 = fig.add_subplot(222)
ax3 = fig.add_subplot(223)
ax4 = fig.add_subplot(224)
for method in ['rk4']:
        ax1.plot(f[method+'/time'][:]/YEAR, f[method+'/a'][:] / AU)
        ax2.plot(f[method+'/time'][:]/YEAR, f[method+'/e'][:])
        ax3.plot(f[method+'/time'][:]/YEAR, f[method+'/f'][:] % (2*pi))
        ax4.plot(f[method+'/time'][:]/YEAR, f[method+'/mass'][:] / MSun)
plt.savefig('image.png')
fig = plt.figure()
ax = fig.add_subplot(111,polar=True)
for method in ['rk4']:
        ax.plot(f[method+'/f'][:] % (2*pi), f[method+'/time'][:] / YEAR)
plt.savefig('image1.png')
fig = plt.figure()
ax = fig.add_subplot(111,polar=True)
for method in ['rk4']:
        ax.plot(f[method+'/w'][:] % (2*pi), f[method+'/time'][:] / YEAR)
plt.savefig('image2.png')
if __name__ == "__main__":
main()
|
[
"h5py.File",
"ext.hdf5handler.HDF5Handler",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.savefig"
] |
[((881, 914), 'numpy.array', 'numpy.array', (['[da, de, df, dw, dM]'], {}), '([da, de, df, dw, dM])\n', (892, 914), False, 'import numpy\n'), ((1067, 1100), 'numpy.array', 'numpy.array', (['[a0, e0, f0, w0, M0]'], {}), '([a0, e0, f0, w0, M0])\n', (1078, 1100), False, 'import numpy\n'), ((1664, 1686), 'h5py.File', 'h5py.File', (['"""test.hdf5"""'], {}), "('test.hdf5')\n", (1673, 1686), False, 'import h5py\n'), ((1697, 1709), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1707, 1709), True, 'from matplotlib import pyplot as plt\n'), ((2165, 2189), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""image.png"""'], {}), "('image.png')\n", (2176, 2189), True, 'from matplotlib import pyplot as plt\n'), ((2202, 2214), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2212, 2214), True, 'from matplotlib import pyplot as plt\n'), ((2365, 2390), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""image1.png"""'], {}), "('image1.png')\n", (2376, 2390), True, 'from matplotlib import pyplot as plt\n'), ((2403, 2415), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2413, 2415), True, 'from matplotlib import pyplot as plt\n'), ((2566, 2591), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""image2.png"""'], {}), "('image2.png')\n", (2577, 2591), True, 'from matplotlib import pyplot as plt\n'), ((1110, 1146), 'ext.hdf5handler.HDF5Handler', 'hdf5handler.HDF5Handler', (['"""test.hdf5"""'], {}), "('test.hdf5')\n", (1133, 1146), False, 'from ext import hdf5handler\n'), ((1252, 1287), 'numpy.arange', 'numpy.arange', (['(0)', '(50000.0 * YEAR)', 'dt'], {}), '(0, 50000.0 * YEAR, dt)\n', (1264, 1287), False, 'import numpy\n'), ((770, 776), 'numpy.cos', 'cos', (['f'], {}), '(f)\n', (773, 776), False, 'from numpy import sin, cos, pi, degrees\n'), ((796, 802), 'numpy.sin', 'sin', (['f'], {}), '(f)\n', (799, 802), False, 'from numpy import sin, cos, pi, degrees\n'), ((730, 736), 'numpy.cos', 'cos', (['f'], {}), '(f)\n', (733, 736), False, 'from numpy import sin, cos, pi, degrees\n'), ((837, 843), 'numpy.cos', 'cos', (['f'], {}), '(f)\n', (840, 843), False, 'from numpy import sin, cos, pi, degrees\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
eps = np.finfo(float).eps
def infnorm(x):
return np.linalg.norm(x, np.inf)
def scaled_tol(n):
tol = 5e1*eps if n < 20 else np.log(n)**2.5*eps
return tol
# bespoke test generators
def infNormLessThanTol(a, b, tol):
def asserter(self):
self.assertLessEqual(infnorm(a-b), tol)
return asserter
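# Illustrative sketch (hypothetical test class, not in the original file):
# the generator above is meant to attach dynamically built assertions, e.g.
#
#   class TestInfNorm(unittest.TestCase):
#       pass
#   TestInfNorm.test_zero = infNormLessThanTol(np.zeros(3), np.zeros(3), eps)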
# test functions
testfunctions = []
fun_details = [
# (
# function,
# name for the test printouts,
# Matlab chebfun adaptive degree on [-1,1],
# Any roots on the real line?
# )
(lambda x: x**3 + x**2 + x + 1.1, 'poly3(x)', 4, True),
(lambda x: np.exp(x), 'exp(x)', 15, False),
(lambda x: np.sin(x), 'sin(x)', 14, True),
(lambda x: .2+.1*np.sin(x), '(.2+.1*sin(x))', 14, False),
(lambda x: np.cos(20*x), 'cos(20x)', 51, True),
(lambda x: 0.*x+1., 'constfun', 1, False),
(lambda x: 0.*x, 'zerofun', 1, True),
]
for k, items in enumerate(fun_details):
fun = items[0]
fun.__name__ = items[1]
testfunctions.append((fun, items[2], items[3]))
# TODO: check these lengths against Chebfun
# TODO: more examples
|
[
"numpy.log",
"numpy.finfo",
"numpy.sin",
"numpy.linalg.norm",
"numpy.exp",
"numpy.cos"
] |
[((51, 66), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (59, 66), True, 'import numpy as np\n'), ((99, 124), 'numpy.linalg.norm', 'np.linalg.norm', (['x', 'np.inf'], {}), '(x, np.inf)\n', (113, 124), True, 'import numpy as np\n'), ((654, 663), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (660, 663), True, 'import numpy as np\n'), ((722, 731), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (728, 731), True, 'import numpy as np\n'), ((857, 871), 'numpy.cos', 'np.cos', (['(20 * x)'], {}), '(20 * x)\n', (863, 871), True, 'import numpy as np\n'), ((178, 187), 'numpy.log', 'np.log', (['n'], {}), '(n)\n', (184, 187), True, 'import numpy as np\n'), ((795, 804), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (801, 804), True, 'import numpy as np\n')]
|
import pytest
import numpy as np
import pandas as pd
from pandas import Categorical, Series, CategoricalIndex
from pandas.core.dtypes.concat import union_categoricals
from pandas.util import testing as tm
class TestUnionCategoricals(object):
def test_union_categorical(self):
# GH 13361
data = [
(list('abc'), list('abd'), list('abcabd')),
([0, 1, 2], [2, 3, 4], [0, 1, 2, 2, 3, 4]),
([0, 1.2, 2], [2, 3.4, 4], [0, 1.2, 2, 2, 3.4, 4]),
(['b', 'b', np.nan, 'a'], ['a', np.nan, 'c'],
['b', 'b', np.nan, 'a', 'a', np.nan, 'c']),
(pd.date_range('2014-01-01', '2014-01-05'),
pd.date_range('2014-01-06', '2014-01-07'),
pd.date_range('2014-01-01', '2014-01-07')),
(pd.date_range('2014-01-01', '2014-01-05', tz='US/Central'),
pd.date_range('2014-01-06', '2014-01-07', tz='US/Central'),
pd.date_range('2014-01-01', '2014-01-07', tz='US/Central')),
(pd.period_range('2014-01-01', '2014-01-05'),
pd.period_range('2014-01-06', '2014-01-07'),
pd.period_range('2014-01-01', '2014-01-07')),
]
for a, b, combined in data:
for box in [Categorical, CategoricalIndex, Series]:
result = union_categoricals([box(Categorical(a)),
box(Categorical(b))])
expected = Categorical(combined)
tm.assert_categorical_equal(result, expected,
check_category_order=True)
# new categories ordered by appearance
s = Categorical(['x', 'y', 'z'])
s2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([s, s2])
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
s = Categorical([0, 1.2, 2], ordered=True)
s2 = Categorical([0, 1.2, 2], ordered=True)
result = union_categoricals([s, s2])
expected = Categorical([0, 1.2, 2, 0, 1.2, 2], ordered=True)
tm.assert_categorical_equal(result, expected)
# must exactly match types
s = Categorical([0, 1.2, 2])
s2 = Categorical([2, 3, 4])
msg = 'dtype of categories must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([s, s2])
msg = 'No Categoricals to union'
with tm.assert_raises_regex(ValueError, msg):
union_categoricals([])
def test_union_categoricals_nan(self):
# GH 13759
res = union_categoricals([pd.Categorical([1, 2, np.nan]),
pd.Categorical([3, 2, np.nan])])
exp = Categorical([1, 2, np.nan, 3, 2, np.nan])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical(['A', 'B']),
pd.Categorical(['B', 'B', np.nan])])
exp = Categorical(['A', 'B', 'B', 'B', np.nan])
tm.assert_categorical_equal(res, exp)
val1 = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-03-01'),
pd.NaT]
val2 = [pd.NaT, pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-02-01')]
res = union_categoricals([pd.Categorical(val1), pd.Categorical(val2)])
exp = Categorical(val1 + val2,
categories=[pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-03-01'),
pd.Timestamp('2011-02-01')])
tm.assert_categorical_equal(res, exp)
# all NaN
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical(['X'])])
exp = Categorical([np.nan, np.nan, 'X'])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([np.nan, np.nan]),
pd.Categorical([np.nan, np.nan])])
exp = Categorical([np.nan, np.nan, np.nan, np.nan])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_empty(self):
# GH 13759
res = union_categoricals([pd.Categorical([]),
pd.Categorical([])])
exp = Categorical([])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([pd.Categorical([]),
pd.Categorical([1.0])])
exp = Categorical([1.0])
tm.assert_categorical_equal(res, exp)
# to make dtype equal
nanc = pd.Categorical(np.array([np.nan], dtype=np.float64))
res = union_categoricals([nanc,
pd.Categorical([])])
tm.assert_categorical_equal(res, nanc)
def test_union_categorical_same_category(self):
# check fastpath
c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])
c2 = Categorical([3, 2, 1, np.nan], categories=[1, 2, 3, 4])
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, 4, 3, 2, 1, np.nan],
categories=[1, 2, 3, 4])
tm.assert_categorical_equal(res, exp)
c1 = Categorical(['z', 'z', 'z'], categories=['x', 'y', 'z'])
c2 = Categorical(['x', 'x', 'x'], categories=['x', 'y', 'z'])
res = union_categoricals([c1, c2])
exp = Categorical(['z', 'z', 'z', 'x', 'x', 'x'],
categories=['x', 'y', 'z'])
tm.assert_categorical_equal(res, exp)
def test_union_categoricals_ordered(self):
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
res = union_categoricals([c1, c1])
exp = Categorical([1, 2, 3, 1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2])
exp = Categorical([1, 2, 3, np.nan, 3, 2], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_ignore_order(self):
# GH 15219
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], ordered=False)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
msg = 'Categorical.ordered must be the same'
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
res = union_categoricals([c1, c1], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c1, c1], ignore_order=False)
exp = Categorical([1, 2, 3, 1, 2, 3],
categories=[1, 2, 3], ordered=True)
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3, np.nan], ordered=True)
c2 = Categorical([3, 2], categories=[1, 2, 3], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, np.nan, 3, 2])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([1, 2, 3], categories=[3, 2, 1], ordered=True)
res = union_categoricals([c1, c2], ignore_order=True)
exp = Categorical([1, 2, 3, 1, 2, 3])
tm.assert_categorical_equal(res, exp)
res = union_categoricals([c2, c1], ignore_order=True,
sort_categories=True)
exp = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])
tm.assert_categorical_equal(res, exp)
c1 = Categorical([1, 2, 3], ordered=True)
c2 = Categorical([4, 5, 6], ordered=True)
result = union_categoricals([c1, c2], ignore_order=True)
expected = Categorical([1, 2, 3, 4, 5, 6])
tm.assert_categorical_equal(result, expected)
msg = "to union ordered Categoricals, all categories must be the same"
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2], ignore_order=False)
with tm.assert_raises_regex(TypeError, msg):
union_categoricals([c1, c2])
def test_union_categoricals_sort(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['a', 'b', 'c', 'x', 'y', 'z'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['a', 'b'], categories=['c', 'a', 'b'])
c2 = Categorical(['b', 'c'], categories=['c', 'a', 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['b', 'x'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=True)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
with pytest.raises(TypeError):
union_categoricals([c1, c2], sort_categories=True)
def test_union_categoricals_sort_false(self):
# GH 13846
c1 = Categorical(['x', 'y', 'z'])
c2 = Categorical(['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', 'y', 'z', 'a', 'b', 'c'],
categories=['x', 'y', 'z', 'a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath
c1 = Categorical(['a', 'b'], categories=['b', 'a', 'c'])
c2 = Categorical(['b', 'c'], categories=['b', 'a', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['b', 'a', 'c'])
tm.assert_categorical_equal(result, expected)
# fastpath - skip resort
c1 = Categorical(['a', 'b'], categories=['a', 'b', 'c'])
c2 = Categorical(['b', 'c'], categories=['a', 'b', 'c'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['a', 'b', 'b', 'c'],
categories=['a', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['x', np.nan])
c2 = Categorical([np.nan, 'b'])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['x', np.nan, np.nan, 'b'],
categories=['x', 'b'])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([np.nan])
c2 = Categorical([np.nan])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([np.nan, np.nan], categories=[])
tm.assert_categorical_equal(result, expected)
c1 = Categorical([])
c2 = Categorical([])
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical([])
tm.assert_categorical_equal(result, expected)
c1 = Categorical(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)
c2 = Categorical(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)
result = union_categoricals([c1, c2], sort_categories=False)
expected = Categorical(['b', 'a', 'a', 'c'],
categories=['b', 'a', 'c'], ordered=True)
tm.assert_categorical_equal(result, expected)
def test_union_categorical_unwrap(self):
# GH 14173
c1 = Categorical(['a', 'b'])
c2 = pd.Series(['b', 'c'], dtype='category')
result = union_categoricals([c1, c2])
expected = Categorical(['a', 'b', 'b', 'c'])
tm.assert_categorical_equal(result, expected)
c2 = CategoricalIndex(c2)
result = union_categoricals([c1, c2])
tm.assert_categorical_equal(result, expected)
c1 = Series(c1)
result = union_categoricals([c1, c2])
tm.assert_categorical_equal(result, expected)
with pytest.raises(TypeError):
union_categoricals([c1, ['a', 'b', 'c']])
|
[
"pandas.core.dtypes.concat.union_categoricals",
"pandas.Timestamp",
"pandas.date_range",
"pandas.period_range",
"pandas.util.testing.assert_raises_regex",
"pytest.raises",
"numpy.array",
"pandas.Series",
"pandas.util.testing.assert_categorical_equal",
"pandas.Categorical",
"pandas.CategoricalIndex"
] |
[((1665, 1693), 'pandas.Categorical', 'Categorical', (["['x', 'y', 'z']"], {}), "(['x', 'y', 'z'])\n", (1676, 1693), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((1707, 1735), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (1718, 1735), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((1753, 1780), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[s, s2]'], {}), '([s, s2])\n', (1771, 1780), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((1800, 1890), 'pandas.Categorical', 'Categorical', (["['x', 'y', 'z', 'a', 'b', 'c']"], {'categories': "['x', 'y', 'z', 'a', 'b', 'c']"}), "(['x', 'y', 'z', 'a', 'b', 'c'], categories=['x', 'y', 'z', 'a',\n 'b', 'c'])\n", (1811, 1890), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((1926, 1971), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (1953, 1971), True, 'from pandas.util import testing as tm\n'), ((1985, 2023), 'pandas.Categorical', 'Categorical', (['[0, 1.2, 2]'], {'ordered': '(True)'}), '([0, 1.2, 2], ordered=True)\n', (1996, 2023), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((2037, 2075), 'pandas.Categorical', 'Categorical', (['[0, 1.2, 2]'], {'ordered': '(True)'}), '([0, 1.2, 2], ordered=True)\n', (2048, 2075), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((2093, 2120), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[s, s2]'], {}), '([s, s2])\n', (2111, 2120), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((2140, 2189), 'pandas.Categorical', 'Categorical', (['[0, 1.2, 2, 0, 1.2, 2]'], {'ordered': '(True)'}), '([0, 1.2, 2, 0, 1.2, 2], ordered=True)\n', (2151, 2189), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((2198, 2243), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (2225, 2243), True, 'from pandas.util import testing as tm\n'), ((2292, 2316), 'pandas.Categorical', 'Categorical', (['[0, 1.2, 2]'], {}), '([0, 1.2, 2])\n', (2303, 2316), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((2330, 2352), 'pandas.Categorical', 'Categorical', (['[2, 3, 4]'], {}), '([2, 3, 4])\n', (2341, 2352), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((2840, 2881), 'pandas.Categorical', 'Categorical', (['[1, 2, np.nan, 3, 2, np.nan]'], {}), '([1, 2, np.nan, 3, 2, np.nan])\n', (2851, 2881), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((2890, 2927), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (2917, 2927), True, 'from pandas.util import testing as tm\n'), ((3076, 3117), 'pandas.Categorical', 'Categorical', (["['A', 'B', 'B', 'B', np.nan]"], {}), "(['A', 'B', 'B', 'B', np.nan])\n", (3087, 3117), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((3126, 3163), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (3153, 3163), True, 'from pandas.util import testing as tm\n'), ((3683, 3720), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (3710, 3720), True, 'from pandas.util import testing as tm\n'), ((3880, 3914), 'pandas.Categorical', 'Categorical', (["[np.nan, np.nan, 'X']"], {}), "([np.nan, np.nan, 'X'])\n", (3891, 3914), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((3923, 3960), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (3950, 3960), True, 'from pandas.util import testing as tm\n'), ((4113, 4158), 'pandas.Categorical', 'Categorical', (['[np.nan, np.nan, np.nan, np.nan]'], {}), '([np.nan, np.nan, np.nan, np.nan])\n', (4124, 4158), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((4167, 4204), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (4194, 4204), True, 'from pandas.util import testing as tm\n'), ((4393, 4408), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (4404, 4408), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((4417, 4454), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (4444, 4454), True, 'from pandas.util import testing as tm\n'), ((4582, 4600), 'pandas.Categorical', 'Categorical', (['[1.0]'], {}), '([1.0])\n', (4593, 4600), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((4609, 4646), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (4636, 4646), True, 'from pandas.util import testing as tm\n'), ((4849, 4887), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'nanc'], {}), '(res, nanc)\n', (4876, 4887), True, 'from pandas.util import testing as tm\n'), ((4979, 5029), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 4]'], {'categories': '[1, 2, 3, 4]'}), '([1, 2, 3, 4], categories=[1, 2, 3, 4])\n', (4990, 5029), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5043, 5098), 'pandas.Categorical', 'Categorical', (['[3, 2, 1, np.nan]'], {'categories': '[1, 2, 3, 4]'}), '([3, 2, 1, np.nan], categories=[1, 2, 3, 4])\n', (5054, 5098), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5113, 5141), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (5131, 5141), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((5156, 5223), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 4, 3, 2, 1, np.nan]'], {'categories': '[1, 2, 3, 4]'}), '([1, 2, 3, 4, 3, 2, 1, np.nan], categories=[1, 2, 3, 4])\n', (5167, 5223), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5258, 5295), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (5285, 5295), True, 'from pandas.util import testing as tm\n'), ((5310, 5366), 'pandas.Categorical', 'Categorical', (["['z', 'z', 'z']"], {'categories': "['x', 'y', 'z']"}), "(['z', 'z', 'z'], categories=['x', 'y', 'z'])\n", (5321, 5366), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5380, 5436), 'pandas.Categorical', 'Categorical', (["['x', 'x', 'x']"], {'categories': "['x', 'y', 'z']"}), "(['x', 'x', 'x'], categories=['x', 'y', 'z'])\n", (5391, 5436), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5451, 5479), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (5469, 5479), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((5494, 5565), 'pandas.Categorical', 'Categorical', (["['z', 'z', 'z', 'x', 'x', 'x']"], {'categories': "['x', 'y', 'z']"}), "(['z', 'z', 'z', 'x', 'x', 'x'], categories=['x', 'y', 'z'])\n", (5505, 5565), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5600, 5637), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (5627, 5637), True, 'from pandas.util import testing as tm\n'), ((5699, 5735), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(True)'}), '([1, 2, 3], ordered=True)\n', (5710, 5735), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5749, 5786), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(False)'}), '([1, 2, 3], ordered=False)\n', (5760, 5786), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((5950, 5978), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c1]'], {}), '([c1, c1])\n', (5968, 5978), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((5993, 6038), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 1, 2, 3]'], {'ordered': '(True)'}), '([1, 2, 3, 1, 2, 3], ordered=True)\n', (6004, 6038), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6047, 6084), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (6074, 6084), True, 'from pandas.util import testing as tm\n'), ((6099, 6143), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, np.nan]'], {'ordered': '(True)'}), '([1, 2, 3, np.nan], ordered=True)\n', (6110, 6143), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6157, 6212), 'pandas.Categorical', 'Categorical', (['[3, 2]'], {'categories': '[1, 2, 3]', 'ordered': '(True)'}), '([3, 2], categories=[1, 2, 3], ordered=True)\n', (6168, 6212), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6228, 6256), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (6246, 6256), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((6271, 6321), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, np.nan, 3, 2]'], {'ordered': '(True)'}), '([1, 2, 3, np.nan, 3, 2], ordered=True)\n', (6282, 6321), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6330, 6367), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (6357, 6367), True, 'from pandas.util import testing as tm\n'), ((6382, 6418), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(True)'}), '([1, 2, 3], ordered=True)\n', (6393, 6418), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6432, 6490), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'categories': '[3, 2, 1]', 'ordered': '(True)'}), '([1, 2, 3], categories=[3, 2, 1], ordered=True)\n', (6443, 6490), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6750, 6786), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(True)'}), '([1, 2, 3], ordered=True)\n', (6761, 6786), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6800, 6837), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(False)'}), '([1, 2, 3], ordered=False)\n', (6811, 6837), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6853, 6900), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'ignore_order': '(True)'}), '([c1, c2], ignore_order=True)\n', (6871, 6900), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((6915, 6946), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 1, 2, 3]'], {}), '([1, 2, 3, 1, 2, 3])\n', (6926, 6946), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((6955, 6992), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (6982, 6992), True, 'from pandas.util import testing as tm\n'), ((7176, 7223), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c1]'], {'ignore_order': '(True)'}), '([c1, c1], ignore_order=True)\n', (7194, 7223), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((7238, 7269), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 1, 2, 3]'], {}), '([1, 2, 3, 1, 2, 3])\n', (7249, 7269), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7278, 7315), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (7305, 7315), True, 'from pandas.util import testing as tm\n'), ((7331, 7379), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c1]'], {'ignore_order': '(False)'}), '([c1, c1], ignore_order=False)\n', (7349, 7379), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((7394, 7461), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 1, 2, 3]'], {'categories': '[1, 2, 3]', 'ordered': '(True)'}), '([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True)\n', (7405, 7461), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7496, 7533), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (7523, 7533), True, 'from pandas.util import testing as tm\n'), ((7548, 7592), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, np.nan]'], {'ordered': '(True)'}), '([1, 2, 3, np.nan], ordered=True)\n', (7559, 7592), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7606, 7661), 'pandas.Categorical', 'Categorical', (['[3, 2]'], {'categories': '[1, 2, 3]', 'ordered': '(True)'}), '([3, 2], categories=[1, 2, 3], ordered=True)\n', (7617, 7661), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7677, 7724), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'ignore_order': '(True)'}), '([c1, c2], ignore_order=True)\n', (7695, 7724), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((7739, 7775), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, np.nan, 3, 2]'], {}), '([1, 2, 3, np.nan, 3, 2])\n', (7750, 7775), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7784, 7821), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (7811, 7821), True, 'from pandas.util import testing as tm\n'), ((7836, 7872), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(True)'}), '([1, 2, 3], ordered=True)\n', (7847, 7872), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7886, 7944), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'categories': '[3, 2, 1]', 'ordered': '(True)'}), '([1, 2, 3], categories=[3, 2, 1], ordered=True)\n', (7897, 7944), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((7960, 8007), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'ignore_order': '(True)'}), '([c1, c2], ignore_order=True)\n', (7978, 8007), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((8022, 8053), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 1, 2, 3]'], {}), '([1, 2, 3, 1, 2, 3])\n', (8033, 8053), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((8062, 8099), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (8089, 8099), True, 'from pandas.util import testing as tm\n'), ((8115, 8184), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c2, c1]'], {'ignore_order': '(True)', 'sort_categories': '(True)'}), '([c2, c1], ignore_order=True, sort_categories=True)\n', (8133, 8184), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((8232, 8285), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 1, 2, 3]'], {'categories': '[1, 2, 3]'}), '([1, 2, 3, 1, 2, 3], categories=[1, 2, 3])\n', (8243, 8285), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((8294, 8331), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['res', 'exp'], {}), '(res, exp)\n', (8321, 8331), True, 'from pandas.util import testing as tm\n'), ((8346, 8382), 'pandas.Categorical', 'Categorical', (['[1, 2, 3]'], {'ordered': '(True)'}), '([1, 2, 3], ordered=True)\n', (8357, 8382), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((8396, 8432), 'pandas.Categorical', 'Categorical', (['[4, 5, 6]'], {'ordered': '(True)'}), '([4, 5, 6], ordered=True)\n', (8407, 8432), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((8450, 8497), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'ignore_order': '(True)'}), '([c1, c2], ignore_order=True)\n', (8468, 8497), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((8517, 8548), 'pandas.Categorical', 'Categorical', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (8528, 8548), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((8557, 8602), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (8584, 8602), True, 'from pandas.util import testing as tm\n'), ((8969, 8997), 'pandas.Categorical', 'Categorical', (["['x', 'y', 'z']"], {}), "(['x', 'y', 'z'])\n", (8980, 8997), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9011, 9039), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (9022, 9039), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9057, 9107), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (9075, 9107), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((9127, 9217), 'pandas.Categorical', 'Categorical', (["['x', 'y', 'z', 'a', 'b', 'c']"], {'categories': "['a', 'b', 'c', 'x', 'y', 'z']"}), "(['x', 'y', 'z', 'a', 'b', 'c'], categories=['a', 'b', 'c', 'x',\n 'y', 'z'])\n", (9138, 9217), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9253, 9298), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (9280, 9298), True, 'from pandas.util import testing as tm\n'), ((9332, 9383), 'pandas.Categorical', 'Categorical', (["['a', 'b']"], {'categories': "['b', 'a', 'c']"}), "(['a', 'b'], categories=['b', 'a', 'c'])\n", (9343, 9383), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9397, 9448), 'pandas.Categorical', 'Categorical', (["['b', 'c']"], {'categories': "['b', 'a', 'c']"}), "(['b', 'c'], categories=['b', 'a', 'c'])\n", (9408, 9448), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9466, 9516), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (9484, 9516), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((9536, 9597), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'b', 'c'], categories=['a', 'b', 'c'])\n", (9547, 9597), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9637, 9682), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (9664, 9682), True, 'from pandas.util import testing as tm\n'), ((9697, 9748), 'pandas.Categorical', 'Categorical', (["['a', 'b']"], {'categories': "['c', 'a', 'b']"}), "(['a', 'b'], categories=['c', 'a', 'b'])\n", (9708, 9748), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9762, 9813), 'pandas.Categorical', 'Categorical', (["['b', 'c']"], {'categories': "['c', 'a', 'b']"}), "(['b', 'c'], categories=['c', 'a', 'b'])\n", (9773, 9813), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((9831, 9881), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (9849, 9881), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((9901, 9962), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'b', 'c'], categories=['a', 'b', 'c'])\n", (9912, 9962), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10002, 10047), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (10029, 10047), True, 'from pandas.util import testing as tm\n'), ((10095, 10146), 'pandas.Categorical', 'Categorical', (["['a', 'b']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b'], categories=['a', 'b', 'c'])\n", (10106, 10146), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10160, 10211), 'pandas.Categorical', 'Categorical', (["['b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['b', 'c'], categories=['a', 'b', 'c'])\n", (10171, 10211), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10229, 10279), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (10247, 10279), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((10299, 10360), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'b', 'c'], categories=['a', 'b', 'c'])\n", (10310, 10360), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10400, 10445), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (10427, 10445), True, 'from pandas.util import testing as tm\n'), ((10460, 10486), 'pandas.Categorical', 'Categorical', (["['x', np.nan]"], {}), "(['x', np.nan])\n", (10471, 10486), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10500, 10526), 'pandas.Categorical', 'Categorical', (["[np.nan, 'b']"], {}), "([np.nan, 'b'])\n", (10511, 10526), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10544, 10594), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (10562, 10594), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((10614, 10676), 'pandas.Categorical', 'Categorical', (["['x', np.nan, np.nan, 'b']"], {'categories': "['b', 'x']"}), "(['x', np.nan, np.nan, 'b'], categories=['b', 'x'])\n", (10625, 10676), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10716, 10761), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (10743, 10761), True, 'from pandas.util import testing as tm\n'), ((10776, 10797), 'pandas.Categorical', 'Categorical', (['[np.nan]'], {}), '([np.nan])\n', (10787, 10797), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10811, 10832), 'pandas.Categorical', 'Categorical', (['[np.nan]'], {}), '([np.nan])\n', (10822, 10832), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10850, 10900), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (10868, 10900), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((10920, 10964), 'pandas.Categorical', 'Categorical', (['[np.nan, np.nan]'], {'categories': '[]'}), '([np.nan, np.nan], categories=[])\n', (10931, 10964), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((10973, 11018), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (11000, 11018), True, 'from pandas.util import testing as tm\n'), ((11033, 11048), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (11044, 11048), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11062, 11077), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (11073, 11077), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11095, 11145), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (11113, 11145), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((11165, 11180), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (11176, 11180), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11189, 11234), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (11216, 11234), True, 'from pandas.util import testing as tm\n'), ((11249, 11314), 'pandas.Categorical', 'Categorical', (["['b', 'a']"], {'categories': "['b', 'a', 'c']", 'ordered': '(True)'}), "(['b', 'a'], categories=['b', 'a', 'c'], ordered=True)\n", (11260, 11314), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11328, 11393), 'pandas.Categorical', 'Categorical', (["['a', 'c']"], {'categories': "['b', 'a', 'c']", 'ordered': '(True)'}), "(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)\n", (11339, 11393), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11579, 11607), 'pandas.Categorical', 'Categorical', (["['x', 'y', 'z']"], {}), "(['x', 'y', 'z'])\n", (11590, 11607), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11621, 11649), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'c']"], {}), "(['a', 'b', 'c'])\n", (11632, 11649), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11667, 11718), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (11685, 11718), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((11738, 11828), 'pandas.Categorical', 'Categorical', (["['x', 'y', 'z', 'a', 'b', 'c']"], {'categories': "['x', 'y', 'z', 'a', 'b', 'c']"}), "(['x', 'y', 'z', 'a', 'b', 'c'], categories=['x', 'y', 'z', 'a',\n 'b', 'c'])\n", (11749, 11828), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((11864, 11909), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (11891, 11909), True, 'from pandas.util import testing as tm\n'), ((11943, 11994), 'pandas.Categorical', 'Categorical', (["['a', 'b']"], {'categories': "['b', 'a', 'c']"}), "(['a', 'b'], categories=['b', 'a', 'c'])\n", (11954, 11994), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12008, 12059), 'pandas.Categorical', 'Categorical', (["['b', 'c']"], {'categories': "['b', 'a', 'c']"}), "(['b', 'c'], categories=['b', 'a', 'c'])\n", (12019, 12059), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12077, 12128), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (12095, 12128), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((12148, 12209), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'b', 'c']"], {'categories': "['b', 'a', 'c']"}), "(['a', 'b', 'b', 'c'], categories=['b', 'a', 'c'])\n", (12159, 12209), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12249, 12294), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (12276, 12294), True, 'from pandas.util import testing as tm\n'), ((12342, 12393), 'pandas.Categorical', 'Categorical', (["['a', 'b']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b'], categories=['a', 'b', 'c'])\n", (12353, 12393), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12407, 12458), 'pandas.Categorical', 'Categorical', (["['b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['b', 'c'], categories=['a', 'b', 'c'])\n", (12418, 12458), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12476, 12527), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (12494, 12527), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((12547, 12608), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'b', 'c']"], {'categories': "['a', 'b', 'c']"}), "(['a', 'b', 'b', 'c'], categories=['a', 'b', 'c'])\n", (12558, 12608), False, 'from pandas import 
Categorical, Series, CategoricalIndex\n'), ((12648, 12693), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (12675, 12693), True, 'from pandas.util import testing as tm\n'), ((12708, 12734), 'pandas.Categorical', 'Categorical', (["['x', np.nan]"], {}), "(['x', np.nan])\n", (12719, 12734), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12748, 12774), 'pandas.Categorical', 'Categorical', (["[np.nan, 'b']"], {}), "([np.nan, 'b'])\n", (12759, 12774), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12792, 12843), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (12810, 12843), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((12863, 12925), 'pandas.Categorical', 'Categorical', (["['x', np.nan, np.nan, 'b']"], {'categories': "['x', 'b']"}), "(['x', np.nan, np.nan, 'b'], categories=['x', 'b'])\n", (12874, 12925), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((12965, 13010), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (12992, 13010), True, 'from pandas.util import testing as tm\n'), ((13025, 13046), 'pandas.Categorical', 'Categorical', (['[np.nan]'], {}), '([np.nan])\n', (13036, 13046), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13060, 13081), 'pandas.Categorical', 'Categorical', (['[np.nan]'], {}), '([np.nan])\n', (13071, 13081), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13099, 13150), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (13117, 13150), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((13170, 13214), 'pandas.Categorical', 'Categorical', (['[np.nan, np.nan]'], {'categories': '[]'}), '([np.nan, np.nan], categories=[])\n', (13181, 13214), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13223, 13268), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (13250, 13268), True, 'from pandas.util import testing as tm\n'), ((13283, 13298), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (13294, 13298), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13312, 13327), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (13323, 13327), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13345, 13396), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (13363, 13396), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((13416, 13431), 'pandas.Categorical', 'Categorical', (['[]'], {}), '([])\n', (13427, 13431), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13440, 13485), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (13467, 13485), True, 'from pandas.util import testing as tm\n'), ((13500, 13565), 'pandas.Categorical', 'Categorical', (["['b', 'a']"], {'categories': "['b', 'a', 'c']", 'ordered': '(True)'}), "(['b', 'a'], categories=['b', 'a', 
'c'], ordered=True)\n", (13511, 13565), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13579, 13644), 'pandas.Categorical', 'Categorical', (["['a', 'c']"], {'categories': "['b', 'a', 'c']", 'ordered': '(True)'}), "(['a', 'c'], categories=['b', 'a', 'c'], ordered=True)\n", (13590, 13644), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13662, 13713), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(False)'}), '([c1, c2], sort_categories=False)\n', (13680, 13713), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((13733, 13808), 'pandas.Categorical', 'Categorical', (["['b', 'a', 'a', 'c']"], {'categories': "['b', 'a', 'c']", 'ordered': '(True)'}), "(['b', 'a', 'a', 'c'], categories=['b', 'a', 'c'], ordered=True)\n", (13744, 13808), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((13848, 13893), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (13875, 13893), True, 'from pandas.util import testing as tm\n'), ((13972, 13995), 'pandas.Categorical', 'Categorical', (["['a', 'b']"], {}), "(['a', 'b'])\n", (13983, 13995), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((14009, 14048), 'pandas.Series', 'pd.Series', (["['b', 'c']"], {'dtype': '"""category"""'}), "(['b', 'c'], dtype='category')\n", (14018, 14048), True, 'import pandas as pd\n'), ((14066, 14094), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (14084, 14094), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((14114, 14147), 'pandas.Categorical', 'Categorical', (["['a', 'b', 'b', 'c']"], {}), "(['a', 'b', 'b', 'c'])\n", (14125, 14147), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((14156, 14201), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (14183, 14201), True, 'from pandas.util import testing as tm\n'), ((14216, 14236), 'pandas.CategoricalIndex', 'CategoricalIndex', (['c2'], {}), '(c2)\n', (14232, 14236), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((14254, 14282), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (14272, 14282), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((14291, 14336), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (14318, 14336), True, 'from pandas.util import testing as tm\n'), ((14351, 14361), 'pandas.Series', 'Series', (['c1'], {}), '(c1)\n', (14357, 14361), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((14379, 14407), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (14397, 14407), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((14416, 14461), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {}), '(result, expected)\n', (14443, 14461), True, 'from pandas.util import testing as tm\n'), ((2419, 2457), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['TypeError', 'msg'], {}), '(TypeError, msg)\n', (2441, 2457), True, 'from pandas.util import testing as tm\n'), ((2471, 2498), 
'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[s, s2]'], {}), '([s, s2])\n', (2489, 2498), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((2554, 2593), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['ValueError', 'msg'], {}), '(ValueError, msg)\n', (2576, 2593), True, 'from pandas.util import testing as tm\n'), ((2607, 2629), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[]'], {}), '([])\n', (2625, 2629), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((3181, 3207), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (3193, 3207), True, 'import pandas as pd\n'), ((3209, 3235), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-03-01"""'], {}), "('2011-03-01')\n", (3221, 3235), True, 'import pandas as pd\n'), ((3285, 3311), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (3297, 3311), True, 'import pandas as pd\n'), ((3329, 3355), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-02-01"""'], {}), "('2011-02-01')\n", (3341, 3355), True, 'import pandas as pd\n'), ((4708, 4744), 'numpy.array', 'np.array', (['[np.nan]'], {'dtype': 'np.float64'}), '([np.nan], dtype=np.float64)\n', (4716, 4744), True, 'import numpy as np\n'), ((5854, 5892), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['TypeError', 'msg'], {}), '(TypeError, msg)\n', (5876, 5892), True, 'from pandas.util import testing as tm\n'), ((5906, 5934), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (5924, 5934), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((6584, 6622), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['TypeError', 'msg'], {}), '(TypeError, msg)\n', (6606, 6622), True, 'from pandas.util import testing as tm\n'), ((6636, 6664), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (6654, 6664), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((7060, 7098), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['TypeError', 'msg'], {}), '(TypeError, msg)\n', (7082, 7098), True, 'from pandas.util import testing as tm\n'), ((7112, 7160), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'ignore_order': '(False)'}), '([c1, c2], ignore_order=False)\n', (7130, 7160), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((8696, 8734), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['TypeError', 'msg'], {}), '(TypeError, msg)\n', (8718, 8734), True, 'from pandas.util import testing as tm\n'), ((8748, 8796), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'ignore_order': '(False)'}), '([c1, c2], ignore_order=False)\n', (8766, 8796), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((8811, 8849), 'pandas.util.testing.assert_raises_regex', 'tm.assert_raises_regex', (['TypeError', 'msg'], {}), '(TypeError, msg)\n', (8833, 8849), True, 'from pandas.util import testing as tm\n'), ((8863, 8891), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {}), '([c1, c2])\n', (8881, 8891), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((11407, 11431), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (11420, 11431), False, 'import 
pytest\n'), ((11445, 11495), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (['[c1, c2]'], {'sort_categories': '(True)'}), '([c1, c2], sort_categories=True)\n', (11463, 11495), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((14476, 14500), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (14489, 14500), False, 'import pytest\n'), ((14514, 14555), 'pandas.core.dtypes.concat.union_categoricals', 'union_categoricals', (["[c1, ['a', 'b', 'c']]"], {}), "([c1, ['a', 'b', 'c']])\n", (14532, 14555), False, 'from pandas.core.dtypes.concat import union_categoricals\n'), ((626, 667), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-01-05"""'], {}), "('2014-01-01', '2014-01-05')\n", (639, 667), True, 'import pandas as pd\n'), ((682, 723), 'pandas.date_range', 'pd.date_range', (['"""2014-01-06"""', '"""2014-01-07"""'], {}), "('2014-01-06', '2014-01-07')\n", (695, 723), True, 'import pandas as pd\n'), ((738, 779), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-01-07"""'], {}), "('2014-01-01', '2014-01-07')\n", (751, 779), True, 'import pandas as pd\n'), ((796, 854), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-01-05"""'], {'tz': '"""US/Central"""'}), "('2014-01-01', '2014-01-05', tz='US/Central')\n", (809, 854), True, 'import pandas as pd\n'), ((869, 927), 'pandas.date_range', 'pd.date_range', (['"""2014-01-06"""', '"""2014-01-07"""'], {'tz': '"""US/Central"""'}), "('2014-01-06', '2014-01-07', tz='US/Central')\n", (882, 927), True, 'import pandas as pd\n'), ((942, 1000), 'pandas.date_range', 'pd.date_range', (['"""2014-01-01"""', '"""2014-01-07"""'], {'tz': '"""US/Central"""'}), "('2014-01-01', '2014-01-07', tz='US/Central')\n", (955, 1000), True, 'import pandas as pd\n'), ((1017, 1060), 'pandas.period_range', 'pd.period_range', (['"""2014-01-01"""', '"""2014-01-05"""'], {}), "('2014-01-01', '2014-01-05')\n", (1032, 1060), True, 'import pandas as pd\n'), ((1075, 1118), 'pandas.period_range', 'pd.period_range', (['"""2014-01-06"""', '"""2014-01-07"""'], {}), "('2014-01-06', '2014-01-07')\n", (1090, 1118), True, 'import pandas as pd\n'), ((1133, 1176), 'pandas.period_range', 'pd.period_range', (['"""2014-01-01"""', '"""2014-01-07"""'], {}), "('2014-01-01', '2014-01-07')\n", (1148, 1176), True, 'import pandas as pd\n'), ((1450, 1471), 'pandas.Categorical', 'Categorical', (['combined'], {}), '(combined)\n', (1461, 1471), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((1488, 1560), 'pandas.util.testing.assert_categorical_equal', 'tm.assert_categorical_equal', (['result', 'expected'], {'check_category_order': '(True)'}), '(result, expected, check_category_order=True)\n', (1515, 1560), True, 'from pandas.util import testing as tm\n'), ((2727, 2757), 'pandas.Categorical', 'pd.Categorical', (['[1, 2, np.nan]'], {}), '([1, 2, np.nan])\n', (2741, 2757), True, 'import pandas as pd\n'), ((2793, 2823), 'pandas.Categorical', 'pd.Categorical', (['[3, 2, np.nan]'], {}), '([3, 2, np.nan])\n', (2807, 2823), True, 'import pandas as pd\n'), ((2963, 2989), 'pandas.Categorical', 'pd.Categorical', (["['A', 'B']"], {}), "(['A', 'B'])\n", (2977, 2989), True, 'import pandas as pd\n'), ((3025, 3059), 'pandas.Categorical', 'pd.Categorical', (["['B', 'B', np.nan]"], {}), "(['B', 'B', np.nan])\n", (3039, 3059), True, 'import pandas as pd\n'), ((3392, 3412), 'pandas.Categorical', 'pd.Categorical', (['val1'], {}), '(val1)\n', (3406, 3412), True, 'import pandas as pd\n'), ((3414, 
3434), 'pandas.Categorical', 'pd.Categorical', (['val2'], {}), '(val2)\n', (3428, 3434), True, 'import pandas as pd\n'), ((3774, 3806), 'pandas.Categorical', 'pd.Categorical', (['[np.nan, np.nan]'], {}), '([np.nan, np.nan])\n', (3788, 3806), True, 'import pandas as pd\n'), ((3842, 3863), 'pandas.Categorical', 'pd.Categorical', (["['X']"], {}), "(['X'])\n", (3856, 3863), True, 'import pandas as pd\n'), ((3996, 4028), 'pandas.Categorical', 'pd.Categorical', (['[np.nan, np.nan]'], {}), '([np.nan, np.nan])\n', (4010, 4028), True, 'import pandas as pd\n'), ((4064, 4096), 'pandas.Categorical', 'pd.Categorical', (['[np.nan, np.nan]'], {}), '([np.nan, np.nan])\n', (4078, 4096), True, 'import pandas as pd\n'), ((4304, 4322), 'pandas.Categorical', 'pd.Categorical', (['[]'], {}), '([])\n', (4318, 4322), True, 'import pandas as pd\n'), ((4358, 4376), 'pandas.Categorical', 'pd.Categorical', (['[]'], {}), '([])\n', (4372, 4376), True, 'import pandas as pd\n'), ((4490, 4508), 'pandas.Categorical', 'pd.Categorical', (['[]'], {}), '([])\n', (4504, 4508), True, 'import pandas as pd\n'), ((4544, 4565), 'pandas.Categorical', 'pd.Categorical', (['[1.0]'], {}), '([1.0])\n', (4558, 4565), True, 'import pandas as pd\n'), ((4820, 4838), 'pandas.Categorical', 'pd.Categorical', (['[]'], {}), '([])\n', (4834, 4838), True, 'import pandas as pd\n'), ((3514, 3540), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-01-01"""'], {}), "('2011-01-01')\n", (3526, 3540), True, 'import pandas as pd\n'), ((3580, 3606), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-03-01"""'], {}), "('2011-03-01')\n", (3592, 3606), True, 'import pandas as pd\n'), ((3646, 3672), 'pandas.Timestamp', 'pd.Timestamp', (['"""2011-02-01"""'], {}), "('2011-02-01')\n", (3658, 3672), True, 'import pandas as pd\n'), ((1339, 1353), 'pandas.Categorical', 'Categorical', (['a'], {}), '(a)\n', (1350, 1353), False, 'from pandas import Categorical, Series, CategoricalIndex\n'), ((1405, 1419), 'pandas.Categorical', 'Categorical', (['b'], {}), '(b)\n', (1416, 1419), False, 'from pandas import Categorical, Series, CategoricalIndex\n')]
|
import numpy as np
import argparse
from maci.learners import MAVBAC, MASQL, ROMMEO
from maci.misc.sampler import MASampler
from maci.environments import PBeautyGame, MatrixGame, DifferentialGame
from maci.environments import make_particle_env
from maci.misc import logger
import gtimer as gt
import datetime
from copy import deepcopy
from maci.get_agents import ddpg_agent, masql_agent, pr2ac_agent, rom_agent
import maci.misc.tf_utils as U
import os
from keras.backend.tensorflow_backend import set_session
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True # dynamically grow the memory used on the GPU
sess = tf.Session(config=config)
set_session(sess)
def get_particle_game(particle_game_name, arglist):
env = make_particle_env(game_name=particle_game_name)
print(env.action_space, env.observation_space)
agent_num = env.n
adv_agent_num = 0
if particle_game_name == 'simple_push' or particle_game_name == 'simple_adversary':
adv_agent_num = 1
elif particle_game_name == 'simple_tag':
adv_agent_num = 3
model_names_setting = arglist.model_names_setting.split('_')
model_name = '_'.join(model_names_setting)
model_names = [model_names_setting[1]] * adv_agent_num + [model_names_setting[0]] * (agent_num - adv_agent_num)
return env, agent_num, model_name, model_names
def parse_args():
    parser = argparse.ArgumentParser(description="Reinforcement Learning experiments for multiagent environments")
# Environment
# ['particle-simple_spread', 'particle-simple_adversary', 'particle-simple_tag', 'particle-simple_push']
    parser.add_argument('-g', "--game_name", type=str, default="diff-ma_softq", help="name of the game")
    parser.add_argument('-p', "--p", type=float, default=1.1, help="p value for the p-beauty game")
    parser.add_argument('-mu', "--mu", type=float, default=1.5, help="mu parameter (used by CG models)")
    parser.add_argument('-r', "--reward_type", type=str, default="abs", help="reward type")
    parser.add_argument('-mp', "--max_path_length", type=int, default=1, help="maximum path length per episode")
    parser.add_argument('-ms', "--max_steps", type=int, default=10000, help="number of epochs")
    parser.add_argument('-me', "--memory", type=int, default=0, help="memory length for repeated matrix games")
    parser.add_argument('-n', "--n", type=int, default=2, help="number of agents")
    parser.add_argument('-bs', "--batch_size", type=int, default=512, help="batch size")
    parser.add_argument('-hm', "--hidden_size", type=int, default=100, help="hidden layer size")
    parser.add_argument('-ti', "--training_interval", type=int, default=1, help="training interval in epochs")
    # note: argparse's type=bool treats any non-empty string as True
    parser.add_argument('-re', "--repeat", type=bool, default=False, help="whether the matrix game is repeated")
    parser.add_argument('-a', "--aux", type=bool, default=True, help="use the auxiliary objective (PR2 models)")
    parser.add_argument('-gr', "--global_reward", type=bool, default=False, help="share a global reward across agents")
return parser.parse_args()
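# Illustrative note (not part of the original script): a model-names setting such
# as 'PR2AC1_MASQL' is split on '_'; main() uses the first name for the first
# agent and repeats the second for the rest, e.g. for n=3:
#   ['PR2AC1'] + ['MASQL'] * 2  ->  ['PR2AC1', 'MASQL', 'MASQL']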
def main(arglist):
game_name = arglist.game_name
# 'abs', 'one'
reward_type = arglist.reward_type
p = arglist.p
agent_num = arglist.n
u_range = 1.
k = 0
print(arglist.aux, 'arglist.aux')
model_names_setting = arglist.model_names_setting.split('_')
model_names = [model_names_setting[0]] + [model_names_setting[1]] * (agent_num - 1)
model_name = '_'.join(model_names)
path_prefix = game_name
if game_name == 'pbeauty':
env = PBeautyGame(agent_num=agent_num, reward_type=reward_type, p=p)
path_prefix = game_name + '-' + reward_type + '-' + str(p)
elif 'matrix' in game_name:
matrix_game_name = game_name.split('-')[-1]
repeated = arglist.repeat
max_step = arglist.max_path_length
memory = arglist.memory
env = MatrixGame(game=matrix_game_name, agent_num=agent_num,
action_num=2, repeated=repeated,
max_step=max_step, memory=memory,
discrete_action=False, tuple_obs=False)
path_prefix = '{}-{}-{}-{}'.format(game_name, repeated, max_step, memory)
elif 'diff' in game_name:
diff_game_name = game_name.split('-')[-1]
agent_num = 2
env = DifferentialGame(diff_game_name, agent_num)
elif 'particle' in game_name:
particle_game_name = game_name.split('-')[-1]
env, agent_num, model_name, model_names = get_particle_game(particle_game_name, arglist)
now = datetime.datetime.now()
timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')
if 'CG' in model_name:
model_name = model_name + '-{}'.format(arglist.mu)
if not arglist.aux:
model_name = model_name + '-{}'.format(arglist.aux)
suffix = '{}/{}/{}/{}'.format(path_prefix, agent_num, model_name, timestamp)
print(suffix)
logger.add_tabular_output('./log/{}.csv'.format(suffix))
snapshot_dir = './snapshot/{}'.format(suffix)
policy_dir = './policy/{}'.format(suffix)
os.makedirs(snapshot_dir, exist_ok=True)
os.makedirs(policy_dir, exist_ok=True)
logger.set_snapshot_dir(snapshot_dir)
agents = []
M = arglist.hidden_size
batch_size = arglist.batch_size
# MultiAgent sampler
sampler = MASampler(agent_num=agent_num, joint=True, global_reward=arglist.global_reward, max_path_length=25, min_pool_size=100, batch_size=batch_size)
base_kwargs = {
'sampler': sampler,
'epoch_length': 100,
'n_epochs': arglist.max_steps,
'n_train_repeat': 1,
'eval_render': True,
'eval_n_episodes': 10
}
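    # base_kwargs are shared by every agent's learner: epoch_length environment
    # steps are collected per epoch, for n_epochs epochs in total.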
with U.single_threaded_session():
for i, model_name in enumerate(model_names):
if 'PR2AC' in model_name:
k = int(model_name[-1])
g = False
mu = arglist.mu
if 'G' in model_name:
g = True
agent = pr2ac_agent(model_name, i, env, M, u_range, base_kwargs, k=k, g=g, mu=mu, game_name=game_name, aux=arglist.aux)
elif model_name == 'MASQL':
agent = masql_agent(model_name, i, env, M, u_range, base_kwargs, game_name=game_name)
elif model_name == 'ROMMEO':
agent = rom_agent(model_name, i, env, M, u_range, base_kwargs, game_name=game_name)
else:
if model_name == 'DDPG':
joint = False
opponent_modelling = False
elif model_name == 'MADDPG': # Multi-Agent Deep Deterministic Policy Gradient
joint = True
opponent_modelling = False
                elif model_name == 'DDPG-OM':  # DDPG with opponent modelling
                    joint = True
                    opponent_modelling = True
                else:
                    raise ValueError('Unknown model name: {}'.format(model_name))
                agent = ddpg_agent(joint, opponent_modelling, model_names, i, env, M, u_range, base_kwargs, game_name=game_name)
agents.append(agent)
sampler.initialize(env, agents)
for agent in agents:
agent._init_training()
gt.rename_root('MARLAlgorithm')
gt.reset()
gt.set_def_unique(False)
initial_exploration_done = False
# noise = .1
noise = .5
alpha = .1
for agent in agents:
try:
agent.policy.set_noise_level(noise)
except:
pass
# alpha = .5
for steps in gt.timed_for(range(base_kwargs['n_epochs'] + 1)):
# alpha = .1 + np.exp(-0.1 * max(steps-10, 0)) * 500.
logger.push_prefix('Epoch #%d | ' % steps)
if steps % (25*1000) == 0:
print(suffix)
for t in range(base_kwargs['epoch_length']):
# TODO.code consolidation: Add control interval to sampler
if not initial_exploration_done:
if steps >= 1000:
initial_exploration_done = True
sampler.sample()
if not initial_exploration_done:
continue
gt.stamp('sample')
print('Sample Done')
if steps == 1000:
noise = 0.1
for agent in agents:
try:
agent.policy.set_noise_level(noise)
except:
pass
# alpha = 10.
if steps == 2000:
noise = 0.1
for agent in agents:
try:
agent.policy.set_noise_level(noise)
except:
pass
# alpha = .1
if steps == 3000:
noise = 0.05
for agent in agents:
try:
agent.policy.set_noise_level(noise)
except:
pass
if steps > base_kwargs['n_epochs'] / 6:
noise = 0.01
for agent in agents:
try:
agent.policy.set_noise_level(noise)
except:
pass
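            # Noise annealing schedule (summarizing the branches above): start at
            # 0.5, drop to 0.1 at step 1000, 0.05 at step 3000, and 0.01 once
            # steps exceeds n_epochs / 6.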
if steps % arglist.training_interval != 0:
continue
for j in range(base_kwargs['n_train_repeat']):
batch_n = []
recent_batch_n = []
indices = None
                recent_indices = None
for i, agent in enumerate(agents):
if i == 0:
batch = agent.pool.random_batch(batch_size)
indices = agent.pool.indices
                        recent_indices = list(range(agent.pool._top-batch_size, agent.pool._top))
batch_n.append(agent.pool.random_batch_by_indices(indices))
                    recent_batch_n.append(agent.pool.random_batch_by_indices(recent_indices))
# print(len(batch_n))
target_next_actions_n = []
# try:
all_obs = np.array(np.concatenate([batch['observations'] for batch in batch_n], axis=-1))
all_next_obs = np.array(np.concatenate([batch['next_observations'] for batch in batch_n], axis=-1))
# print(all_obs[0])
for batch in batch_n:
# print('making all obs')
batch['all_observations'] = deepcopy(all_obs)
batch['all_next_observations'] = deepcopy(all_next_obs)
opponent_current_actions_n = []
for agent, batch in zip(agents, batch_n):
target_next_actions_n.append(agent.target_policy.get_actions(batch['next_observations']))
opponent_current_actions_n.append(agent.policy.get_actions(batch['observations']))
# update opponent actions
for i, agent in enumerate(agents):
batch_n[i]['opponent_current_actions'] = np.reshape(
np.delete(deepcopy(opponent_current_actions_n), i, 0), (-1, agent._opponent_action_dim))
opponent_actions_n = np.array([batch['actions'] for batch in batch_n])
recent_opponent_actions_n = np.array([batch['actions'] for batch in recent_batch_n])
                # Collect recent opponent observations for the opponent-modelling agents
recent_opponent_observations_n = []
for batch in recent_batch_n:
recent_opponent_observations_n.append(batch['observations'])
current_actions = [agents[i].policy.get_actions(batch_n[i]['next_observations'])[0][0] for i in range(agent_num)]
all_actions_k = []
for i, agent in enumerate(agents):
if isinstance(agent, MAVBAC):
if agent._k > 0:
batch_actions_k = agent.policy.get_all_actions(batch_n[i]['next_observations'])
actions_k = [a[0][0] for a in batch_actions_k]
all_actions_k.append(';'.join(list(map(str, actions_k))))
if len(all_actions_k) > 0:
with open('{}/all_actions.csv'.format(policy_dir), 'a') as f:
f.write(','.join(list(map(str, all_actions_k))) + '\n')
with open('{}/policy.csv'.format(policy_dir), 'a') as f:
f.write(','.join(list(map(str, current_actions)))+'\n')
# print('============')
for i, agent in enumerate(agents):
try:
batch_n[i]['next_actions'] = deepcopy(target_next_actions_n[i])
except:
pass
batch_n[i]['opponent_actions'] = np.reshape(np.delete(deepcopy(opponent_actions_n), i, 0), (-1, agent._opponent_action_dim))
if agent.joint:
if agent.opponent_modelling:
batch_n[i]['recent_opponent_observations'] = recent_opponent_observations_n[i]
batch_n[i]['recent_opponent_actions'] = np.reshape(np.delete(deepcopy(recent_opponent_actions_n), i, 0), (-1, agent._opponent_action_dim))
batch_n[i]['opponent_next_actions'] = agent.opponent_policy.get_actions(batch_n[i]['next_observations'])
else:
batch_n[i]['opponent_next_actions'] = np.reshape(np.delete(deepcopy(target_next_actions_n), i, 0), (-1, agent._opponent_action_dim))
if isinstance(agent, MAVBAC) or isinstance(agent, MASQL) or isinstance(agent, ROMMEO):
agent._do_training(iteration=t + steps * agent._epoch_length, batch=batch_n[i], annealing=alpha)
else:
agent._do_training(iteration=t + steps * agent._epoch_length, batch=batch_n[i])
gt.stamp('train')
sampler.terminate()
if __name__ == '__main__':
arglist = parse_args()
main(arglist)
|
[
"argparse.ArgumentParser",
"tensorflow.ConfigProto",
"keras.backend.tensorflow_backend.set_session",
"maci.misc.logger.set_snapshot_dir",
"maci.misc.tf_utils.single_threaded_session",
"gtimer.set_def_unique",
"gtimer.rename_root",
"gtimer.stamp",
"gtimer.reset",
"datetime.datetime.now",
"maci.environments.make_particle_env",
"maci.environments.DifferentialGame",
"copy.deepcopy",
"tensorflow.Session",
"maci.environments.MatrixGame",
"maci.get_agents.rom_agent",
"numpy.concatenate",
"maci.get_agents.ddpg_agent",
"maci.misc.sampler.MASampler",
"os.makedirs",
"maci.environments.PBeautyGame",
"maci.get_agents.masql_agent",
"numpy.array",
"maci.misc.logger.push_prefix",
"maci.get_agents.pr2ac_agent"
] |
[((544, 560), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (558, 560), True, 'import tensorflow as tf\n'), ((654, 679), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (664, 679), True, 'import tensorflow as tf\n'), ((680, 697), 'keras.backend.tensorflow_backend.set_session', 'set_session', (['sess'], {}), '(sess)\n', (691, 697), False, 'from keras.backend.tensorflow_backend import set_session\n'), ((762, 809), 'maci.environments.make_particle_env', 'make_particle_env', ([], {'game_name': 'particle_game_name'}), '(game_name=particle_game_name)\n', (779, 809), False, 'from maci.environments import make_particle_env\n'), ((1401, 1495), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Reinforcement Learning experiments for multiagent environments"""'], {}), "(\n 'Reinforcement Learning experiments for multiagent environments')\n", (1424, 1495), False, 'import argparse\n'), ((4551, 4574), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (4572, 4574), False, 'import datetime\n'), ((5064, 5104), 'os.makedirs', 'os.makedirs', (['snapshot_dir'], {'exist_ok': '(True)'}), '(snapshot_dir, exist_ok=True)\n', (5075, 5104), False, 'import os\n'), ((5109, 5147), 'os.makedirs', 'os.makedirs', (['policy_dir'], {'exist_ok': '(True)'}), '(policy_dir, exist_ok=True)\n', (5120, 5147), False, 'import os\n'), ((5152, 5189), 'maci.misc.logger.set_snapshot_dir', 'logger.set_snapshot_dir', (['snapshot_dir'], {}), '(snapshot_dir)\n', (5175, 5189), False, 'from maci.misc import logger\n'), ((5310, 5461), 'maci.misc.sampler.MASampler', 'MASampler', ([], {'agent_num': 'agent_num', 'joint': '(True)', 'global_reward': 'arglist.global_reward', 'max_path_length': '(25)', 'min_pool_size': '(100)', 'batch_size': 'batch_size'}), '(agent_num=agent_num, joint=True, global_reward=arglist.\n global_reward, max_path_length=25, min_pool_size=100, batch_size=batch_size\n )\n', (5319, 5461), False, 'from maci.misc.sampler import MASampler\n'), ((3536, 3598), 'maci.environments.PBeautyGame', 'PBeautyGame', ([], {'agent_num': 'agent_num', 'reward_type': 'reward_type', 'p': 'p'}), '(agent_num=agent_num, reward_type=reward_type, p=p)\n', (3547, 3598), False, 'from maci.environments import PBeautyGame, MatrixGame, DifferentialGame\n'), ((5673, 5700), 'maci.misc.tf_utils.single_threaded_session', 'U.single_threaded_session', ([], {}), '()\n', (5698, 5700), True, 'import maci.misc.tf_utils as U\n'), ((7093, 7124), 'gtimer.rename_root', 'gt.rename_root', (['"""MARLAlgorithm"""'], {}), "('MARLAlgorithm')\n", (7107, 7124), True, 'import gtimer as gt\n'), ((7133, 7143), 'gtimer.reset', 'gt.reset', ([], {}), '()\n', (7141, 7143), True, 'import gtimer as gt\n'), ((7152, 7176), 'gtimer.set_def_unique', 'gt.set_def_unique', (['(False)'], {}), '(False)\n', (7169, 7176), True, 'import gtimer as gt\n'), ((3874, 4044), 'maci.environments.MatrixGame', 'MatrixGame', ([], {'game': 'matrix_game_name', 'agent_num': 'agent_num', 'action_num': '(2)', 'repeated': 'repeated', 'max_step': 'max_step', 'memory': 'memory', 'discrete_action': '(False)', 'tuple_obs': '(False)'}), '(game=matrix_game_name, agent_num=agent_num, action_num=2,\n repeated=repeated, max_step=max_step, memory=memory, discrete_action=\n False, tuple_obs=False)\n', (3884, 4044), False, 'from maci.environments import PBeautyGame, MatrixGame, DifferentialGame\n'), ((7588, 7630), 'maci.misc.logger.push_prefix', 'logger.push_prefix', (["('Epoch #%d | ' % steps)"], {}), "('Epoch #%d | ' % steps)\n", (7606, 7630), 
False, 'from maci.misc import logger\n'), ((4310, 4353), 'maci.environments.DifferentialGame', 'DifferentialGame', (['diff_game_name', 'agent_num'], {}), '(diff_game_name, agent_num)\n', (4326, 4353), False, 'from maci.environments import PBeautyGame, MatrixGame, DifferentialGame\n'), ((5982, 6097), 'maci.get_agents.pr2ac_agent', 'pr2ac_agent', (['model_name', 'i', 'env', 'M', 'u_range', 'base_kwargs'], {'k': 'k', 'g': 'g', 'mu': 'mu', 'game_name': 'game_name', 'aux': 'arglist.aux'}), '(model_name, i, env, M, u_range, base_kwargs, k=k, g=g, mu=mu,\n game_name=game_name, aux=arglist.aux)\n', (5993, 6097), False, 'from maci.get_agents import ddpg_agent, masql_agent, pr2ac_agent, rom_agent\n'), ((8102, 8120), 'gtimer.stamp', 'gt.stamp', (['"""sample"""'], {}), "('sample')\n", (8110, 8120), True, 'import gtimer as gt\n'), ((14329, 14346), 'gtimer.stamp', 'gt.stamp', (['"""train"""'], {}), "('train')\n", (14337, 14346), True, 'import gtimer as gt\n'), ((6158, 6235), 'maci.get_agents.masql_agent', 'masql_agent', (['model_name', 'i', 'env', 'M', 'u_range', 'base_kwargs'], {'game_name': 'game_name'}), '(model_name, i, env, M, u_range, base_kwargs, game_name=game_name)\n', (6169, 6235), False, 'from maci.get_agents import ddpg_agent, masql_agent, pr2ac_agent, rom_agent\n'), ((11420, 11469), 'numpy.array', 'np.array', (["[batch['actions'] for batch in batch_n]"], {}), "([batch['actions'] for batch in batch_n])\n", (11428, 11469), True, 'import numpy as np\n'), ((11518, 11574), 'numpy.array', 'np.array', (["[batch['actions'] for batch in recent_batch_n]"], {}), "([batch['actions'] for batch in recent_batch_n])\n", (11526, 11574), True, 'import numpy as np\n'), ((6301, 6376), 'maci.get_agents.rom_agent', 'rom_agent', (['model_name', 'i', 'env', 'M', 'u_range', 'base_kwargs'], {'game_name': 'game_name'}), '(model_name, i, env, M, u_range, base_kwargs, game_name=game_name)\n', (6310, 6376), False, 'from maci.get_agents import ddpg_agent, masql_agent, pr2ac_agent, rom_agent\n'), ((6840, 6948), 'maci.get_agents.ddpg_agent', 'ddpg_agent', (['joint', 'opponent_modelling', 'model_names', 'i', 'env', 'M', 'u_range', 'base_kwargs'], {'game_name': 'game_name'}), '(joint, opponent_modelling, model_names, i, env, M, u_range,\n base_kwargs, game_name=game_name)\n', (6850, 6948), False, 'from maci.get_agents import ddpg_agent, masql_agent, pr2ac_agent, rom_agent\n'), ((10274, 10343), 'numpy.concatenate', 'np.concatenate', (["[batch['observations'] for batch in batch_n]"], {'axis': '(-1)'}), "([batch['observations'] for batch in batch_n], axis=-1)\n", (10288, 10343), True, 'import numpy as np\n'), ((10389, 10463), 'numpy.concatenate', 'np.concatenate', (["[batch['next_observations'] for batch in batch_n]"], {'axis': '(-1)'}), "([batch['next_observations'] for batch in batch_n], axis=-1)\n", (10403, 10463), True, 'import numpy as np\n'), ((10649, 10666), 'copy.deepcopy', 'deepcopy', (['all_obs'], {}), '(all_obs)\n', (10657, 10666), False, 'from copy import deepcopy\n'), ((10724, 10746), 'copy.deepcopy', 'deepcopy', (['all_next_obs'], {}), '(all_next_obs)\n', (10732, 10746), False, 'from copy import deepcopy\n'), ((12974, 13008), 'copy.deepcopy', 'deepcopy', (['target_next_actions_n[i]'], {}), '(target_next_actions_n[i])\n', (12982, 13008), False, 'from copy import deepcopy\n'), ((11299, 11335), 'copy.deepcopy', 'deepcopy', (['opponent_current_actions_n'], {}), '(opponent_current_actions_n)\n', (11307, 11335), False, 'from copy import deepcopy\n'), ((13152, 13180), 'copy.deepcopy', 'deepcopy', (['opponent_actions_n'], 
{}), '(opponent_actions_n)\n', (13160, 13180), False, 'from copy import deepcopy\n'), ((13524, 13559), 'copy.deepcopy', 'deepcopy', (['recent_opponent_actions_n'], {}), '(recent_opponent_actions_n)\n', (13532, 13559), False, 'from copy import deepcopy\n'), ((13864, 13895), 'copy.deepcopy', 'deepcopy', (['target_next_actions_n'], {}), '(target_next_actions_n)\n', (13872, 13895), False, 'from copy import deepcopy\n')]
|
to_import = ['mlmodels.modelutils',
'mlmodels.search.bayesian',
'mlmodels.search.hparameters.lgbm_params']
import logging
logger = logging.getLogger()
from os.path import dirname, abspath, split
project_name = split(dirname(abspath(__file__)))[1]
logger.info(f'{__file__} module: project directory: {project_name}')
import sys
import importlib
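# Import each project module dynamically and expose it in this module's
# namespace under its short name (e.g. 'bayesian' and 'lgbm_params' below).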
for module in to_import:
module_name = module.split('.')[-1]
new_module = importlib.import_module(name = f'.{module}', package = project_name)
sys.modules[__name__].__dict__.update({module_name: new_module})
################################################################################
try:
from lightgbm import LGBMClassifier, LGBMRegressor
except Exception as e:
print('Package missing for model lgbm: lightgbm')
try:
import shap
except Exception as e:
print('Package missing for model lgbm: shap (for feature importance)')
import scipy
import pandas as pd
import numpy as np
#shap.initjs()
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score
def_params = dict(
models = [],
features = None,
metric_func = None,
minimize_metric = False,
is_walk_forward = False,
transform_walk_forward = None,
target_name = None,
transform_cols = None,
optimize_on_val = True
)
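# Illustrative instantiation sketch (assumption, not from the original source):
# hyperparameters from lgbm_hparam_keys and the wrapper options above are passed
# together through kwargs. Note that 'folds' and 'binary_threshold' are consumed
# by the class but are not listed in def_params, so they must also be supplied:
#
#   model = LGBM(objective='binary', metric='accuracy', num_leaves=31,
#                folds=KFold(n_splits=5, shuffle=True, random_state=0),
#                binary_threshold=0.5)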
# Hparameters of lgbm
lgbm_hparam_keys = [
'num_leaves',
'max_depth',
'min_data_in_leaf',
'bagging_fraction',
'learning_rate',
'reg_alpha',
'reg_lambda',
'min_sum_hessian_in_leaf',
'feature_fraction',
'unbalanced_sets',
'num_iterations',
'random_state',
'bagging_freq',
'bagging_seed',
'early_stopping_round',
'objective',
'metric',
'verbose',
'num_class'
]
class LGBM:
def __init__(self, **kwargs):
lgbm_hparams = {}
for k in lgbm_hparam_keys:
if k in kwargs:
lgbm_hparams[k] = kwargs[k]
self.lgbm_hparams = lgbm_hparams
self.__dict__.update(def_params)
self.__dict__.update(kwargs)
self.walk_forward_features = None
assert(self.objective in ['binary', 'multiclass', 'regression', 'multiclassova'])
self.feature_importances = None
    def test(self, test_data):
        X_test = test_data[0]
        y_test = test_data[1]
        test_preds = self.predict(X_test)
        return self.get_metric(y_test, test_preds)
def get_metric(self, y_true, y_pred):
if self.metric_func is None:
if self.metric == 'accuracy':
if self.objective in ['multiclass', 'multiclassova']:
return accuracy_score(y_true, np.argmax(y_pred, axis = 1))
                elif self.objective == 'binary':
                    return accuracy_score(y_true, y_pred > self.binary_threshold)
else:
return self.metric_func(y_true, y_pred)
def predict(self, x, debug = False, **kwargs):
assert(len(self.models) > 0)
# Predict for each model
model_preds = []
for model in self.models:
try:
model_preds.append(model.predict_proba(x))
except:
model_preds.append(model.predict(x))
preds = np.mean(model_preds, axis = 0)
# Return results of each prediction
return preds
def predict_walk_forward(self, X, X_independent, y, n_pred, clf = None):
"""X_independent is a dataframe that consists of features that dont depend on target. (ex: date)
It must be given in X_val.
X_independent must have enough examples for prediction.
"""
assert(len(self.models) > 0)
assert(isinstance(X, pd.DataFrame))
assert(isinstance(X_independent, pd.DataFrame))
assert(len(X) + n_pred >= len(X_independent))
#assert(target_col not in X.columns)
if self.walk_forward_features is None:
self.walk_forward_features = X.columns
train_size = len(X)
data = X.copy().reset_index(drop = True)
data[self.target_name] = y.values
#display(X_independent)
data = pd.concat([data, X_independent], axis = 0, ignore_index=True)
data = data.reset_index(drop = True)
for i in range(n_pred):
new_example_i = train_size + i
# 1- Calculate new features
data.loc[new_example_i, self.transform_cols] = self.transform_walk_forward(data.iloc[:new_example_i])
# 2- Make a prediction (if a model was not specified, predictions from all CV models will be averaged.)
last_example = data.loc[new_example_i, self.walk_forward_features].values.reshape(1, -1)
data.loc[new_example_i, self.target_name] = self.predict(last_example) if clf is None else clf.predict(last_example)
return data.loc[data.index >= train_size, self.target_name].values
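    # Illustrative call sketch (hypothetical names): X_hist/y_hist hold the known
    # history, while X_future carries only the target-independent columns for the
    # n_pred-step horizon:
    #   preds = model.predict_walk_forward(X_hist, X_future, y_hist, n_pred=7)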
def fit(self, x):
""" X can be pd.Dataframe, np.ndarray or sparse.
y has to be pd.series
"""
train_data = x['train_data']
val_data = x['val_data']
self.models = []
# For CV
oof_preds = np.zeros(len(train_data[0]))
X_data = train_data[0]
y_data = train_data[1]
# Validate after CV
X_val = val_data[0]
try:
y_val = np.array(val_data[1].todense()).ravel()
except:
y_val = np.array(val_data[1]).ravel()
is_sparse = scipy.sparse.issparse(X_data)
# Create dataframe to keep feature importances for each fold
feature_importances = pd.DataFrame()
if not is_sparse:
self.features = X_data.columns
if self.features is not None:
if not len(self.features) == X_data.shape[1]:
raise ValueError(
'Number of features must be the same as n_columns in X.')
# Create column for features
feature_importances['feature'] = self.features
cv_metrics = list()
n_folds = 0
folds = None
val_preds = None
if not isinstance(self.folds, list):
folds = self.folds.split(X_data, y_data)
else:
folds = self.folds
oof_idx = []
for i_fold, (trn_idx, val_idx) in enumerate(folds):
# We can calculate an oof score only on oof examples.
# In time series CV schemes some examples will never become oof.
oof_idx.extend(val_idx)
n_folds += 1
X_trn_fold = X_data[trn_idx] if is_sparse else X_data.iloc[trn_idx]
X_val_fold = X_data[val_idx] if is_sparse else X_data.iloc[val_idx]
y_val_fold = None
y_trn_fold = None
if isinstance(y_data, pd.Series):
y_trn_fold = y_data.iloc[trn_idx]
y_val_fold = y_data.iloc[val_idx]
else:
y_trn_fold = y_data[trn_idx]
y_val_fold = y_data[val_idx]
try:
y_trn_fold = np.array(y_trn_fold.todense()).ravel()
y_val_fold = np.array(y_val_fold.todense()).ravel()
except:
y_trn_fold = np.array(y_trn_fold).ravel()
y_val_fold = np.array(y_val_fold).ravel()
logger.info('Training on fold {}'.format(i_fold))
# Training for this fold
clf = LGBMRegressor(**self.lgbm_hparams) if self.objective == 'regression' else LGBMClassifier(**self.lgbm_hparams)
clf = clf.fit(X = X_trn_fold, y = y_trn_fold,
eval_set = [(X_trn_fold, y_trn_fold),
(X_val_fold, y_val_fold)],
early_stopping_rounds = 250,
verbose = 200)
# Keep models of each fold
self.models.append(clf)
feature_importances['fold_{}'.format(i_fold)] = clf.feature_importances_
try:
oof_preds[val_idx] = clf.predict_proba(X_val_fold)
except:
oof_preds[val_idx] = clf.predict(X_val_fold) if not self.is_walk_forward else \
self.predict_walk_forward(X_trn_fold, X_val_fold, y_trn_fold, len(y_val_fold), clf)
# Validation for this fold
if X_val is not None:
if val_preds is None:
try:
val_preds = clf.predict_proba(X_val)
except:
val_preds = clf.predict(X_val) if not self.is_walk_forward else self.predict_walk_forward(X_data, X_val, y_data, len(y_val), clf)
else:
try:
val_preds += clf.predict_proba(X_val)
except:
val_preds += clf.predict(X_val) if not self.is_walk_forward else self.predict_walk_forward(X_data, X_val, y_data, len(y_val), clf)
logger.info('Training has finished.')
# Validation
val_metric = None
if X_val is not None:
val_preds /= n_folds
logger.info('Calculating validation metric...')
val_metric = self.get_metric(y_val, val_preds)
logger.info(f'Validation {self.metric}: {val_metric}')
feature_importances['importance'] = \
feature_importances[[f'fold_{i}' for i in range(n_folds)]].sum(axis = 1)
cols_to_keep = [col for col in feature_importances.columns if 'fold' not in col]
self.feature_importances = feature_importances[cols_to_keep]
if 'feature' in self.feature_importances.columns:
self.feature_importances.sort_values(by = 'importance',
ascending = False,
inplace = True)
return {
#'cv_metrics': cv_metrics,
'feature_importances': feature_importances,
'val_preds' : val_preds,
'oof_preds': oof_preds,
'metric': self.get_metric(train_data[1][oof_idx], oof_preds[oof_idx]) if not self.optimize_on_val else val_metric,
'val_metric': val_metric
}
    def display_feature_importances(self):
        # 'display' comes from IPython; import it so the method also works outside notebooks.
        from IPython.display import display
        display(self.feature_importances.style.background_gradient(cmap = 'coolwarm'))
def explain_shap(self, data, features = None, class_names = None, which_class = None, return_importances = True, plot_type = None):
X, y = data
explainer = shap.TreeExplainer(self.models[0])
shap_values = explainer.shap_values(X)
if which_class is not None:
assert(class_names is not None)
assert(which_class in class_names)
class_i = class_names.index(which_class)
shap_values = shap_values[class_i]
shap.summary_plot(shap_values,
X,
feature_names = features,
class_names = class_names,
plot_type = plot_type)
if return_importances:
return shap_values
def search(self, x, num_iter = 3, trials_path = 'trials_lgbm', fixed_hparams = None, search_space = None):
# Get default hparams
search_space = lgbm_params.search_space if search_space is None else search_space
fixed_params = lgbm_params.search_fixed if fixed_hparams is None else fixed_hparams
print("Fixed hparameters:")
print(fixed_params)
self.__dict__.update(fixed_params)
# Search
print(f'Minimize metric: {self.minimize_metric}')
res_search = bayesian.bayesian_search(
self.__init__,
self.fit,
x,
search_space,
fixed_params,
num_iter = num_iter,
mode = 'bayesian',
minimize = self.minimize_metric,
trials_path = trials_path,
model_tag = 'lgbm')
return res_search
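# Illustrative usage sketch (assumption, not part of the original project): fit the
# wrapper on synthetic data with the KFold splitter imported above. Passing
# val_data=(None, None) skips the post-CV validation pass, so with
# optimize_on_val=False the returned 'metric' is the out-of-fold score. This
# assumes the lightgbm 3.x fit() signature that the class itself relies on.
if __name__ == '__main__':
    X_demo = pd.DataFrame(np.random.rand(200, 5),
                          columns=['f{}'.format(i) for i in range(5)])
    y_demo = pd.Series((X_demo['f0'] + X_demo['f1'] > 1).astype(int))
    demo_model = LGBM(objective='binary', metric='binary_logloss', num_iterations=50,
                      folds=KFold(n_splits=3, shuffle=True, random_state=0),
                      # scored with accuracy outside LightGBM via metric_func
                      metric_func=lambda y_true, y_pred: accuracy_score(y_true, y_pred > 0.5),
                      optimize_on_val=False)
    res = demo_model.fit({'train_data': (X_demo, y_demo), 'val_data': (None, None)})
    print('OOF accuracy:', res['metric'])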
|
[
"pandas.DataFrame",
"os.path.abspath",
"lightgbm.LGBMClassifier",
"importlib.import_module",
"numpy.argmax",
"scipy.sparse.issparse",
"sklearn.metrics.accuracy_score",
"shap.TreeExplainer",
"numpy.mean",
"numpy.array",
"lightgbm.LGBMRegressor",
"shap.summary_plot",
"pandas.concat",
"logging.getLogger"
] |
[((158, 177), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (175, 177), False, 'import logging\n'), ((455, 519), 'importlib.import_module', 'importlib.import_module', ([], {'name': 'f""".{module}"""', 'package': 'project_name'}), "(name=f'.{module}', package=project_name)\n", (478, 519), False, 'import importlib\n'), ((3381, 3409), 'numpy.mean', 'np.mean', (['model_preds'], {'axis': '(0)'}), '(model_preds, axis=0)\n', (3388, 3409), True, 'import numpy as np\n'), ((4320, 4379), 'pandas.concat', 'pd.concat', (['[data, X_independent]'], {'axis': '(0)', 'ignore_index': '(True)'}), '([data, X_independent], axis=0, ignore_index=True)\n', (4329, 4379), True, 'import pandas as pd\n'), ((5721, 5750), 'scipy.sparse.issparse', 'scipy.sparse.issparse', (['X_data'], {}), '(X_data)\n', (5742, 5750), False, 'import scipy\n'), ((5859, 5873), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5871, 5873), True, 'import pandas as pd\n'), ((11018, 11052), 'shap.TreeExplainer', 'shap.TreeExplainer', (['self.models[0]'], {}), '(self.models[0])\n', (11036, 11052), False, 'import shap\n'), ((11348, 11456), 'shap.summary_plot', 'shap.summary_plot', (['shap_values', 'X'], {'feature_names': 'features', 'class_names': 'class_names', 'plot_type': 'plot_type'}), '(shap_values, X, feature_names=features, class_names=\n class_names, plot_type=plot_type)\n', (11365, 11456), False, 'import shap\n'), ((252, 269), 'os.path.abspath', 'abspath', (['__file__'], {}), '(__file__)\n', (259, 269), False, 'from os.path import dirname, abspath, split\n'), ((7774, 7808), 'lightgbm.LGBMRegressor', 'LGBMRegressor', ([], {}), '(**self.lgbm_hparams)\n', (7787, 7808), False, 'from lightgbm import LGBMClassifier, LGBMRegressor\n'), ((7848, 7883), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {}), '(**self.lgbm_hparams)\n', (7862, 7883), False, 'from lightgbm import LGBMClassifier, LGBMRegressor\n'), ((2777, 2802), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (2786, 2802), True, 'import numpy as np\n'), ((2883, 2937), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_true', '(y_pred > self.binary_threshold)'], {}), '(y_true, y_pred > self.binary_threshold)\n', (2897, 2937), False, 'from sklearn.metrics import accuracy_score\n'), ((5662, 5683), 'numpy.array', 'np.array', (['val_data[1]'], {}), '(val_data[1])\n', (5670, 5683), True, 'import numpy as np\n'), ((7540, 7560), 'numpy.array', 'np.array', (['y_trn_fold'], {}), '(y_trn_fold)\n', (7548, 7560), True, 'import numpy as np\n'), ((7602, 7622), 'numpy.array', 'np.array', (['y_val_fold'], {}), '(y_val_fold)\n', (7610, 7622), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 16 13:12:07 2021
@author: <NAME>
"""
import warnings
warnings.filterwarnings("ignore")
import time
import unittest
import numpy as np
from smoot.smoot import MOO
from smoot.zdt import ZDT
from smt.sampling_methods import LHS
from smt.problems import Branin
from smt.utils.sm_test_case import SMTestCase
from pymoo.factory import get_performance_indicator
class TestMOO(SMTestCase):
def test_Branin(self):
n_iter = 10
fun = Branin()
criterion = "EI"
mo = MOO(
n_iter=n_iter,
criterion=criterion,
xlimits=fun.xlimits,
random_state=42,
)
print("running test Branin 2D -> 1D")
start = time.time()
mo.optimize(fun=fun)
x_opt, y_opt = mo.result.X[0][0], mo.result.F[0][0]
print("x_opt :", x_opt)
print("y_opt :", y_opt)
print("seconds taken Branin: ", time.time() - start, "\n")
self.assertTrue(
np.allclose([[-3.14, 12.275]], x_opt, rtol=0.2)
or np.allclose([[3.14, 2.275]], x_opt, rtol=0.2)
or np.allclose([[9.42, 2.475]], x_opt, rtol=0.2)
)
self.assertAlmostEqual(0.39, float(y_opt), delta=1)
def test_zdt(self, type=1, criterion="EHVI", ndim=2, n_iter=10):
fun = ZDT(type=type, ndim=ndim)
mo = MOO(
n_iter=n_iter,
criterion=criterion,
random_state=1,
)
print("running test ZDT", type, ": " + str(ndim) + "D -> 2D,", criterion)
start = time.time()
mo.optimize(fun=fun)
print("seconds taken :", time.time() - start)
exact = fun.pareto(random_state=1)[1]
gd = get_performance_indicator("gd", exact)
dist = gd.calc(mo.result.F)
print("distance to the exact Pareto front", dist, "\n")
self.assertLess(dist, 1)
def test_zdt_2(self):
self.test_zdt(type=2, criterion="WB2S")
def test_zdt_3(self):
self.test_zdt(type=3, criterion="PI", n_iter=20)
def test_zdt_2_3Dto2D(self):
self.test_zdt(type=2, criterion="EHVI", ndim=3)
def test_train_pts_known(self):
fun = ZDT()
xlimits = fun.xlimits
sampling = LHS(xlimits=xlimits, random_state=42)
xt = sampling(20) # generating data as if it were known data
yt = fun(xt) # idem : "known" datapoint for training
mo = MOO(n_iter=10, criterion="MPI", xdoe=xt, ydoe=yt, random_state=42)
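        # xdoe/ydoe seed the optimizer with this pre-existing design of
        # experiments instead of letting it sample a fresh initial DOE internally.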
print("running test ZDT with known training points")
start = time.time()
mo.optimize(fun=fun)
print("seconds taken :", time.time() - start)
exact = fun.pareto(random_state=1)[1]
gd = get_performance_indicator("gd", exact)
dist = gd.calc(mo.result.F)
print("distance to the exact Pareto front", dist, "\n")
self.assertLess(dist, 1)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"warnings.filterwarnings",
"pymoo.factory.get_performance_indicator",
"smt.problems.Branin",
"numpy.allclose",
"smoot.zdt.ZDT",
"time.time",
"smt.sampling_methods.LHS",
"smoot.smoot.MOO"
] |
[((102, 135), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (125, 135), False, 'import warnings\n'), ((2946, 2961), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2959, 2961), False, 'import unittest\n'), ((498, 506), 'smt.problems.Branin', 'Branin', ([], {}), '()\n', (504, 506), False, 'from smt.problems import Branin\n'), ((546, 623), 'smoot.smoot.MOO', 'MOO', ([], {'n_iter': 'n_iter', 'criterion': 'criterion', 'xlimits': 'fun.xlimits', 'random_state': '(42)'}), '(n_iter=n_iter, criterion=criterion, xlimits=fun.xlimits, random_state=42)\n', (549, 623), False, 'from smoot.smoot import MOO\n'), ((745, 756), 'time.time', 'time.time', ([], {}), '()\n', (754, 756), False, 'import time\n'), ((1338, 1363), 'smoot.zdt.ZDT', 'ZDT', ([], {'type': 'type', 'ndim': 'ndim'}), '(type=type, ndim=ndim)\n', (1341, 1363), False, 'from smoot.zdt import ZDT\n'), ((1378, 1433), 'smoot.smoot.MOO', 'MOO', ([], {'n_iter': 'n_iter', 'criterion': 'criterion', 'random_state': '(1)'}), '(n_iter=n_iter, criterion=criterion, random_state=1)\n', (1381, 1433), False, 'from smoot.smoot import MOO\n'), ((1579, 1590), 'time.time', 'time.time', ([], {}), '()\n', (1588, 1590), False, 'import time\n'), ((1733, 1771), 'pymoo.factory.get_performance_indicator', 'get_performance_indicator', (['"""gd"""', 'exact'], {}), "('gd', exact)\n", (1758, 1771), False, 'from pymoo.factory import get_performance_indicator\n'), ((2205, 2210), 'smoot.zdt.ZDT', 'ZDT', ([], {}), '()\n', (2208, 2210), False, 'from smoot.zdt import ZDT\n'), ((2260, 2297), 'smt.sampling_methods.LHS', 'LHS', ([], {'xlimits': 'xlimits', 'random_state': '(42)'}), '(xlimits=xlimits, random_state=42)\n', (2263, 2297), False, 'from smt.sampling_methods import LHS\n'), ((2443, 2509), 'smoot.smoot.MOO', 'MOO', ([], {'n_iter': '(10)', 'criterion': '"""MPI"""', 'xdoe': 'xt', 'ydoe': 'yt', 'random_state': '(42)'}), "(n_iter=10, criterion='MPI', xdoe=xt, ydoe=yt, random_state=42)\n", (2446, 2509), False, 'from smoot.smoot import MOO\n'), ((2587, 2598), 'time.time', 'time.time', ([], {}), '()\n', (2596, 2598), False, 'import time\n'), ((2741, 2779), 'pymoo.factory.get_performance_indicator', 'get_performance_indicator', (['"""gd"""', 'exact'], {}), "('gd', exact)\n", (2766, 2779), False, 'from pymoo.factory import get_performance_indicator\n'), ((950, 961), 'time.time', 'time.time', ([], {}), '()\n', (959, 961), False, 'import time\n'), ((1014, 1061), 'numpy.allclose', 'np.allclose', (['[[-3.14, 12.275]]', 'x_opt'], {'rtol': '(0.2)'}), '([[-3.14, 12.275]], x_opt, rtol=0.2)\n', (1025, 1061), True, 'import numpy as np\n'), ((1077, 1122), 'numpy.allclose', 'np.allclose', (['[[3.14, 2.275]]', 'x_opt'], {'rtol': '(0.2)'}), '([[3.14, 2.275]], x_opt, rtol=0.2)\n', (1088, 1122), True, 'import numpy as np\n'), ((1138, 1183), 'numpy.allclose', 'np.allclose', (['[[9.42, 2.475]]', 'x_opt'], {'rtol': '(0.2)'}), '([[9.42, 2.475]], x_opt, rtol=0.2)\n', (1149, 1183), True, 'import numpy as np\n'), ((1653, 1664), 'time.time', 'time.time', ([], {}), '()\n', (1662, 1664), False, 'import time\n'), ((2661, 2672), 'time.time', 'time.time', ([], {}), '()\n', (2670, 2672), False, 'import time\n')]
|
import copy
import random
import smmp
import numpy as np
from math import *
from universe1 import *
from protein1 import *
from mergesort import *
from sklearn import preprocessing
phi = np.concatenate((np.random.uniform(-80,-50,10),np.random.uniform(-160,-120,10)))
psi = np.concatenate((np.random.uniform(-50,-20,10),np.random.uniform(110,140,10)))
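# Preset backbone dihedral samples: the first phi/psi ranges roughly match the
# alpha-helix region of the Ramachandran plot, the second ranges the beta-sheet
# region. Note: phi and psi are not referenced again in this script.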
class geneticAlgorithmProtein():
    '''Genetic algorithm for protein conformation search (SMMP backend).'''
def __init__(self,max_iteration=100,population_size=100,
mut_probout=0.18,mut_probin=0.25,crossover_prob=1.,parents_port=0.5,elit_ratio=0.10):
        self.dim = len(smmp.var_r.vlvr)
self.dimcant = smmp.mol_par.nvr
self.max_iter = max_iteration
self.pop_size = population_size
self.mut_prob = mut_probin #0.28
self.mut_probout = mut_probout #0.25
self.cross_prob = crossover_prob
self.parent_port = int(parents_port*self.pop_size)
trl = self.pop_size - self.parent_port
if trl%2!=0:
self.parent_port+=1
trl = self.pop_size*elit_ratio
        if trl<1 and elit_ratio>0:
            self.num_elit=2
        else:
            self.num_elit=int(trl)
self.datazeros=np.zeros(self.dim-self.dimcant)
    def run(self,fitness):
        self.__fitness = fitness
        # number of dihedral angles per amino-acid residue
AnglesRes = []
sumAngle = 0
for val in smmp.res_i.nvrrs:
if val == 0:
                break
AnglesRes.append([val,sumAngle])
sumAngle+=val
######################
# initial population #
######################
pop = [[np.zeros(self.dim),0]]*self.pop_size
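        # note: list multiplication makes every slot alias the same inner list,
        # but each slot is overwritten in the loop below, so this is harmless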
datazeros=np.zeros(self.dim-self.dimcant)
for p in range(0,self.pop_size):
val = copy.deepcopy(np.random.uniform(-180,180,self.dimcant))
r = random.random()
val = val + (180-val)*r
smmp.var_r.vlvr = np.concatenate((val,datazeros))
pop[p] = [val,myUniverso.energy()]
pop.sort(key = lambda x: x[1])
# evaluation chromosoma
minfit=pop[0][1]
if self.__fitness >= minfit: return pop[0]
M_Echrom = copy.deepcopy(pop[:self.num_elit])
Echrom = copy.deepcopy(M_Echrom[0])
counter = 0
while(counter<self.max_iter and Echrom[1] >= self.__fitness):
Nchrom = self.roulette_wheel_selection(M_Echrom)
print(counter,Echrom[1])
# crossover
offspring1,offspring2 = self.crossoverOne(Echrom,Nchrom,AnglesRes)
smmp.var_r.vlvr = np.concatenate((offspring1[0],datazeros))
offspring1[1]=myUniverso.energy()
smmp.var_r.vlvr = np.concatenate((offspring2[0],datazeros))
offspring2[1]=myUniverso.energy()
#mutation
if offspring1[1] >= offspring2[1]:
offspring1 = self.mutation(offspring1,AnglesRes)
smmp.var_r.vlvr = np.concatenate((offspring1[0],datazeros))
offspring1[1]=myUniverso.energy()
if offspring2[1] > Echrom[1]:
offspring2 = self.mutation(offspring2,AnglesRes)
smmp.var_r.vlvr = np.concatenate((offspring2[0],datazeros))
offspring2[1]=myUniverso.energy()
else:
offspring2 = self.mutation(offspring2,AnglesRes)
smmp.var_r.vlvr = np.concatenate((offspring2[0],datazeros))
offspring2[1]=myUniverso.energy()
if offspring1[1] > Echrom[1]:
offspring1 = self.mutation(offspring1,AnglesRes)
smmp.var_r.vlvr = np.concatenate((offspring1[0],datazeros))
offspring1[1]=myUniverso.energy()
M_Echrom.append(offspring1)
M_Echrom.append(offspring2)
M_Echrom.sort(key = lambda x: x[1])
M_Echrom = copy.deepcopy(M_Echrom[:self.num_elit])
Echrom=copy.deepcopy(M_Echrom[0])
counter+=1
print("cantidad de iteraciones: ",counter)
smmp.var_r.vlvr = np.concatenate((Echrom[0],datazeros))
return Echrom
def roulette_wheel_selection(self,population):
population = population[1:self.num_elit]
fitness = [x[1] for x in population]
sp = np.sum(fitness)
vp = []
for x in population:
vp.append(x[1]/sp)
        r = random.random()
        # roulette-wheel selection needs a cumulative distribution, not an
        # L2-normalized vector; this assumes the energy-derived weights are
        # non-negative
        cum = np.cumsum(np.array(vp))
        idd = min(np.searchsorted(cum, r, side='right'), len(population) - 1)
#r = random.random()
#idd = int(idd + (self.num_elit-1-idd)*r)
Nc = copy.deepcopy(population[idd])
return Nc
def crossoverOne(self,x,y,AnglesRes):
ofs1 = copy.deepcopy(x)
ofs2 = copy.deepcopy(y)
# One point
l = len(AnglesRes)
ran1=np.random.randint(1,l-1)
for i in range(ran1,l):
if self.cross_prob > np.random.random() :
ofs1[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]=y[0][
AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
ofs2[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]=x[0][
AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
return ofs1,ofs2
def crossoverTwo(self,x,y,AnglesRes):
ofs1 = copy.deepcopy(x)
ofs2 = copy.deepcopy(y)
# two point
l = len(AnglesRes)
ran1=np.random.randint(0,l)
ran2=np.random.randint(ran1,l)
for i in range(ran1,ran2):
if self.cross_prob > np.random.random() :
ofs1[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]=y[0][
AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
ofs2[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]=x[0][
AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
return ofs1,ofs2
def crossoverUniform(self,x,y,AnglesRes):
ofs1 = copy.deepcopy(x)
ofs2 = copy.deepcopy(y)
# uniform
l = len(AnglesRes)
for i in range(0,l):
if 0.5 > np.random.random() :
ofs1[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]=y[0][
AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
ofs2[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]=x[0][
AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
return ofs1,ofs2
def crossoverBinary(self,x,y,AnglesRes):
ofs1 = copy.deepcopy(x)
ofs2 = copy.deepcopy(y)
# uniform
l = len(AnglesRes)
eta=2
for i in range(0,l):
ran = np.random.random()
if 0.5 > ran :
beta = 2.*ran
else:
beta = 1./(2.*(1.-ran))
            beta **= 1. / (eta + 1.)  # simulated binary crossover spread: beta^(1/(eta+1))
x1 = y[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
x2 = x[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]]
ofs1[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]] = 0.5 * (((
1 + beta) * x1) + ((1 - beta) * x2))
ofs2[0][AnglesRes[i][1]:AnglesRes[i][0]+AnglesRes[i][1]] = 0.5 * (((
1 - beta) * x2) + ((1 + beta) * x1))
return ofs1,ofs2
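    # mutation() applies two gates: mut_probout decides whether a residue is
    # touched at all, then mut_prob decides per angle whether it is resampled
    # uniformly from [-180, 180)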
def mutation(self,x,AnglesRes):
ofs = copy.deepcopy(x)
l = len(AnglesRes)
for i in range(0,l):
if self.mut_probout > np.random.random():
for j in range(AnglesRes[i][0]):
if self.mut_prob > np.random.random():
index = AnglesRes[i][1]+j
#r = random.random()
replace = np.random.uniform(-180,180)
#replace = -180 + (180+180)*r
ofs[0][index]=replace
return ofs
myUniverso = Universe(T=300,st=0)
protA = Protein("EXAMPLES/prueba.seq",'')
myUniverso.add(protA)
GAP = geneticAlgorithmProtein(50000,200,
mut_probout= 0.6,#0.18,#0.2
mut_probin= 0.06, #0.25, #0.15
elit_ratio=0.5)
Echrom = GAP.run(-15)
print(myUniverso.energy(),myUniverso.rgyr(),myUniverso.helix(),protA.hbond())
smmp.outpdb(0,'final.pdb')
|
[
"smmp.outpdb",
"numpy.random.uniform",
"copy.deepcopy",
"numpy.sum",
"numpy.zeros",
"numpy.searchsorted",
"random.random",
"numpy.random.randint",
"numpy.random.random",
"numpy.array",
"numpy.concatenate"
] |
[((8489, 8516), 'smmp.outpdb', 'smmp.outpdb', (['(0)', '"""final.pdb"""'], {}), "(0, 'final.pdb')\n", (8500, 8516), False, 'import smmp\n'), ((204, 235), 'numpy.random.uniform', 'np.random.uniform', (['(-80)', '(-50)', '(10)'], {}), '(-80, -50, 10)\n', (221, 235), True, 'import numpy as np\n'), ((234, 267), 'numpy.random.uniform', 'np.random.uniform', (['(-160)', '(-120)', '(10)'], {}), '(-160, -120, 10)\n', (251, 267), True, 'import numpy as np\n'), ((291, 322), 'numpy.random.uniform', 'np.random.uniform', (['(-50)', '(-20)', '(10)'], {}), '(-50, -20, 10)\n', (308, 322), True, 'import numpy as np\n'), ((321, 352), 'numpy.random.uniform', 'np.random.uniform', (['(110)', '(140)', '(10)'], {}), '(110, 140, 10)\n', (338, 352), True, 'import numpy as np\n'), ((1210, 1243), 'numpy.zeros', 'np.zeros', (['(self.dim - self.dimcant)'], {}), '(self.dim - self.dimcant)\n', (1218, 1243), True, 'import numpy as np\n'), ((1729, 1762), 'numpy.zeros', 'np.zeros', (['(self.dim - self.dimcant)'], {}), '(self.dim - self.dimcant)\n', (1737, 1762), True, 'import numpy as np\n'), ((2222, 2256), 'copy.deepcopy', 'copy.deepcopy', (['pop[:self.num_elit]'], {}), '(pop[:self.num_elit])\n', (2235, 2256), False, 'import copy\n'), ((2274, 2300), 'copy.deepcopy', 'copy.deepcopy', (['M_Echrom[0]'], {}), '(M_Echrom[0])\n', (2287, 2300), False, 'import copy\n'), ((4156, 4194), 'numpy.concatenate', 'np.concatenate', (['(Echrom[0], datazeros)'], {}), '((Echrom[0], datazeros))\n', (4170, 4194), True, 'import numpy as np\n'), ((4375, 4390), 'numpy.sum', 'np.sum', (['fitness'], {}), '(fitness)\n', (4381, 4390), True, 'import numpy as np\n'), ((4479, 4494), 'random.random', 'random.random', ([], {}), '()\n', (4492, 4494), False, 'import random\n'), ((4710, 4740), 'copy.deepcopy', 'copy.deepcopy', (['population[idd]'], {}), '(population[idd])\n', (4723, 4740), False, 'import copy\n'), ((4818, 4834), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (4831, 4834), False, 'import copy\n'), ((4850, 4866), 'copy.deepcopy', 'copy.deepcopy', (['y'], {}), '(y)\n', (4863, 4866), False, 'import copy\n'), ((4927, 4954), 'numpy.random.randint', 'np.random.randint', (['(1)', '(l - 1)'], {}), '(1, l - 1)\n', (4944, 4954), True, 'import numpy as np\n'), ((5427, 5443), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (5440, 5443), False, 'import copy\n'), ((5459, 5475), 'copy.deepcopy', 'copy.deepcopy', (['y'], {}), '(y)\n', (5472, 5475), False, 'import copy\n'), ((5536, 5559), 'numpy.random.randint', 'np.random.randint', (['(0)', 'l'], {}), '(0, l)\n', (5553, 5559), True, 'import numpy as np\n'), ((5572, 5598), 'numpy.random.randint', 'np.random.randint', (['ran1', 'l'], {}), '(ran1, l)\n', (5589, 5598), True, 'import numpy as np\n'), ((6087, 6103), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (6100, 6103), False, 'import copy\n'), ((6119, 6135), 'copy.deepcopy', 'copy.deepcopy', (['y'], {}), '(y)\n', (6132, 6135), False, 'import copy\n'), ((6651, 6667), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (6664, 6667), False, 'import copy\n'), ((6683, 6699), 'copy.deepcopy', 'copy.deepcopy', (['y'], {}), '(y)\n', (6696, 6699), False, 'import copy\n'), ((7502, 7518), 'copy.deepcopy', 'copy.deepcopy', (['x'], {}), '(x)\n', (7515, 7518), False, 'import copy\n'), ((1892, 1907), 'random.random', 'random.random', ([], {}), '()\n', (1905, 1907), False, 'import random\n'), ((1974, 2006), 'numpy.concatenate', 'np.concatenate', (['(val, datazeros)'], {}), '((val, datazeros))\n', (1988, 2006), True, 'import numpy as np\n'), 
((2625, 2667), 'numpy.concatenate', 'np.concatenate', (['(offspring1[0], datazeros)'], {}), '((offspring1[0], datazeros))\n', (2639, 2667), True, 'import numpy as np\n'), ((2748, 2790), 'numpy.concatenate', 'np.concatenate', (['(offspring2[0], datazeros)'], {}), '((offspring2[0], datazeros))\n', (2762, 2790), True, 'import numpy as np\n'), ((3969, 4008), 'copy.deepcopy', 'copy.deepcopy', (['M_Echrom[:self.num_elit]'], {}), '(M_Echrom[:self.num_elit])\n', (3982, 4008), False, 'import copy\n'), ((4028, 4054), 'copy.deepcopy', 'copy.deepcopy', (['M_Echrom[0]'], {}), '(M_Echrom[0])\n', (4041, 4054), False, 'import copy\n'), ((4581, 4617), 'numpy.searchsorted', 'np.searchsorted', (['vp', 'r'], {'side': '"""right"""'}), "(vp, r, side='right')\n", (4596, 4617), True, 'import numpy as np\n'), ((6815, 6833), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6831, 6833), True, 'import numpy as np\n'), ((1834, 1876), 'numpy.random.uniform', 'np.random.uniform', (['(-180)', '(180)', 'self.dimcant'], {}), '(-180, 180, self.dimcant)\n', (1851, 1876), True, 'import numpy as np\n'), ((3005, 3047), 'numpy.concatenate', 'np.concatenate', (['(offspring1[0], datazeros)'], {}), '((offspring1[0], datazeros))\n', (3019, 3047), True, 'import numpy as np\n'), ((3463, 3505), 'numpy.concatenate', 'np.concatenate', (['(offspring2[0], datazeros)'], {}), '((offspring2[0], datazeros))\n', (3477, 3505), True, 'import numpy as np\n'), ((4533, 4545), 'numpy.array', 'np.array', (['vp'], {}), '(vp)\n', (4541, 4545), True, 'import numpy as np\n'), ((5027, 5045), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5043, 5045), True, 'import numpy as np\n'), ((5675, 5693), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5691, 5693), True, 'import numpy as np\n'), ((6240, 6258), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6256, 6258), True, 'import numpy as np\n'), ((7661, 7679), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7677, 7679), True, 'import numpy as np\n'), ((1673, 1691), 'numpy.zeros', 'np.zeros', (['self.dim'], {}), '(self.dim)\n', (1681, 1691), True, 'import numpy as np\n'), ((3250, 3292), 'numpy.concatenate', 'np.concatenate', (['(offspring2[0], datazeros)'], {}), '((offspring2[0], datazeros))\n', (3264, 3292), True, 'import numpy as np\n'), ((3708, 3750), 'numpy.concatenate', 'np.concatenate', (['(offspring1[0], datazeros)'], {}), '((offspring1[0], datazeros))\n', (3722, 3750), True, 'import numpy as np\n'), ((7769, 7787), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7785, 7787), True, 'import numpy as np\n'), ((7918, 7946), 'numpy.random.uniform', 'np.random.uniform', (['(-180)', '(180)'], {}), '(-180, 180)\n', (7935, 7946), True, 'import numpy as np\n')]
|
# Importing the Keras libraries and packages
import numpy as np
import keras
import tensorflow as tf
from keras.models import load_model
from IPython.display import display
from PIL import Image
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
test_datagen = ImageDataGenerator(rescale=1./255)
def import_model():
classifier = load_model("./model/best_weight.h5")
return classifier
def test(classifier, test_img):
print("testing")
test_image = prepImage(test_img)
result = classifier.predict(test_image)
return printResult(result)
def prepImage(testImage):
test_image = image.load_img(testImage, target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
return test_image
def printResult(result):
    prediction = result[0][0] == 1
    return prediction
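# Minimal usage sketch (the image path below is an illustrative placeholder):
# classifier = import_model()
# print(test(classifier, "some_image.jpg"))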
|
[
"keras.models.load_model",
"keras.preprocessing.image.ImageDataGenerator",
"numpy.expand_dims",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img"
] |
[((478, 574), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), '(rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2,\n horizontal_flip=True)\n', (496, 574), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((602, 639), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), '(rescale=1.0 / 255)\n', (620, 639), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((676, 712), 'keras.models.load_model', 'load_model', (['"""./model/best_weight.h5"""'], {}), "('./model/best_weight.h5')\n", (686, 712), False, 'from keras.models import load_model\n'), ((948, 995), 'keras.preprocessing.image.load_img', 'image.load_img', (['testImage'], {'target_size': '(64, 64)'}), '(testImage, target_size=(64, 64))\n', (962, 995), False, 'from keras.preprocessing import image\n'), ((1013, 1043), 'keras.preprocessing.image.img_to_array', 'image.img_to_array', (['test_image'], {}), '(test_image)\n', (1031, 1043), False, 'from keras.preprocessing import image\n'), ((1061, 1095), 'numpy.expand_dims', 'np.expand_dims', (['test_image'], {'axis': '(0)'}), '(test_image, axis=0)\n', (1075, 1095), True, 'import numpy as np\n')]
|
import numpy as np
class congestionInference:
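    """Per-window congestion inference: congestion switches on when a latency
    jump coincides with significant jitter, and an already-congested link that
    jumps to a higher mean RTT is treated as still congested (window skipped)."""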
def __init__(self, latency_jumps, jitter_analysis):
self.latency_jumps = latency_jumps
self.jitter_analysis = jitter_analysis
self.congestion = False
def fit(self):
self.congestion_inferences = []
for i in range(len(self.latency_jumps)):
if self.congestion and self.latency_jumps[i][2]:
# The link was already congested and it has jumped to a state of larger mean RTT
continue
else:
if self.latency_jumps[i][2] and self.jitter_analysis[i][2]:
self.congestion = True
else:
self.congestion = False
self.congestion_inferences.append((self.latency_jumps[i][0], self.latency_jumps[i][1], self.congestion))
def getInferences(self):
return np.array(self.congestion_inferences)
|
[
"numpy.array"
] |
[((899, 935), 'numpy.array', 'np.array', (['self.congestion_inferences'], {}), '(self.congestion_inferences)\n', (907, 935), True, 'import numpy as np\n')]
|
"""
Recommender
"""
from __future__ import annotations
from pathlib import Path
from typing import Any, Callable, Optional, Tuple, Union, cast
import numpy as np
import torch
from sklearn.neighbors import NearestNeighbors
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from wav2rec._utils.validation import check_is_fitted
from wav2rec.core.similarity import cosine_similarity, similarity_calculator
from wav2rec.data.dataset import Wav2RecDataset
from wav2rec.nn.lightening import Wav2RecNet
def _l2_normalize(array: np.ndarray, axis: int = -1) -> np.ndarray:
norm = np.linalg.norm(array, ord=2, axis=axis, keepdims=True)
array_norm: np.ndarray = array / np.maximum(norm, np.finfo(array.dtype).eps)
return array_norm
def _standardize_input(x: Union[torch.Tensor, np.ndarray]) -> torch.Tensor:
x = torch.as_tensor(x)
if x.ndim == 1:
return x.unsqueeze(0)
elif x.ndim == 2:
return x
else:
raise IndexError(f"Input must be 1D or 2D, got {x.ndim}D")
class Wav2Rec:
"""Waveform recommendation & matching engine.
Args:
model_path (Path): path to (training) checkpoint for ``Wav2RecNet``
distance_metric (str): distance metric to use for nearest neighbours search
normalize (bool): if ``True`` perform L2 normalization on all projections
        similarity_metric (callable, optional): a callable which accepts two 1D arrays
and returns a float. Must be compiled with ``numba.jit(nopython=True)``.
If ``None`` distances will be returned instead (see ``distance_metric``).
batch_size (int): number of audio files to send to the Wav2Rec neural network
model for projection simultaneously.
num_workers (int): number of subprocesses to use when loading data from the
dataset. See ``torch.utils.data.dataloader.DataLoader``.
pin_memory (bool): copy tensors to CUDA memory before the data loader
returns them.
prefetch_factor (int): Number of samples to load in advance of each worker.
See ``torch.utils.data.dataloader.DataLoader``.
device (torch.device, optional): device to run the model on.
If ``None``, the device will be selected automatically.
verbose (bool): if ``True`` display a progress bar while fitting.
**kwargs (Keyword Arguments): Keyword arguments to pass to ``NearestNeighbors``.
Warnings:
* By default, this class uses ``distance_metric='euclidean'`` and ``normalize=True``.
These settings have been purposefully chosen so that the distances computed
for nearest neighbours search accord with the default similarity metric used:
cosine similarity. (The euclidean distance between L2 normalized vectors is an
effective proxy of cosine similarity, see reference below.)
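          Concretely, for L2-normalized vectors ``u`` and ``v``,
          ``||u - v||**2 = 2 - 2*cos(u, v)``, so ranking neighbours by euclidean
          distance is equivalent to ranking them by cosine similarity.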
References:
* https://en.wikipedia.org/wiki/Cosine_similarity
"""
def __init__(
self,
model_path: Path,
distance_metric: str = "euclidean",
normalize: bool = True,
similarity_metric: Optional[
Callable[[np.ndarray, np.ndarray], float]
] = cosine_similarity,
batch_size: int = 1,
num_workers: int = 0,
pin_memory: bool = False,
prefetch_factor: int = 2,
device: Optional[torch.device] = None,
verbose: bool = True,
**kwargs: Any,
) -> None:
        self.model_path = model_path
self.normalize = normalize
self.similarity_metric = similarity_metric
self.batch_size = batch_size
self.num_workers = num_workers
self.pin_memory = pin_memory
self.prefetch_factor = prefetch_factor
self.device = device or torch.device(
"cuda" if torch.cuda.is_available() else "cpu"
)
self.verbose = verbose
self.net = Wav2RecNet.load_from_checkpoint(model_path).eval().to(self.device)
self._nneighbours = NearestNeighbors(
metric=kwargs.pop("metric", distance_metric),
n_jobs=kwargs.pop("n_jobs", -1),
**kwargs,
)
self.paths: np.ndarray = np.array([], dtype="str")
self.fitted: bool = False
@property
def _X(self) -> np.ndarray:
return cast(np.ndarray, self._nneighbours._fit_X)
def _dataset2loader(self, dataset: Wav2RecDataset) -> DataLoader:
return DataLoader(
dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.num_workers,
pin_memory=self.pin_memory,
prefetch_factor=self.prefetch_factor,
)
def get_projection(self, x: Union[torch.Tensor, np.ndarray]) -> np.ndarray:
"""Get the model's projection of a waveform ``x``.
Args:
x (np.ndarray, torch.Tensor): a 1D array or tensor with shape ``[FEATURES]``
or a 2D array or tensor with shape ``[BATCH, FEATURES]``.
Returns:
proj (np.ndarray): a projection of ``x``.
"""
with torch.inference_mode():
proj: np.ndarray = (
self.net(_standardize_input(x).to(self.device)).cpu().numpy()
)
return _l2_normalize(proj, axis=-1) if self.normalize else proj
def fit(self, dataset: Wav2RecDataset) -> Wav2Rec:
"""Fit the recommender to a dataset.
Fitting is composed of three steps:
1. Iterating over all files in the dataset
2. Computing `Wav2RecNet`` projections for each file
3. Fitting the nearest neighbours algorithm against the projections
Args:
dataset (Wav2RecDataset): a dataset to fit against.
Returns:
Wav2Rec
"""
all_paths, all_projections = list(), list()
with tqdm(desc="Fitting", disable=not self.verbose, total=len(dataset)) as pbar:
for paths, audio in self._dataset2loader(dataset):
all_paths.extend(paths)
all_projections.append(self.get_projection(audio))
pbar.update(len(audio))
self.paths = np.asarray(all_paths)
self._nneighbours.fit(np.concatenate(all_projections))
self.fitted = True
return self
def _get_neighbours(
self,
proj: np.ndarray,
n: int,
return_distance: bool,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
if n > len(self._X):
raise ValueError("`n` is larger than dataset")
neighbors: np.ndarray = self._nneighbours.kneighbors(
proj,
n_neighbors=n,
return_distance=return_distance,
)
return neighbors
@check_is_fitted
def recommend(
self,
x: Union[torch.Tensor, np.ndarray],
n: int = 5,
) -> Tuple[np.ndarray, np.ndarray]:
"""Recommend waveforms in ``dataset`` similar to ``x`.
Args:
x (np.ndarray, torch.Tensor): a 2D array or tensor
Shape: ``[BATCH, FEATURES]``.
n (int): number of recommendations to generate
Returns:
result (Tuple[np.ndarray, np.ndarray]): a tuple containing:
* ``metrics``: a 2D array of either similarity or distance metrics.
Shape: ``[BATCH, NEIGHBOURS]``.
* ``paths``: a 2D array of recommended file paths.
Shape: ``[BATCH, NEIGHBOURS]``.
"""
proj = self.get_projection(x)
if callable(self.similarity_metric):
ix = self._get_neighbours(proj, n=n, return_distance=False)
metrics = similarity_calculator(
X_query=proj,
X_neighbours=self._X[ix],
metric=self.similarity_metric,
)
else:
metrics, ix = self._get_neighbours(proj, n=n, return_distance=True)
return metrics, self.paths[ix]
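# Minimal usage sketch (the checkpoint path, dataset instance, and input
# waveform below are illustrative placeholders, not part of this module):
#
#   rec = Wav2Rec(model_path=Path("wav2rec.ckpt"))
#   rec.fit(my_wav2rec_dataset)             # any prepared Wav2RecDataset
#   sims, paths = rec.recommend(waveform)   # waveform: 1D [FEATURES] or 2D [BATCH, FEATURES]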
|
[
"wav2rec.core.similarity.similarity_calculator",
"torch.inference_mode",
"torch.utils.data.DataLoader",
"wav2rec.nn.lightening.Wav2RecNet.load_from_checkpoint",
"typing.cast",
"numpy.asarray",
"numpy.finfo",
"numpy.array",
"numpy.linalg.norm",
"torch.cuda.is_available",
"torch.as_tensor",
"numpy.concatenate"
] |
[((603, 657), 'numpy.linalg.norm', 'np.linalg.norm', (['array'], {'ord': '(2)', 'axis': 'axis', 'keepdims': '(True)'}), '(array, ord=2, axis=axis, keepdims=True)\n', (617, 657), True, 'import numpy as np\n'), ((847, 865), 'torch.as_tensor', 'torch.as_tensor', (['x'], {}), '(x)\n', (862, 865), False, 'import torch\n'), ((4189, 4214), 'numpy.array', 'np.array', (['[]'], {'dtype': '"""str"""'}), "([], dtype='str')\n", (4197, 4214), True, 'import numpy as np\n'), ((4311, 4353), 'typing.cast', 'cast', (['np.ndarray', 'self._nneighbours._fit_X'], {}), '(np.ndarray, self._nneighbours._fit_X)\n', (4315, 4353), False, 'from typing import Any, Callable, Optional, Tuple, Union, cast\n'), ((4440, 4608), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(False)', 'num_workers': 'self.num_workers', 'pin_memory': 'self.pin_memory', 'prefetch_factor': 'self.prefetch_factor'}), '(dataset, batch_size=self.batch_size, shuffle=False, num_workers=\n self.num_workers, pin_memory=self.pin_memory, prefetch_factor=self.\n prefetch_factor)\n', (4450, 4608), False, 'from torch.utils.data import DataLoader\n'), ((6169, 6190), 'numpy.asarray', 'np.asarray', (['all_paths'], {}), '(all_paths)\n', (6179, 6190), True, 'import numpy as np\n'), ((5098, 5120), 'torch.inference_mode', 'torch.inference_mode', ([], {}), '()\n', (5118, 5120), False, 'import torch\n'), ((6221, 6252), 'numpy.concatenate', 'np.concatenate', (['all_projections'], {}), '(all_projections)\n', (6235, 6252), True, 'import numpy as np\n'), ((7698, 7795), 'wav2rec.core.similarity.similarity_calculator', 'similarity_calculator', ([], {'X_query': 'proj', 'X_neighbours': 'self._X[ix]', 'metric': 'self.similarity_metric'}), '(X_query=proj, X_neighbours=self._X[ix], metric=self.\n similarity_metric)\n', (7719, 7795), False, 'from wav2rec.core.similarity import cosine_similarity, similarity_calculator\n'), ((712, 733), 'numpy.finfo', 'np.finfo', (['array.dtype'], {}), '(array.dtype)\n', (720, 733), True, 'import numpy as np\n'), ((3809, 3834), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3832, 3834), False, 'import torch\n'), ((3907, 3950), 'wav2rec.nn.lightening.Wav2RecNet.load_from_checkpoint', 'Wav2RecNet.load_from_checkpoint', (['model_path'], {}), '(model_path)\n', (3938, 3950), False, 'from wav2rec.nn.lightening import Wav2RecNet\n')]
|
import os
import pandas
import uuid
import math
import plotly.graph_objects as go
from scipy.stats import sem, norm
import numpy
from plotly.subplots import make_subplots
import glob
from models import Gillespie, CellDivision, DeterministicCellDivision
n_A = 6.022E23 # Avogadro's Number
e_coli_vol = 6.5E-16 # Liters
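# Note: n_A and e_coli_vol are defined for molarity/volume conversions but are
# not referenced elsewhere in this script.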
def division(df):
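    """Return dashed vertical line shapes marking each division event (Counter == 0)."""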
result = zip(df["Time"], df["Counter"])
traces = []
for i in result:
if i[1] == 0:
trace = dict(
type="line",
x0=i[0],
y0=0,
x1=i[0],
y1=25,
line=dict(
color="Black",
width=1,
dash="dashdot"
))
traces.append(trace)
else:
pass
return traces
def gaussian(u, s, x):
gauss = (1/(s * math.sqrt(2 * math.pi))) * math.exp(-0.5 * ((x-u)/s)**2)
return gauss
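# Note: gaussian() mirrors scipy.stats.norm.pdf and is not called below.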
def average_cycle_times(sim, save, cell_cycle):
path = os.path.join(sim, "*.csv")
for fname in glob.glob(path):
df = pandas.read_csv(fname, sep='\t')
time_at_division = []
time_at_two_genes = []
result = zip(df["Clock"].tolist(), df["Counter"].tolist())
for i in result:
if i[1] == cell_cycle:
time_at_division.append(i[0])
elif i[1] == cell_cycle/2:
time_at_two_genes.append(i[0])
time_at_division = [x / 60 for x in time_at_division]
    time_at_two_genes = [x / 60 for x in time_at_two_genes]
print("Max time division: ", max(time_at_division))
print("Min time division: ", min(time_at_division))
print("Division mean: ", numpy.array(time_at_division).mean())
print("Division std: ", numpy.array(time_at_division).std())
print("Max time at 2 genes: ", max(time_at_two_genes))
print("Min time at 2 genes: ", min(time_at_two_genes))
print("Two genes mean: ", numpy.array(time_at_two_genes).mean())
print("Two genes std: ", numpy.array(time_at_two_genes).std())
genes_mean = numpy.array(time_at_two_genes).mean()
genes_std = numpy.array(time_at_two_genes).std()
divide_mean = numpy.array(time_at_division).mean()
divide_std = numpy.array(time_at_division).std()
gauss_divide_x = numpy.linspace(min(time_at_division), max(time_at_division), len(time_at_division))
gauss_divide_y = norm.pdf(gauss_divide_x, divide_mean, divide_std)
gauss_genes_x = numpy.linspace(min(time_at_two_genes), max(time_at_two_genes), len(time_at_two_genes))
gauss_genes_y = norm.pdf(gauss_genes_x, genes_mean, genes_std)
fig = make_subplots(rows=2, cols=1)
fig.update_yaxes(title_text="Probability of <b>Two Genes</b> (minutes)", row=2, col=1)
fig.update_xaxes(title_text="Time (Min)")
fig.update_layout(
title="Probability of time of division and two genes in cell ({n} cell cycles)".format(n=len(time_at_division)),
yaxis_title="Probability of <b>division</b> (minutes)",
legend_title="Legend",
font=dict(
family="Courier New, monospace",
size=12,
color="Black"))
division_trace = go.Histogram(x=time_at_division, histnorm='probability density', name="Division Histogram")
division_gauss_trace = go.Scatter(x=gauss_divide_x, y=gauss_divide_y, name="Gaussian")
two_gene_trace = go.Histogram(x=time_at_two_genes, histnorm='probability density', name="2 Genes Histogram")
two_gene_gauss_trace = go.Scatter(x=gauss_genes_x, y=gauss_genes_y, name="Gaussian")
fig.add_trace(division_trace, row=1, col=1)
fig.add_trace(division_gauss_trace, row=1, col=1,)
fig.add_trace(two_gene_trace, row=2, col=1)
fig.add_trace(two_gene_gauss_trace, row=2, col=1)
fig.show()
if save:
html = os.path.join(sim, "Average_times.html")
image = os.path.join(sim, "Average_times.png")
fig.write_html(html)
fig.write_image(image)
else:
pass
def combine_cell_cycles(sim, save, const):
path = os.path.join(sim, "*.csv")
for fname in glob.glob(path):
df = pandas.read_csv(fname, sep='\t')
# df = df.iloc[25200:]
ml = []
tl = []
pl = []
number_of_cycles = []
mrna_cell_cycles = pandas.DataFrame()
protein_cell_cycles = pandas.DataFrame()
counter = pandas.DataFrame()
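    # each completed cell cycle becomes one DataFrame column keyed by a random UUID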
result = zip(df["Counter"].tolist(), df["mRNA"].tolist(), df["Divide"].tolist(), df["Proteins"].tolist())
for i in result:
if i[2] == "Yes":
x = str(uuid.uuid4())
mrna_cell_cycles["Cycle_{}".format(x)] = ml
protein_cell_cycles["Cycle_{}".format(x)] = pl
counter["Cycle_{}".format(x)] = tl
number_of_cycles.append(i[0])
ml.clear()
tl.clear()
pl.clear()
ml.append(i[1])
tl.append(i[0])
pl.append(i[3])
else:
ml.append(i[1])
tl.append(i[0])
pl.append(i[3])
mrna_cell_cycles["Average"] = mrna_cell_cycles.mean(axis=1)
mrna_cell_cycles["std"] = mrna_cell_cycles.std(axis=1)
mrna_cell_cycles["Counter"] = numpy.linspace(0, 1, len(mrna_cell_cycles["Average"].tolist()))
protein_cell_cycles["Average"] = protein_cell_cycles.mean(axis=1)
protein_cell_cycles["std"] = protein_cell_cycles.std(axis=1)
protein_cell_cycles["Counter"] = numpy.linspace(0, 1, len(protein_cell_cycles["Average"].tolist()))
mrna_std_minus = []
mrna_std_plus = []
prot_std_minus = []
prot_std_plus = []
mrna_stats = zip(mrna_cell_cycles["Average"].tolist(), mrna_cell_cycles["std"].tolist())
prot_stats = zip(protein_cell_cycles["Average"].tolist(), protein_cell_cycles["std"].tolist())
for i, j in mrna_stats:
mrna_std_minus.append(i-j)
mrna_std_plus.append(i+j)
for i, j in prot_stats:
prot_std_minus.append(i-j)
prot_std_plus.append(i+j)
numerical = DeterministicCellDivision(tmax=36000,
num_of_datapoints=36000,
m0=7.59,
p0=1014.145,
const=const)
numerical_run = numerical.numerical_sim()
    numerical_run = numerical_run.iloc[32400:]
numerical_x = numpy.linspace(0, 1, 3600)
"""Just graphing things"""
fig = make_subplots(rows=2, cols=1, vertical_spacing=0.02, shared_xaxes=True)
# Making traces for plot
mrna_std_minus_trace = go.Scatter(x=mrna_cell_cycles["Counter"],
y=mrna_std_minus,
name="Gillespie mRNA STD",
line=dict(color='darkgrey'),
fill=None,
showlegend=False)
mrna_std_plus_trace = go.Scatter(x=mrna_cell_cycles["Counter"],
y=mrna_std_plus,
name="Gillespie mRNA STD",
line=dict(color='darkgrey'),
fill='tonexty',
showlegend=False)
mrna_trace = go.Scatter(x=mrna_cell_cycles["Counter"],
y=mrna_cell_cycles["Average"],
name="Gillespie mRNA",
line=dict(color='royalblue'))
numerical_mrna_trace = go.Scatter(x=numerical_x,
y=numerical_run["mRNA"],
name="Numerical mRNA",
line=dict(color='royalblue',
dash='dash'))
prot_std_minus_trace = go.Scatter(x=protein_cell_cycles["Counter"],
y=prot_std_minus,
name="Gillespie Protein STD",
line=dict(color='darkgrey'),
fill=None,
showlegend=False)
prot_std_plus_trace = go.Scatter(x=protein_cell_cycles["Counter"],
y=prot_std_plus,
name="Gillespie Protein STD",
line=dict(color='darkgrey'),
fill='tonexty',
showlegend=False)
prot_trace = go.Scatter(x=protein_cell_cycles["Counter"],
y=protein_cell_cycles["Average"],
name="Gillespie Protein",
line=dict(color='firebrick'))
numerical_prot_trace = go.Scatter(x=numerical_x,
y=numerical_run["Proteins"],
name="Numerical Proteins",
line=dict(color='firebrick',
dash='dash'))
fig.add_trace(prot_trace, row=1, col=1)
fig.add_trace(numerical_prot_trace, row=1, col=1)
fig.add_trace(prot_std_minus_trace, row=1, col=1)
fig.add_trace(prot_std_plus_trace, row=1, col=1,)
fig.add_trace(numerical_mrna_trace, row=2, col=1)
fig.add_trace(mrna_trace, row=2, col=1)
fig.add_trace(mrna_std_minus_trace, row=2, col=1)
fig.add_trace(mrna_std_plus_trace, row=2, col=1)
fig.update_yaxes(title_text="Number of <b>mRNA</b> Molecules", row=2, col=1)
fig.update_xaxes(title_text="Time (Hours)", row=2, col=1)
fig.update_layout(
title="Cell cycle average of Protein and mRNA over time for {n} cell cycles".format(n=len(number_of_cycles)),
yaxis_title="Number of <b>Protein</b> Molecules",
legend_title="Legend",
font=dict(
family="Courier New, monospace",
size=12,
color="Black"
)
)
fig.show()
if save:
html = os.path.join(sim, "Cell_cycle_average.html")
image = os.path.join(sim, "Cell_cycle_average.png")
fig.write_html(html)
fig.write_image(image)
else:
pass
def numerical_plot(run, save):
"""Plot Analytical model"""
numerical_run = run.numerical_sim()
numerical_fig = make_subplots(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.02)
mrna_trace = go.Scatter(x=numerical_run["Time"],
y=numerical_run["mRNA"],
name="mRNA")
prot_trace = go.Scatter(x=numerical_run["Time"],
y=numerical_run["Proteins"],
name="Proteins")
numerical_fig.add_trace(mrna_trace, row=2, col=1)
numerical_fig.add_trace(prot_trace, row=1, col=1)
numerical_fig.update_layout(
title="Numerical Cell division comparison of mRNA and Protein molecules over time",
yaxis_title="Number of <b>Protein</b> Molecules",
legend_title="Legend",
barmode="group",
font=dict(
family="Courier New, monospace",
size=12,
color="Black"
)
)
numerical_fig.update_yaxes(title_text="Number of <b>mRNA</b> Molecules", row=2, col=1)
numerical_fig.update_xaxes(title_text="Time (Hours)", row=2, col=1)
numerical_fig.show()
if save:
numerical_fig.write_html("numerical.html")
numerical_fig.write_image("numerical.png")
else:
pass
def histogram_plot(sim, save):
norm_hist = make_subplots(rows=2, cols=1, vertical_spacing=0.02)
norm_hist.update_layout(
title="Probability distribution of mRNA and Protein",
yaxis_title="Probability of <b>mRNA</b> Molecules",
legend_title="Legend",
font=dict(
family="Courier New, monospace",
size=12,
color="Black"))
norm_hist.update_yaxes(title_text="Probability of <b>Protein</b>", row=2, col=1)
norm_hist.update_xaxes(title_text="Number of <b>Molecules</b>", row=2, col=1)
"""Get data from runs"""
path = os.path.join(sim, "*.csv")
mrna = []
prot = []
for fname in glob.glob(path):
df = pandas.read_csv(fname, sep='\t')
mrna.extend(df["mRNA"].tolist())
prot.extend(df["Proteins"].tolist())
mrna_hist = go.Histogram(x=mrna, name="mRNA Histogram")
prot_hist = go.Histogram(x=prot, name="Protein Histogram")
norm_hist.add_trace(mrna_hist, row=1, col=1)
norm_hist.add_trace(prot_hist, row=2, col=1)
norm_hist.show()
if save:
html_file = os.path.join(sim, "hist.html")
png_file = os.path.join(sim, "hist.png")
norm_hist.write_html(html_file)
norm_hist.write_image(png_file)
else:
pass
def plot_statistics(sim, save, const):
stat_fig = make_subplots(rows=2, cols=2, vertical_spacing=0.02)
path = os.path.join(sim, "*.csv")
mrna_cell_cycles = pandas.DataFrame()
protein_cell_cycles = pandas.DataFrame()
ml = []
pl = []
for fname in glob.glob(path):
df = pandas.read_csv(fname, sep='\t')
result = zip(df["Counter"].tolist(), df["mRNA"].tolist(), df["Divide"].tolist(), df["Proteins"].tolist())
for i in result:
if i[2] == "Yes":
x = str(uuid.uuid4())
mrna_cell_cycles["Cycle_{}".format(x)] = ml
protein_cell_cycles["Cycle_{}".format(x)] = pl
ml.clear()
pl.clear()
ml.append(i[1])
pl.append(i[3])
else:
ml.append(i[1])
pl.append(i[3])
mrna_mean = mrna_cell_cycles.mean(axis=0).tolist()
mrna_var = mrna_cell_cycles.var(axis=0).tolist()
prot_mean = protein_cell_cycles.mean(axis=0).tolist()
prot_var = protein_cell_cycles.var(axis=0).tolist()
mrna_sem = sem(mrna_mean)
prot_sem = sem(prot_mean)
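    # standard error of the per-cycle means; used for the error bars on the
    # Gillespie mean bars below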
numerical = DeterministicCellDivision(tmax=36000,
num_of_datapoints=36000,
m0=7.59,
p0=1014.145,
const=const)
numerical_run = numerical.numerical_sim()
numerical_run = numerical_run.iloc[32400:]
num_protein_mean = go.Bar(x=["Numerical Protein Mean"],
y=[numerical_run["Proteins"].mean()],
name="Numerical Protein",
marker=dict(color=["darkgrey"]))
num_protein_var = go.Bar(x=["Numerical Protein Variance"],
y=[numerical_run["Proteins"].var()],
name="Numerical Protein",
marker=dict(color=["darkgrey"]))
num_mrna_mean = go.Bar(x=["Numerical mRNA Mean"],
y=[numerical_run["mRNA"].mean()],
name="Numerical mRNA",
marker=dict(color=["darkgrey"]))
num_mrna_var = go.Bar(x=["Numerical mRNA Variance"],
y=[numerical_run["mRNA"].var()],
name="Numerical mRNA",
marker=dict(color=["darkgrey"]))
gill_protein_mean = go.Bar(x=["Gillespie Protein Mean"],
y=[numpy.array(prot_mean).mean()],
text=prot_mean,
name="Gillespie Protein",
marker=dict(color=["firebrick"]),
error_y=dict(type='data', array=[prot_sem])
)
gill_mrna_mean = go.Bar(x=["Gillespie mRNA Mean"],
y=[numpy.array(mrna_mean).mean()],
text=mrna_mean,
name="Gillespie mRNA",
marker=dict(color=["royalblue"]),
error_y=dict(type='data', array=[mrna_sem])
)
gill_protein_var = go.Bar(x=["Gillespie Protein Variance"],
y=[numpy.array(prot_var).mean()],
text=prot_var,
name="Gillespie Protein",
marker=dict(color=["firebrick"]),
error_y=dict(type='data', array=[sem(prot_var)]))
gill_mrna_var = go.Bar(x=["Gillespie mRNA Variance"],
y=[numpy.array(mrna_var).mean()],
text=mrna_var,
name="Gillespie mRNA",
marker=dict(color=["royalblue"]),
error_y=dict(type='data', array=[sem(mrna_var)]))
stat_fig.add_trace(gill_protein_mean, row=1, col=1)
stat_fig.add_trace(num_protein_mean, row=1, col=1)
stat_fig.add_trace(gill_mrna_mean, row=1, col=2)
stat_fig.add_trace(num_mrna_mean, row=1, col=2)
stat_fig.add_trace(gill_protein_var, row=2, col=1)
stat_fig.add_trace(num_protein_var, row=2, col=1)
stat_fig.add_trace(gill_mrna_var, row=2, col=2)
stat_fig.add_trace(num_mrna_var, row=2, col=2)
stat_fig.update_layout(
title="Mean and Variance for dividing for {n} cell cycles".format(n=len(mrna_mean)),
yaxis_title="Number of Molecules",
showlegend=False,
font=dict(
family="Courier New, monospace",
size=12,
color="Black"
)
)
stat_fig.show()
if save:
html_file = os.path.join(sim, "stats.html")
png_file = os.path.join(sim, "stats.png")
stat_fig.write_html(html_file)
stat_fig.write_image(png_file)
else:
pass
def plot_gillespie(number_of_runs, sim, save):
cell_div_fig = make_subplots(specs=[[{"secondary_y": True}],
[{"secondary_y": False}]],
rows=2,
cols=1,
row_heights=[0.8, 0.2],
shared_xaxes=True,
vertical_spacing=0.02)
cell_div_fig.update_layout(
title="Cell division comparison of mRNA and Protein molecules over time",
yaxis_title="Number of <b>Protein</b> Molecules",
legend_title="Legend",
font=dict(
family="Courier New, monospace",
size=12,
color="Black"
)
)
cell_div_fig.update_yaxes(title_text="Number of <b>mRNA</b> Molecules", secondary_y=True)
cell_div_fig.update_xaxes(title_text="Time (Hours)", row=2, col=1)
cell_div_fig.update_yaxes(title_text="Number of <b>Genes</b>", row=2, col=1)
path = os.path.join(sim, "*.csv")
if number_of_runs == 1:
for fname in glob.glob(path):
df = pandas.read_csv(fname, sep='\t')
"""Gillespie model of cell division"""
mrna_trace = go.Scatter(x=df["Time"],
y=df["mRNA"],
name="mRNA",
line=dict(color='royalblue', )
)
protein_trace = go.Scatter(x=df["Time"],
y=df["Proteins"],
name="Protein",
line=dict(color='firebrick', )
)
genes_trace = go.Scatter(x=df["Time"],
y=df["Gene Number"],
name="Number of genes")
cell_div_fig.add_trace(mrna_trace, secondary_y=True, row=1, col=1)
cell_div_fig.add_trace(protein_trace, secondary_y=False, row=1, col=1)
cell_div_fig.add_trace(genes_trace, row=2, col=1)
for i in division(df):
cell_div_fig.add_shape(i, row=1, col=1, secondary_y=True)
cell_div_fig.show()
else:
pass
if save:
html_path = os.path.join(sim, "Gillespie.html")
image_path = os.path.join(sim, "Gillespie.png")
cell_div_fig.write_image(image_path)
cell_div_fig.write_html(html_path)
else:
pass
def main():
"""Constants to be changed by user"""
# seconds for sim
tmax = 360000
number_of_datapoints = 43200
# k0 (mRNA), k1 (protein), dm, dp
const = [0.0167, 0.167, 0.0022, 0.0]
    # m0, p0: initial mRNA and protein molecule counts
initial_conditions = [7, 1014]
number_of_simulations = 1
    # time points (in units of dt)
cell_cycle = 6300
save = True
# """Initiate Numerical sim"""
# numerical = DeterministicCellDivision(tmax=tmax,
# num_of_datapoints=number_of_datapoints,
# m0=initial_conditions[0],
# p0=initial_conditions[1],
# const=const)
# numerical_plot(numerical, save)
"""Begin Gillespie Simulation"""
gillespie_cell_model = CellDivision(tmax=tmax, m0=initial_conditions[0], p0=initial_conditions[1], const=const,
number_of_sims=number_of_simulations, cell_cycle=cell_cycle)
run = gillespie_cell_model.multiple_cells()
combine_cell_cycles(sim=run, save=save, const=const)
average_cycle_times(sim=run, save=save, cell_cycle=cell_cycle)
# """Different plots for the Gillespie data"""
histogram_plot(sim=run, save=save)
plot_statistics(sim=run, save=save, const=const)
plot_gillespie(number_of_runs=number_of_simulations, sim=run, save=save)
if __name__ == '__main__':
main()
|
[
"pandas.DataFrame",
"plotly.graph_objects.Scatter",
"math.exp",
"uuid.uuid4",
"plotly.graph_objects.Histogram",
"math.sqrt",
"models.DeterministicCellDivision",
"models.CellDivision",
"pandas.read_csv",
"scipy.stats.norm.pdf",
"numpy.array",
"scipy.stats.sem",
"numpy.linspace",
"glob.glob",
"plotly.subplots.make_subplots",
"os.path.join"
] |
[((1001, 1027), 'os.path.join', 'os.path.join', (['sim', '"""*.csv"""'], {}), "(sim, '*.csv')\n", (1013, 1027), False, 'import os\n'), ((1045, 1060), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (1054, 1060), False, 'import glob\n'), ((2356, 2405), 'scipy.stats.norm.pdf', 'norm.pdf', (['gauss_divide_x', 'divide_mean', 'divide_std'], {}), '(gauss_divide_x, divide_mean, divide_std)\n', (2364, 2405), False, 'from scipy.stats import sem, norm\n'), ((2533, 2579), 'scipy.stats.norm.pdf', 'norm.pdf', (['gauss_genes_x', 'genes_mean', 'genes_std'], {}), '(gauss_genes_x, genes_mean, genes_std)\n', (2541, 2579), False, 'from scipy.stats import sem, norm\n'), ((2591, 2620), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)'}), '(rows=2, cols=1)\n', (2604, 2620), False, 'from plotly.subplots import make_subplots\n'), ((3132, 3228), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'time_at_division', 'histnorm': '"""probability density"""', 'name': '"""Division Histogram"""'}), "(x=time_at_division, histnorm='probability density', name=\n 'Division Histogram')\n", (3144, 3228), True, 'import plotly.graph_objects as go\n'), ((3251, 3314), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'gauss_divide_x', 'y': 'gauss_divide_y', 'name': '"""Gaussian"""'}), "(x=gauss_divide_x, y=gauss_divide_y, name='Gaussian')\n", (3261, 3314), True, 'import plotly.graph_objects as go\n'), ((3337, 3433), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'time_at_two_genes', 'histnorm': '"""probability density"""', 'name': '"""2 Genes Histogram"""'}), "(x=time_at_two_genes, histnorm='probability density', name=\n '2 Genes Histogram')\n", (3349, 3433), True, 'import plotly.graph_objects as go\n'), ((3456, 3517), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': 'gauss_genes_x', 'y': 'gauss_genes_y', 'name': '"""Gaussian"""'}), "(x=gauss_genes_x, y=gauss_genes_y, name='Gaussian')\n", (3466, 3517), True, 'import plotly.graph_objects as go\n'), ((4001, 4027), 'os.path.join', 'os.path.join', (['sim', '"""*.csv"""'], {}), "(sim, '*.csv')\n", (4013, 4027), False, 'import os\n'), ((4045, 4060), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (4054, 4060), False, 'import glob\n'), ((4220, 4238), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (4236, 4238), False, 'import pandas\n'), ((4265, 4283), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (4281, 4283), False, 'import pandas\n'), ((4298, 4316), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (4314, 4316), False, 'import pandas\n'), ((5925, 6027), 'models.DeterministicCellDivision', 'DeterministicCellDivision', ([], {'tmax': '(36000)', 'num_of_datapoints': '(36000)', 'm0': '(7.59)', 'p0': '(1014.145)', 'const': 'const'}), '(tmax=36000, num_of_datapoints=36000, m0=7.59, p0=\n 1014.145, const=const)\n', (5950, 6027), False, 'from models import Gillespie, CellDivision, DeterministicCellDivision\n'), ((6302, 6328), 'numpy.linspace', 'numpy.linspace', (['(0)', '(1)', '(3600)'], {}), '(0, 1, 3600)\n', (6316, 6328), False, 'import numpy\n'), ((6370, 6441), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'vertical_spacing': '(0.02)', 'shared_xaxes': '(True)'}), '(rows=2, cols=1, vertical_spacing=0.02, shared_xaxes=True)\n', (6383, 6441), False, 'from plotly.subplots import make_subplots\n'), ((10280, 10351), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'shared_xaxes': '(True)', 
'vertical_spacing': '(0.02)'}), '(rows=2, cols=1, shared_xaxes=True, vertical_spacing=0.02)\n', (10293, 10351), False, 'from plotly.subplots import make_subplots\n'), ((10369, 10442), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "numerical_run['Time']", 'y': "numerical_run['mRNA']", 'name': '"""mRNA"""'}), "(x=numerical_run['Time'], y=numerical_run['mRNA'], name='mRNA')\n", (10379, 10442), True, 'import plotly.graph_objects as go\n'), ((10517, 10603), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "numerical_run['Time']", 'y': "numerical_run['Proteins']", 'name': '"""Proteins"""'}), "(x=numerical_run['Time'], y=numerical_run['Proteins'], name=\n 'Proteins')\n", (10527, 10603), True, 'import plotly.graph_objects as go\n'), ((11505, 11557), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(1)', 'vertical_spacing': '(0.02)'}), '(rows=2, cols=1, vertical_spacing=0.02)\n', (11518, 11557), False, 'from plotly.subplots import make_subplots\n'), ((12088, 12114), 'os.path.join', 'os.path.join', (['sim', '"""*.csv"""'], {}), "(sim, '*.csv')\n", (12100, 12114), False, 'import os\n'), ((12160, 12175), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (12169, 12175), False, 'import glob\n'), ((12325, 12368), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'mrna', 'name': '"""mRNA Histogram"""'}), "(x=mrna, name='mRNA Histogram')\n", (12337, 12368), True, 'import plotly.graph_objects as go\n'), ((12385, 12431), 'plotly.graph_objects.Histogram', 'go.Histogram', ([], {'x': 'prot', 'name': '"""Protein Histogram"""'}), "(x=prot, name='Protein Histogram')\n", (12397, 12431), True, 'import plotly.graph_objects as go\n'), ((12823, 12875), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'rows': '(2)', 'cols': '(2)', 'vertical_spacing': '(0.02)'}), '(rows=2, cols=2, vertical_spacing=0.02)\n', (12836, 12875), False, 'from plotly.subplots import make_subplots\n'), ((12887, 12913), 'os.path.join', 'os.path.join', (['sim', '"""*.csv"""'], {}), "(sim, '*.csv')\n", (12899, 12913), False, 'import os\n'), ((12937, 12955), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (12953, 12955), False, 'import pandas\n'), ((12982, 13000), 'pandas.DataFrame', 'pandas.DataFrame', ([], {}), '()\n', (12998, 13000), False, 'import pandas\n'), ((13042, 13057), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (13051, 13057), False, 'import glob\n'), ((13872, 13886), 'scipy.stats.sem', 'sem', (['mrna_mean'], {}), '(mrna_mean)\n', (13875, 13886), False, 'from scipy.stats import sem, norm\n'), ((13902, 13916), 'scipy.stats.sem', 'sem', (['prot_mean'], {}), '(prot_mean)\n', (13905, 13916), False, 'from scipy.stats import sem, norm\n'), ((13934, 14036), 'models.DeterministicCellDivision', 'DeterministicCellDivision', ([], {'tmax': '(36000)', 'num_of_datapoints': '(36000)', 'm0': '(7.59)', 'p0': '(1014.145)', 'const': 'const'}), '(tmax=36000, num_of_datapoints=36000, m0=7.59, p0=\n 1014.145, const=const)\n', (13959, 14036), False, 'from models import Gillespie, CellDivision, DeterministicCellDivision\n'), ((17797, 17959), 'plotly.subplots.make_subplots', 'make_subplots', ([], {'specs': "[[{'secondary_y': True}], [{'secondary_y': False}]]", 'rows': '(2)', 'cols': '(1)', 'row_heights': '[0.8, 0.2]', 'shared_xaxes': '(True)', 'vertical_spacing': '(0.02)'}), "(specs=[[{'secondary_y': True}], [{'secondary_y': False}]],\n rows=2, cols=1, row_heights=[0.8, 0.2], shared_xaxes=True,\n vertical_spacing=0.02)\n", (17810, 17959), False, 'from plotly.subplots 
import make_subplots\n'), ((18745, 18771), 'os.path.join', 'os.path.join', (['sim', '"""*.csv"""'], {}), "(sim, '*.csv')\n", (18757, 18771), False, 'import os\n'), ((21028, 21181), 'models.CellDivision', 'CellDivision', ([], {'tmax': 'tmax', 'm0': 'initial_conditions[0]', 'p0': 'initial_conditions[1]', 'const': 'const', 'number_of_sims': 'number_of_simulations', 'cell_cycle': 'cell_cycle'}), '(tmax=tmax, m0=initial_conditions[0], p0=initial_conditions[1],\n const=const, number_of_sims=number_of_simulations, cell_cycle=cell_cycle)\n', (21040, 21181), False, 'from models import Gillespie, CellDivision, DeterministicCellDivision\n'), ((893, 928), 'math.exp', 'math.exp', (['(-0.5 * ((x - u) / s) ** 2)'], {}), '(-0.5 * ((x - u) / s) ** 2)\n', (901, 928), False, 'import math\n'), ((1075, 1107), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (1090, 1107), False, 'import pandas\n'), ((3767, 3806), 'os.path.join', 'os.path.join', (['sim', '"""Average_times.html"""'], {}), "(sim, 'Average_times.html')\n", (3779, 3806), False, 'import os\n'), ((3823, 3861), 'os.path.join', 'os.path.join', (['sim', '"""Average_times.png"""'], {}), "(sim, 'Average_times.png')\n", (3835, 3861), False, 'import os\n'), ((4075, 4107), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (4090, 4107), False, 'import pandas\n'), ((9967, 10011), 'os.path.join', 'os.path.join', (['sim', '"""Cell_cycle_average.html"""'], {}), "(sim, 'Cell_cycle_average.html')\n", (9979, 10011), False, 'import os\n'), ((10028, 10071), 'os.path.join', 'os.path.join', (['sim', '"""Cell_cycle_average.png"""'], {}), "(sim, 'Cell_cycle_average.png')\n", (10040, 10071), False, 'import os\n'), ((12190, 12222), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (12205, 12222), False, 'import pandas\n'), ((12584, 12614), 'os.path.join', 'os.path.join', (['sim', '"""hist.html"""'], {}), "(sim, 'hist.html')\n", (12596, 12614), False, 'import os\n'), ((12634, 12663), 'os.path.join', 'os.path.join', (['sim', '"""hist.png"""'], {}), "(sim, 'hist.png')\n", (12646, 12663), False, 'import os\n'), ((13072, 13104), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (13087, 13104), False, 'import pandas\n'), ((17546, 17577), 'os.path.join', 'os.path.join', (['sim', '"""stats.html"""'], {}), "(sim, 'stats.html')\n", (17558, 17577), False, 'import os\n'), ((17597, 17627), 'os.path.join', 'os.path.join', (['sim', '"""stats.png"""'], {}), "(sim, 'stats.png')\n", (17609, 17627), False, 'import os\n'), ((18821, 18836), 'glob.glob', 'glob.glob', (['path'], {}), '(path)\n', (18830, 18836), False, 'import glob\n'), ((19448, 19517), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': "df['Time']", 'y': "df['Gene Number']", 'name': '"""Number of genes"""'}), "(x=df['Time'], y=df['Gene Number'], name='Number of genes')\n", (19458, 19517), True, 'import plotly.graph_objects as go\n'), ((19982, 20017), 'os.path.join', 'os.path.join', (['sim', '"""Gillespie.html"""'], {}), "(sim, 'Gillespie.html')\n", (19994, 20017), False, 'import os\n'), ((20039, 20073), 'os.path.join', 'os.path.join', (['sim', '"""Gillespie.png"""'], {}), "(sim, 'Gillespie.png')\n", (20051, 20073), False, 'import os\n'), ((2030, 2060), 'numpy.array', 'numpy.array', (['time_at_two_genes'], {}), '(time_at_two_genes)\n', (2041, 2060), False, 'import numpy\n'), ((2084, 2114), 'numpy.array', 'numpy.array', (['time_at_two_genes'], 
{}), '(time_at_two_genes)\n', (2095, 2114), False, 'import numpy\n'), ((2139, 2168), 'numpy.array', 'numpy.array', (['time_at_division'], {}), '(time_at_division)\n', (2150, 2168), False, 'import numpy\n'), ((2193, 2222), 'numpy.array', 'numpy.array', (['time_at_division'], {}), '(time_at_division)\n', (2204, 2222), False, 'import numpy\n'), ((18855, 18887), 'pandas.read_csv', 'pandas.read_csv', (['fname'], {'sep': '"""\t"""'}), "(fname, sep='\\t')\n", (18870, 18887), False, 'import pandas\n'), ((866, 888), 'math.sqrt', 'math.sqrt', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (875, 888), False, 'import math\n'), ((1655, 1684), 'numpy.array', 'numpy.array', (['time_at_division'], {}), '(time_at_division)\n', (1666, 1684), False, 'import numpy\n'), ((1721, 1750), 'numpy.array', 'numpy.array', (['time_at_division'], {}), '(time_at_division)\n', (1732, 1750), False, 'import numpy\n'), ((1907, 1937), 'numpy.array', 'numpy.array', (['time_at_two_genes'], {}), '(time_at_two_genes)\n', (1918, 1937), False, 'import numpy\n'), ((1975, 2005), 'numpy.array', 'numpy.array', (['time_at_two_genes'], {}), '(time_at_two_genes)\n', (1986, 2005), False, 'import numpy\n'), ((4494, 4506), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4504, 4506), False, 'import uuid\n'), ((13298, 13310), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13308, 13310), False, 'import uuid\n'), ((15333, 15355), 'numpy.array', 'numpy.array', (['prot_mean'], {}), '(prot_mean)\n', (15344, 15355), False, 'import numpy\n'), ((15729, 15751), 'numpy.array', 'numpy.array', (['mrna_mean'], {}), '(mrna_mean)\n', (15740, 15751), False, 'import numpy\n'), ((16118, 16139), 'numpy.array', 'numpy.array', (['prot_var'], {}), '(prot_var)\n', (16129, 16139), False, 'import numpy\n'), ((16377, 16390), 'scipy.stats.sem', 'sem', (['prot_var'], {}), '(prot_var)\n', (16380, 16390), False, 'from scipy.stats import sem, norm\n'), ((16483, 16504), 'numpy.array', 'numpy.array', (['mrna_var'], {}), '(mrna_var)\n', (16494, 16504), False, 'import numpy\n'), ((16727, 16740), 'scipy.stats.sem', 'sem', (['mrna_var'], {}), '(mrna_var)\n', (16730, 16740), False, 'from scipy.stats import sem, norm\n')]
|