code (string, lengths 31 to 1.05M) | apis (list) | extract_api (string, lengths 97 to 1.91M) |
---|---|---|
import pylab
import matplotlib as mpl
import numpy as np
import matplotlib.colors as colors
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
from colormap import cmap_builder, test_cmap # for _build_colormap()
# ---------------------------------------------------------------------------
# Useful colormaps:
# plt.cm.spectral, plt.get_cmap('jet')
# hint: reverse colormaps by adding underscore and r, e.g. plt.cm.spectral_r
# ---------------------------------------------------------------------------
# cmap_rg = mcolors.LinearSegmentedColormap('my_colormap', cdict, 100)
def _build_colormap(color1, color2, color3):
""" Builds colormap from three given colors (given as strings)"""
    cm = cmap_builder(color1, color2, color3)
return cm
def show_truncated_colormap(cmap = plt.cm.spectral, minv = 0.2, maxv = 0.8):
""" Compare original and truncated colormap """
arr = np.linspace(0, 50, 100).reshape((10, 10))
fig, ax = plt.subplots(ncols=2)
new_cmap = _truncate_colormap(cmap, minv, maxv)
ax[0].imshow(arr, interpolation='nearest', cmap=cmap)
ax[1].imshow(arr, interpolation='nearest', cmap=new_cmap)
plt.show()
def _truncate_colormap(cmap = plt.cm.spectral, minval=0.0, maxval=1.0, n=100):
""" Sample colormap from given colormap """
""" """
new_cmap = colors.LinearSegmentedColormap.from_list(
'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap(np.linspace(minval, maxval, n)))
return new_cmap
def do_plot ( arr, filename = "test.png", title = "A plot", bool_save = False, minval=0, maxval=0.95, cmap=plt.cm.spectral ):
""" A function to plot and label a raster dataset given as an array. Extra
options to choose bounds of the colourscale and the colormap to use.
"""
# TODO: use opencv for saving files
dpi = 200
    # cut colors of the source colormap to get appropriate colors (e.g. for NDVI)
    cmap_ndvi = _truncate_colormap(cmap, minval=minval, maxval=maxval, n=100)
    plt.imshow(arr, interpolation='nearest', cmap=cmap_ndvi)
plt.title(title)
plt.colorbar()
plt.axis('off')
    if bool_save:
plt.savefig(filename,bbox_inches='tight', dpi = dpi)
else:
plt.show()
plt.clf()
|
[
"matplotlib.pyplot.imshow",
"colormap.cmap_builder",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.axis",
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((769, 808), 'colormap.cmap_builder', 'cmap_builder', (['"""blue"""', '"""orange"""', '"""green"""'], {}), "('blue', 'orange', 'green')\n", (781, 808), False, 'from colormap import cmap_builder, test_cmap\n'), ((1019, 1040), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'ncols': '(2)'}), '(ncols=2)\n', (1031, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1255), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1253, 1255), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2201), 'matplotlib.pyplot.imshow', 'plt.imshow', (['arr'], {'interpolation': '"""nearest"""', 'cmap': 'cmap'}), "(arr, interpolation='nearest', cmap=cmap)\n", (2160, 2201), True, 'import matplotlib.pyplot as plt\n'), ((2209, 2225), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2218, 2225), True, 'import matplotlib.pyplot as plt\n'), ((2230, 2244), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2242, 2244), True, 'import matplotlib.pyplot as plt\n'), ((2249, 2264), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2257, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2386, 2395), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2393, 2395), True, 'import matplotlib.pyplot as plt\n'), ((2300, 2351), 'matplotlib.pyplot.savefig', 'plt.savefig', (['filename'], {'bbox_inches': '"""tight"""', 'dpi': 'dpi'}), "(filename, bbox_inches='tight', dpi=dpi)\n", (2311, 2351), True, 'import matplotlib.pyplot as plt\n'), ((2371, 2381), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2379, 2381), True, 'import matplotlib.pyplot as plt\n'), ((963, 986), 'numpy.linspace', 'np.linspace', (['(0)', '(50)', '(100)'], {}), '(0, 50, 100)\n', (974, 986), True, 'import numpy as np\n'), ((1545, 1575), 'numpy.linspace', 'np.linspace', (['minval', 'maxval', 'n'], {}), '(minval, maxval, n)\n', (1556, 1575), True, 'import numpy as np\n')]
|
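A note on the colormap snippet above: plt.cm.spectral has been removed from recent matplotlib releases (the colormap is now registered as 'nipy_spectral'), so the functions only run as written on old versions. Below is a minimal sketch of the same truncation idea against the current API; the helper name truncate_colormap is illustrative, not part of the original module.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    # sample n colours from the [minval, maxval] slice of cmap and rebuild a colormap from them
    return mcolors.LinearSegmentedColormap.from_list(
        'trunc({})'.format(cmap.name),
        cmap(np.linspace(minval, maxval, n)))
cmap = plt.get_cmap('nipy_spectral')            # replacement for plt.cm.spectral
cmap_ndvi = truncate_colormap(cmap, 0.2, 0.8)    # e.g. keep the middle 60% of the colours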
import random as _random
import unittest as _unittest
import numpy as _np
import torch as _torch
import torchutils as _tu
class _TestUtils(_unittest.TestCase):
def test_set_random_seed(self):
# Set new seed and verify.
seed = _random.randint(1, 1000)
_tu.set_random_seed(seed)
np_new_seed = _np.random.get_state()[1][0]
torch_new_seed = _torch.initial_seed()
self.assertEqual(seed, np_new_seed)
self.assertEqual(seed, torch_new_seed)
if _torch.cuda.is_available():
cuda_new_seed = _torch.cuda.initial_seed()
self.assertEqual(seed, cuda_new_seed)
if __name__ == '__main__':
_unittest.main()
|
[
"numpy.random.get_state",
"torch.initial_seed",
"torch.cuda.is_available",
"torch.cuda.initial_seed",
"unittest.main",
"random.randint",
"torchutils.set_random_seed"
] |
[((676, 692), 'unittest.main', '_unittest.main', ([], {}), '()\n', (690, 692), True, 'import unittest as _unittest\n'), ((251, 275), 'random.randint', '_random.randint', (['(1)', '(1000)'], {}), '(1, 1000)\n', (266, 275), True, 'import random as _random\n'), ((284, 309), 'torchutils.set_random_seed', '_tu.set_random_seed', (['seed'], {}), '(seed)\n', (303, 309), True, 'import torchutils as _tu\n'), ((386, 407), 'torch.initial_seed', '_torch.initial_seed', ([], {}), '()\n', (405, 407), True, 'import torch as _torch\n'), ((510, 536), 'torch.cuda.is_available', '_torch.cuda.is_available', ([], {}), '()\n', (534, 536), True, 'import torch as _torch\n'), ((566, 592), 'torch.cuda.initial_seed', '_torch.cuda.initial_seed', ([], {}), '()\n', (590, 592), True, 'import torch as _torch\n'), ((332, 354), 'numpy.random.get_state', '_np.random.get_state', ([], {}), '()\n', (352, 354), True, 'import numpy as _np\n')]
|
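The unit test above assumes torchutils.set_random_seed seeds Python's random module, NumPy, and PyTorch (including CUDA) from a single value. As a hedged sketch, an equivalent helper could look like the following; this is illustrative and not necessarily how torchutils implements it.
import random
import numpy as np
import torch
def set_random_seed(seed):
    # seed every RNG the test inspects: Python, NumPy, Torch CPU and, if present, all CUDA devices
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)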
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from solo12_collisions_utils import followBoundary
# Load the collision map from file
col_map_file = './npy_data/collision_map_centered_res100.npy'
col_map = np.load(col_map_file, allow_pickle=True)
traj1 = np.array(followBoundary(col_map))
traj1 = [[t[1], t[0]] for t in traj1]
traj2 = np.array(followBoundary(col_map, first_dir=2))
traj2 = [[t[1], t[0]] for t in traj2]
print(col_map.shape)
#plt.imshow(col_map.T)
#plt.show()
xSize = len(col_map[0])
ySize = len(col_map)
xx, yy = np.meshgrid(np.linspace(0, xSize, xSize),
np.linspace(0, ySize, ySize))
# returns neighboring indices at given dist around [k,l]
def getNeighbors(k, l, dist):
neighbors = []
dist = int(dist)
for i in range(2*dist):
for j in range(2*dist):
neighbors.append([k - dist + i, l - dist + j])
return neighbors
X = []
for i in range(xSize):
for j in range(ySize):
        # Optional subsampling (disabled): keep only cells near one of the
        # boundary trajectories, or cells on a coarse 3x3 sub-grid.
        # neighbors = getNeighbors(i, j, 2)
        # append = False
        # for n in neighbors:
        #     if (n in traj1) or (n in traj2):
        #         append = True
        # if append or (i % 3 == 0 and j % 3 == 0):
        #     X.append([i, j])
        X.append([i, j])
X = np.array(X)
print(X.shape)
Y = col_map[X[:,0],X[:,1]] > 0 #for classifier
clf = svm.NuSVC(nu=0.5)
clf.fit(X,Y)
support = np.array(clf.support_vectors_)
print("Nb. support vectors : \n{}".format(clf.n_support_))
print("Support vectors : \n{}".format(support))
# plot the decision function for each datapoint on the grid
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()), aspect='auto',
origin='lower', cmap=plt.cm.PuOr_r)
contours = plt.contour(xx, yy, Z, levels=[0], linewidths=2,
linestyles='dashed', colors=['red'])
#plt.scatter(X[:, 0], X[:, 1], s=35, c=Y, cmap=plt.cm.Paired,
# edgecolors='k')
plt.scatter(support[:,0], support[:,1], c='red', s=15)
plt.xticks(())
plt.yticks(())
plt.axis([0,xSize,0,ySize])
plt.show()
|
[
"matplotlib.pyplot.xticks",
"sklearn.svm.NuSVC",
"solo12_collisions_utils.followBoundary",
"numpy.array",
"matplotlib.pyplot.contour",
"numpy.linspace",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"numpy.load",
"matplotlib.pyplot.show"
] |
[((235, 275), 'numpy.load', 'np.load', (['col_map_file'], {'allow_pickle': '(True)'}), '(col_map_file, allow_pickle=True)\n', (242, 275), True, 'import numpy as np\n'), ((1417, 1428), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (1425, 1428), True, 'import numpy as np\n'), ((1497, 1514), 'sklearn.svm.NuSVC', 'svm.NuSVC', ([], {'nu': '(0.5)'}), '(nu=0.5)\n', (1506, 1514), False, 'from sklearn import svm\n'), ((1538, 1568), 'numpy.array', 'np.array', (['clf.support_vectors_'], {}), '(clf.support_vectors_)\n', (1546, 1568), True, 'import numpy as np\n'), ((1991, 2080), 'matplotlib.pyplot.contour', 'plt.contour', (['xx', 'yy', 'Z'], {'levels': '[0]', 'linewidths': '(2)', 'linestyles': '"""dashed"""', 'colors': "['red']"}), "(xx, yy, Z, levels=[0], linewidths=2, linestyles='dashed',\n colors=['red'])\n", (2002, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2191, 2247), 'matplotlib.pyplot.scatter', 'plt.scatter', (['support[:, 0]', 'support[:, 1]'], {'c': '"""red"""', 's': '(15)'}), "(support[:, 0], support[:, 1], c='red', s=15)\n", (2202, 2247), True, 'import matplotlib.pyplot as plt\n'), ((2246, 2260), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (2256, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2261, 2275), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (2271, 2275), True, 'import matplotlib.pyplot as plt\n'), ((2276, 2306), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, xSize, 0, ySize]'], {}), '([0, xSize, 0, ySize])\n', (2284, 2306), True, 'import matplotlib.pyplot as plt\n'), ((2304, 2314), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2312, 2314), True, 'import matplotlib.pyplot as plt\n'), ((293, 316), 'solo12_collisions_utils.followBoundary', 'followBoundary', (['col_map'], {}), '(col_map)\n', (307, 316), False, 'from solo12_collisions_utils import followBoundary\n'), ((374, 410), 'solo12_collisions_utils.followBoundary', 'followBoundary', (['col_map'], {'first_dir': '(2)'}), '(col_map, first_dir=2)\n', (388, 410), False, 'from solo12_collisions_utils import followBoundary\n'), ((574, 602), 'numpy.linspace', 'np.linspace', (['(0)', 'xSize', 'xSize'], {}), '(0, xSize, xSize)\n', (585, 602), True, 'import numpy as np\n'), ((625, 653), 'numpy.linspace', 'np.linspace', (['(0)', 'ySize', 'ySize'], {}), '(0, ySize, ySize)\n', (636, 653), True, 'import numpy as np\n')]
|
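Once the NuSVC above is fitted on the grid samples, the same model can answer single-cell queries as well as draw the decision boundary. A small usage sketch reusing clf from the script; the query cell is a made-up example.
q = np.array([[37, 52]])               # hypothetical grid cell [x, y]
in_collision = clf.predict(q)[0]       # boolean label learned from col_map > 0
margin = clf.decision_function(q)[0]   # signed distance to the boundary (>0 on the collision side)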
import os
import numpy as np
from data_prepare import *
from Network_structure import *
from loss_function import *
from train_method import *
def save_eeg(saved_model, result_location, foldername, save_train, save_vali, save_test,
noiseEEG_train, EEG_train, noiseEEG_val, EEG_val, noiseEEG_test, EEG_test, train_num, denoise_network, datanum):
    if save_train:
try:
# generate every signal in training set
Denoiseoutput_train, _ = test_step(saved_model, noiseEEG_train, EEG_train, denoise_network, datanum)
if not os.path.exists(result_location +'/'+ foldername + '/' + train_num + '/' +'nn_output'):
os.makedirs(result_location +'/'+ foldername + '/' + train_num + '/'+ 'nn_output' )
np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' + '/' + 'noiseinput_train.npy', noiseEEG_train)
            np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' + '/' + 'Denoiseoutput_train.npy', Denoiseoutput_train) ####################### change the address!
np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' + '/' + 'EEG_train.npy', EEG_train)
        except Exception as e:
            print("Error during saving training signal:", e)
    if save_vali:
try:
            # generate every signal in validation set
Denoiseoutput_val, _ = test_step(saved_model, noiseEEG_val, EEG_val, denoise_network, datanum)
if not os.path.exists(result_location +'/'+ foldername + '/' + train_num + '/'+ 'nn_output'):
os.makedirs(result_location +'/'+ foldername + '/' + train_num + '/'+ 'nn_output')
np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' +'/' + 'noiseinput_val.npy', noiseEEG_val)
            np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' +'/' + 'Denoiseoutput_val.npy', Denoiseoutput_val) ####################### change the address!
np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' +'/' + 'EEG_val.npy', EEG_val)
        except Exception as e:
            print("Error during saving validation signal:", e)
    if save_test:
try:
# generate every signal in test set
Denoiseoutput_test, _ = test_step(saved_model, noiseEEG_test, EEG_test, denoise_network, datanum)
if not os.path.exists(result_location +'/'+ foldername + '/' + train_num + '/'+ 'nn_output'):
os.makedirs(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output')
np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' +'/' + 'noiseinput_test.npy', noiseEEG_test)
            np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' +'/' + 'Denoiseoutput_test.npy', Denoiseoutput_test) ####################### change the address!
np.save(result_location +'/'+ foldername + '/' + train_num + '/' + 'nn_output' +'/' + 'EEG_test.npy', EEG_test)
        except Exception as e:
            print("Error during saving test signal:", e)
|
[
"os.makedirs",
"os.path.exists",
"numpy.save"
] |
[((814, 946), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'noiseinput_train.npy')", 'noiseEEG_train'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'noiseinput_train.npy', noiseEEG_train)\n", (821, 946), True, 'import numpy as np\n'), ((955, 1095), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'Denoiseoutput_train.npy')", 'Denoiseoutput_train'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'Denoiseoutput_train.npy', Denoiseoutput_train)\n", (962, 1095), True, 'import numpy as np\n'), ((1164, 1284), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'EEG_train.npy')", 'EEG_train'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'EEG_train.npy', EEG_train)\n", (1171, 1284), True, 'import numpy as np\n'), ((1811, 1939), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'noiseinput_val.npy')", 'noiseEEG_val'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'noiseinput_val.npy', noiseEEG_val)\n", (1818, 1939), True, 'import numpy as np\n'), ((1947, 2083), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'Denoiseoutput_val.npy')", 'Denoiseoutput_val'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'Denoiseoutput_val.npy', Denoiseoutput_val)\n", (1954, 2083), True, 'import numpy as np\n'), ((2158, 2274), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'EEG_val.npy')", 'EEG_val'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'EEG_val.npy', EEG_val)\n", (2165, 2274), True, 'import numpy as np\n'), ((2788, 2918), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'noiseinput_test.npy')", 'noiseEEG_test'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'noiseinput_test.npy', noiseEEG_test)\n", (2795, 2918), True, 'import numpy as np\n'), ((2926, 3064), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'Denoiseoutput_test.npy')", 'Denoiseoutput_test'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'Denoiseoutput_test.npy', Denoiseoutput_test)\n", (2933, 3064), True, 'import numpy as np\n'), ((3139, 3257), 'numpy.save', 'np.save', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output' +\n '/' + 'EEG_test.npy')", 'EEG_test'], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output' + '/' + 'EEG_test.npy', EEG_test)\n", (3146, 3257), True, 'import numpy as np\n'), ((607, 699), 'os.path.exists', 'os.path.exists', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output')"], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output')\n", (621, 699), False, 'import os\n'), ((713, 802), 'os.makedirs', 'os.makedirs', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output')"], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 
'nn_output')\n", (724, 802), False, 'import os\n'), ((1603, 1695), 'os.path.exists', 'os.path.exists', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output')"], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output')\n", (1617, 1695), False, 'import os\n'), ((1709, 1798), 'os.makedirs', 'os.makedirs', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output')"], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output')\n", (1720, 1798), False, 'import os\n'), ((2579, 2671), 'os.path.exists', 'os.path.exists', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output')"], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output')\n", (2593, 2671), False, 'import os\n'), ((2685, 2774), 'os.makedirs', 'os.makedirs', (["(result_location + '/' + foldername + '/' + train_num + '/' + 'nn_output')"], {}), "(result_location + '/' + foldername + '/' + train_num + '/' +\n 'nn_output')\n", (2696, 2774), False, 'import os\n')]
|
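The repeated string concatenation in save_eeg above is easy to get wrong; a hedged refactoring sketch of one branch using os.path.join and makedirs(exist_ok=True) is shown below. The helper name save_outputs and its argument names are illustrative, not part of the original module.
import os
import numpy as np
def save_outputs(result_location, foldername, train_num, tag, noisy, denoised, clean):
    # build the nn_output directory once and reuse it for every np.save call
    out_dir = os.path.join(result_location, foldername, train_num, 'nn_output')
    os.makedirs(out_dir, exist_ok=True)   # replaces the exists()/makedirs() pair
    np.save(os.path.join(out_dir, 'noiseinput_%s.npy' % tag), noisy)
    np.save(os.path.join(out_dir, 'Denoiseoutput_%s.npy' % tag), denoised)
    np.save(os.path.join(out_dir, 'EEG_%s.npy' % tag), clean)
# e.g. save_outputs(result_location, foldername, train_num, 'train',
#                   noiseEEG_train, Denoiseoutput_train, EEG_train)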
import os
import tempfile
import time
import cv2
import numpy as np
from PIL import Image
def calcVanishingPoint(lines):
points = lines[:, :2]
normals = lines[:, 2:4] - lines[:, :2]
normals /= np.maximum(np.linalg.norm(normals, axis=-1, keepdims=True), 1e-4)
normals = np.stack([normals[:, 1], -normals[:, 0]], axis=1)
normalPointDot = (normals * points).sum(1)
if lines.shape[0] == 2:
VP = np.linalg.solve(normals, normalPointDot)
else:
VP = np.linalg.lstsq(normals, normalPointDot)[0]
pass
return VP
def calcVanishingPoints(allLines, numVPs):
distanceThreshold = np.sin(np.deg2rad(5))
lines = allLines.copy()
VPs = []
VPLines = []
for VPIndex in range(numVPs):
points = lines[:, :2]
lengths = np.linalg.norm(lines[:, 2:4] - lines[:, :2], axis=-1)
normals = lines[:, 2:4] - lines[:, :2]
normals /= np.maximum(np.linalg.norm(normals, axis=-1, keepdims=True), 1e-4)
normals = np.stack([normals[:, 1], -normals[:, 0]], axis=1)
maxNumInliers = 0
bestVP = np.zeros(2)
#for _ in range(int(np.sqrt(lines.shape[0]))):
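        # RANSAC-style search: repeatedly sample two segments, take their candidate
        # vanishing point, and score it by the total length of the segments that point
        # towards it to within ~5 degrees; the best-supported candidate is then
        # refined from all of its inlier segments after the loop.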
for _ in range(min(pow(lines.shape[0], 2), 100)):
sampledInds = np.random.choice(lines.shape[0], 2)
if sampledInds[0] == sampledInds[1]:
continue
sampledLines = lines[sampledInds]
try:
VP = calcVanishingPoint(sampledLines)
except:
continue
inliers = np.abs(((np.expand_dims(VP, 0) - points) * normals).sum(-1)) / np.linalg.norm(np.expand_dims(VP, 0) - points, axis=-1) < distanceThreshold
numInliers = lengths[inliers].sum()
if numInliers > maxNumInliers:
maxNumInliers = numInliers
bestVP = VP
bestVPInliers = inliers
pass
continue
if maxNumInliers > 0:
inlierLines = lines[bestVPInliers]
VP = calcVanishingPoint(inlierLines)
VPs.append(VP)
#print(bestVP)
#print(inlierLines)
#print(VP)
#exit(1)
VPLines.append(inlierLines)
lines = lines[np.logical_not(bestVPInliers)]
pass
continue
VPs = np.stack(VPs, axis=0)
return VPs, VPLines, lines
def estimateFocalLength(image):
from pylsd.lsd import lsd
height = image.shape[0]
width = image.shape[1]
lines = lsd(image.mean(2))
lineImage = image.copy()
for line in lines:
cv2.line(lineImage, (int(line[0]), int(line[1])), (int(line[2]), int(line[3])), (0, 0, 255), int(np.ceil(line[4] / 2)))
continue
#cv2.imwrite('test/lines.png', lineImage)
numVPs = 3
VPs, VPLines, remainingLines = calcVanishingPoints(lines, numVPs=numVPs)
#focalLength = (np.sqrt(np.linalg.norm(np.cross(VPs[0], VPs[1]))) + np.sqrt(np.linalg.norm(np.cross(VPs[0], VPs[2]))) + np.sqrt(np.linalg.norm(np.cross(VPs[1], VPs[2])))) / 3
focalLength = (np.sqrt(np.abs(np.dot(VPs[0], VPs[1]))) + np.sqrt(np.abs(np.dot(VPs[0], VPs[2]))) + np.sqrt(np.abs(np.dot(VPs[1], VPs[2])))) / 3
return focalLength
def PlaneDepthLayer(planes, ranges):
batchSize = 1
if len(planes.shape) == 3:
batchSize = planes.shape[0]
planes = planes.reshape(planes.shape[0] * planes.shape[1], planes.shape[2])
pass
planesD = np.linalg.norm(planes, 2, 1)
planesD = np.maximum(planesD, 1e-4)
planesNormal = -planes / planesD.reshape(-1, 1).repeat(3, 1)
#print(planesD, planesNormal)
#print(ranges.min(), ranges.max())
normalXYZ = np.dot(ranges, planesNormal.transpose())
normalXYZ[normalXYZ == 0] = 1e-4
normalXYZ = 1 / normalXYZ
#print(normalXYZ.min(), normalXYZ.max())
depths = -normalXYZ
depths[:, :] *= planesD
if batchSize > 1:
depths = depths.reshape(depths.shape[0], depths.shape[1], batchSize, -1).transpose([2, 0, 1, 3])
pass
depths[(depths < 0) + (depths > 10)] = 10
return depths
def calcPlaneDepths(planes, width, height, info):
urange = np.arange(width, dtype=np.float32).reshape(1, -1).repeat(height, 0) / (width + 1) * (info[16] + 1) - info[2]
vrange = np.arange(height, dtype=np.float32).reshape(-1, 1).repeat(width, 1) / (height + 1) * (info[17] + 1) - info[6]
ranges = np.array([urange / info[0], np.ones(urange.shape), -vrange / info[5]]).transpose([1, 2, 0])
planeDepths = PlaneDepthLayer(planes, ranges)
return planeDepths
def drawDepthImage(depth):
#return cv2.applyColorMap(np.clip(depth / 10 * 255, 0, 255).astype(np.uint8), cv2.COLORMAP_JET)
return 255 - np.clip(depth / 5 * 255, 0, 255).astype(np.uint8)
class ColorPalette:
def __init__(self, numColors):
#np.random.seed(2)
#self.colorMap = np.random.randint(255, size = (numColors, 3))
#self.colorMap[0] = 0
self.colorMap = np.array([[255, 0, 0],
[0, 255, 0],
[0, 0, 255],
[80, 128, 255],
[255, 230, 180],
[255, 0, 255],
[0, 255, 255],
[100, 0, 0],
[0, 100, 0],
[255, 255, 0],
[50, 150, 0],
[200, 255, 255],
[255, 200, 255],
[128, 128, 80],
[0, 50, 128],
[0, 100, 100],
[0, 255, 128],
[0, 128, 255],
[255, 0, 128],
[128, 0, 255],
[255, 128, 0],
[128, 255, 0],
])
if numColors > self.colorMap.shape[0]:
self.colorMap = np.random.randint(255, size = (numColors, 3))
pass
return
def getColorMap(self):
return self.colorMap
def getColor(self, index):
        if index >= self.colorMap.shape[0]:
return np.random.randint(255, size = (3))
else:
return self.colorMap[index]
pass
def drawSegmentationImage(segmentations, randomColor=None, numColors=22, blackIndex=-1):
if segmentations.ndim == 2:
numColors = max(numColors, segmentations.max() + 2, blackIndex + 1)
else:
numColors = max(numColors, segmentations.shape[2] + 2, blackIndex + 1)
pass
randomColor = ColorPalette(numColors).getColorMap()
if blackIndex >= 0:
randomColor[blackIndex] = 0
pass
width = segmentations.shape[1]
height = segmentations.shape[0]
if segmentations.ndim == 3:
#segmentation = (np.argmax(segmentations, 2) + 1) * (np.max(segmentations, 2) > 0.5)
segmentation = np.argmax(segmentations, 2)
else:
segmentation = segmentations
pass
    segmentation = segmentation.astype(int)
return randomColor[segmentation.reshape(-1)].reshape((height, width, 3))
|
[
"numpy.clip",
"numpy.ceil",
"numpy.linalg.solve",
"numpy.ones",
"numpy.random.choice",
"numpy.logical_not",
"numpy.argmax",
"numpy.stack",
"numpy.deg2rad",
"numpy.zeros",
"numpy.array",
"numpy.random.randint",
"numpy.linalg.lstsq",
"numpy.linalg.norm",
"numpy.dot",
"numpy.expand_dims",
"numpy.maximum",
"numpy.arange"
] |
[((288, 337), 'numpy.stack', 'np.stack', (['[normals[:, 1], -normals[:, 0]]'], {'axis': '(1)'}), '([normals[:, 1], -normals[:, 0]], axis=1)\n', (296, 337), True, 'import numpy as np\n'), ((2329, 2350), 'numpy.stack', 'np.stack', (['VPs'], {'axis': '(0)'}), '(VPs, axis=0)\n', (2337, 2350), True, 'import numpy as np\n'), ((3445, 3473), 'numpy.linalg.norm', 'np.linalg.norm', (['planes', '(2)', '(1)'], {}), '(planes, 2, 1)\n', (3459, 3473), True, 'import numpy as np\n'), ((3486, 3513), 'numpy.maximum', 'np.maximum', (['planesD', '(0.0001)'], {}), '(planesD, 0.0001)\n', (3496, 3513), True, 'import numpy as np\n'), ((219, 266), 'numpy.linalg.norm', 'np.linalg.norm', (['normals'], {'axis': '(-1)', 'keepdims': '(True)'}), '(normals, axis=-1, keepdims=True)\n', (233, 266), True, 'import numpy as np\n'), ((427, 467), 'numpy.linalg.solve', 'np.linalg.solve', (['normals', 'normalPointDot'], {}), '(normals, normalPointDot)\n', (442, 467), True, 'import numpy as np\n'), ((638, 651), 'numpy.deg2rad', 'np.deg2rad', (['(5)'], {}), '(5)\n', (648, 651), True, 'import numpy as np\n'), ((793, 846), 'numpy.linalg.norm', 'np.linalg.norm', (['(lines[:, 2:4] - lines[:, :2])'], {'axis': '(-1)'}), '(lines[:, 2:4] - lines[:, :2], axis=-1)\n', (807, 846), True, 'import numpy as np\n'), ((997, 1046), 'numpy.stack', 'np.stack', (['[normals[:, 1], -normals[:, 0]]'], {'axis': '(1)'}), '([normals[:, 1], -normals[:, 0]], axis=1)\n', (1005, 1046), True, 'import numpy as np\n'), ((1090, 1101), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (1098, 1101), True, 'import numpy as np\n'), ((4933, 5286), 'numpy.array', 'np.array', (['[[255, 0, 0], [0, 255, 0], [0, 0, 255], [80, 128, 255], [255, 230, 180], [\n 255, 0, 255], [0, 255, 255], [100, 0, 0], [0, 100, 0], [255, 255, 0], [\n 50, 150, 0], [200, 255, 255], [255, 200, 255], [128, 128, 80], [0, 50, \n 128], [0, 100, 100], [0, 255, 128], [0, 128, 255], [255, 0, 128], [128,\n 0, 255], [255, 128, 0], [128, 255, 0]]'], {}), '([[255, 0, 0], [0, 255, 0], [0, 0, 255], [80, 128, 255], [255, 230,\n 180], [255, 0, 255], [0, 255, 255], [100, 0, 0], [0, 100, 0], [255, 255,\n 0], [50, 150, 0], [200, 255, 255], [255, 200, 255], [128, 128, 80], [0,\n 50, 128], [0, 100, 100], [0, 255, 128], [0, 128, 255], [255, 0, 128], [\n 128, 0, 255], [255, 128, 0], [128, 255, 0]])\n', (4941, 5286), True, 'import numpy as np\n'), ((7335, 7362), 'numpy.argmax', 'np.argmax', (['segmentations', '(2)'], {}), '(segmentations, 2)\n', (7344, 7362), True, 'import numpy as np\n'), ((491, 531), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['normals', 'normalPointDot'], {}), '(normals, normalPointDot)\n', (506, 531), True, 'import numpy as np\n'), ((924, 971), 'numpy.linalg.norm', 'np.linalg.norm', (['normals'], {'axis': '(-1)', 'keepdims': '(True)'}), '(normals, axis=-1, keepdims=True)\n', (938, 971), True, 'import numpy as np\n'), ((1241, 1276), 'numpy.random.choice', 'np.random.choice', (['lines.shape[0]', '(2)'], {}), '(lines.shape[0], 2)\n', (1257, 1276), True, 'import numpy as np\n'), ((6343, 6386), 'numpy.random.randint', 'np.random.randint', (['(255)'], {'size': '(numColors, 3)'}), '(255, size=(numColors, 3))\n', (6360, 6386), True, 'import numpy as np\n'), ((6581, 6611), 'numpy.random.randint', 'np.random.randint', (['(255)'], {'size': '(3)'}), '(255, size=3)\n', (6598, 6611), True, 'import numpy as np\n'), ((2254, 2283), 'numpy.logical_not', 'np.logical_not', (['bestVPInliers'], {}), '(bestVPInliers)\n', (2268, 2283), True, 'import numpy as np\n'), ((2696, 2716), 'numpy.ceil', 'np.ceil', (['(line[4] / 2)'], 
{}), '(line[4] / 2)\n', (2703, 2716), True, 'import numpy as np\n'), ((4664, 4696), 'numpy.clip', 'np.clip', (['(depth / 5 * 255)', '(0)', '(255)'], {}), '(depth / 5 * 255, 0, 255)\n', (4671, 4696), True, 'import numpy as np\n'), ((3172, 3194), 'numpy.dot', 'np.dot', (['VPs[1]', 'VPs[2]'], {}), '(VPs[1], VPs[2])\n', (3178, 3194), True, 'import numpy as np\n'), ((4382, 4403), 'numpy.ones', 'np.ones', (['urange.shape'], {}), '(urange.shape)\n', (4389, 4403), True, 'import numpy as np\n'), ((3088, 3110), 'numpy.dot', 'np.dot', (['VPs[0]', 'VPs[1]'], {}), '(VPs[0], VPs[1])\n', (3094, 3110), True, 'import numpy as np\n'), ((3130, 3152), 'numpy.dot', 'np.dot', (['VPs[0]', 'VPs[2]'], {}), '(VPs[0], VPs[2])\n', (3136, 3152), True, 'import numpy as np\n'), ((1614, 1635), 'numpy.expand_dims', 'np.expand_dims', (['VP', '(0)'], {}), '(VP, 0)\n', (1628, 1635), True, 'import numpy as np\n'), ((4109, 4143), 'numpy.arange', 'np.arange', (['width'], {'dtype': 'np.float32'}), '(width, dtype=np.float32)\n', (4118, 4143), True, 'import numpy as np\n'), ((4231, 4266), 'numpy.arange', 'np.arange', (['height'], {'dtype': 'np.float32'}), '(height, dtype=np.float32)\n', (4240, 4266), True, 'import numpy as np\n'), ((1545, 1566), 'numpy.expand_dims', 'np.expand_dims', (['VP', '(0)'], {}), '(VP, 0)\n', (1559, 1566), True, 'import numpy as np\n')]
|
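In calcVanishingPoint above, each row of lines is a segment [x1, y1, x2, y2] and the returned point is the one consistent with every segment's normal; for exactly two segments this is simply their intersection. A small worked example with synthetic segments (values chosen purely for illustration):
lines = np.array([[0.0, 1.0, 1.0, 1.0],    # segment along y = 1
                  [2.0, 0.0, 2.0, 1.0]])   # segment along x = 2
vp = calcVanishingPoint(lines)
print(vp)   # -> approximately [2. 1.], the intersection of the two lines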
from __future__ import print_function
import argparse
from collections import OrderedDict
import json
import os
import logging
from keras.callbacks import EarlyStopping
from sklearn.preprocessing import normalize
from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, average_precision_score
from scipy.sparse import csr_matrix
from keras.utils.io_utils import HDF5Matrix
#from keras.utils.visualize_util import plot
from keras.optimizers import SGD, Adam
from sklearn.metrics import r2_score
import numpy as np
import theano.tensor as tt
import pandas as pd
import random
import common
import models
from predict import obtain_predictions
from eval import do_eval
import h5py
class Config(object):
"""Configuration for the training process."""
def __init__(self, params, normalize=False, whiten=True):
self.model_id = common.get_next_model_id()
self.norm = normalize
self.whiten = whiten
self.x_path = '%s_%sx%s' % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
self.y_path = '%s_%s_%s' % (params['dataset']['fact'],params['dataset']['dim'],params['dataset']['dataset'])
self.dataset_settings = params['dataset']
self.training_params = params['training']
self.model_arch = params['cnn']
self.predicting_params = params['predicting']
def get_dict(self):
object_dict = self.__dict__
first_key = "model_id"
conf_dict = OrderedDict({first_key: object_dict[first_key]})
conf_dict.update(object_dict)
return conf_dict
def _squared_magnitude(x):
return tt.sqr(x).sum(axis=-1)
def _magnitude(x):
return tt.sqrt(tt.maximum(_squared_magnitude(x), np.finfo(x.dtype).tiny))
def cosine(x, y):
return tt.clip((1 - (x * y).sum(axis=-1) /
(_magnitude(x) * _magnitude(y))) / 2, 0, 1)
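# The cosine loss above maps cosine similarity from [-1, 1] to a distance in [0, 1]:
# identical directions give 0, orthogonal vectors give 0.5, opposite directions give 1;
# the clip guards against small numerical overshoots outside that range.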
def load_sparse_csr(filename):
loader = np.load(filename)
return csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
def build_model(config):
"""Builds the cnn."""
params = config.model_arch
get_model = getattr(models, 'get_model_'+str(params['architecture']))
model = get_model(params)
#model = model_kenun.build_convnet_model(params)
# Learning setup
t_params = config.training_params
sgd = SGD(lr=t_params["learning_rate"], decay=t_params["decay"],
momentum=t_params["momentum"], nesterov=t_params["nesterov"])
adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
optimizer = eval(t_params['optimizer'])
metrics = ['mean_squared_error']
if config.model_arch["final_activation"] == 'softmax':
metrics.append('categorical_accuracy')
if t_params['loss_func'] == 'cosine':
loss_func = eval(t_params['loss_func'])
else:
loss_func = t_params['loss_func']
model.compile(loss=loss_func, optimizer=optimizer,metrics=metrics)
return model
def load_data_preprocesed(params, X_path, Y_path, dataset, val_percent, test_percent, n_samples, with_metadata=False, only_metadata=False, metadata_source='rovi'):
    factors = np.load(common.DATASETS_DIR+'/y_train_'+Y_path+'.npy') # careful: remove S
index_factors = open(common.DATASETS_DIR+'/items_index_train_'+dataset+'.tsv').read().splitlines()
if not only_metadata:
all_X = np.load(common.TRAINDATA_DIR+'/X_train_'+X_path+'.npy')
index_train = open(common.TRAINDATA_DIR+'/index_train_%s.tsv' % (X_path)).read().splitlines()
all_Y = np.zeros((len(index_train),factors.shape[1]))
index_factors_inv = dict()
for i,item in enumerate(index_factors):
index_factors_inv[item] = i
for i,item in enumerate(index_train):
all_Y[i,:] = factors[index_factors_inv[item]]
else:
all_Y = factors
if with_metadata:
if 'w2v' in metadata_source:
all_X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,dataset))[:,:int(params['cnn']['sequence_length'])]
elif 'model' in metadata_source or not params['dataset']['sparse']:
all_X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,dataset))
else:
all_X_meta = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (metadata_source,dataset)).todense()
all_X_in_meta = all_X = all_X_meta
print(all_X.shape)
print(all_Y.shape)
if n_samples != 'all':
n_samples = int(n_samples)
all_X = all_X[:n_samples]
all_Y = all_Y[:n_samples]
if with_metadata:
all_X_in_meta = all_X_in_meta[:n_samples]
    if params['training']['normalize_y']:
normalize(all_Y,copy=False)
if params['training']["val_from_file"]:
Y_val = np.load(common.DATASETS_DIR+'/y_val_'+Y_path+'.npy')
        Y_test = np.load(common.DATASETS_DIR+'/y_test_'+Y_path+'.npy') #!!! careful: remove S from trainS
if params['dataset']['sparse']:
X_val = load_sparse_csr(common.TRAINDATA_DIR+'/X_val_%s_%s.npz' % (metadata_source,dataset)).todense()
X_test = load_sparse_csr(common.TRAINDATA_DIR+'/X_test_%s_%s.npz' % (metadata_source,dataset)).todense()
else:
X_val = np.load(common.TRAINDATA_DIR+'/X_val_%s_%s.npy' % (metadata_source,dataset))
X_test = np.load(common.TRAINDATA_DIR+'/X_test_%s_%s.npy' % (metadata_source,dataset))
X_train = all_X
Y_train = all_Y
else:
N = all_Y.shape[0]
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
logging.debug("Training data points: %d" % N_train)
logging.debug("Validation data points: %d" % N_val)
logging.debug("Test data points: %d" % (N - N_train - N_val))
if not only_metadata:
# Slice data
X_train = all_X[:N_train]
X_val = all_X[N_train:N_train + N_val]
X_test = all_X[N_train + N_val:]
Y_train = all_Y[:N_train]
Y_val = all_Y[N_train:N_train + N_val]
Y_test = all_Y[N_train + N_val:]
if with_metadata:
if only_metadata:
X_train = all_X_in_meta[:N_train]
X_val = all_X_in_meta[N_train:N_train + N_val]
X_test = all_X_in_meta[N_train + N_val:]
else:
X_train = [X_train,all_X_in_meta[:N_train]]
X_val = [X_val,all_X_in_meta[N_train:N_train + N_val]]
X_test = [X_test,all_X_in_meta[N_train + N_val:]]
return X_train, Y_train, X_val, Y_val, X_test, Y_test
def load_data_hf5(params,val_percent, test_percent):
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%s.hdf5" % (params['dataset']['dataset'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
N = f["targets"].shape[0]
f.close()
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
X_train = HDF5Matrix(hdf5_file, 'features', start=0, end=N_train)
Y_train = HDF5Matrix(hdf5_file, 'targets', start=0, end=N_train)
X_val = HDF5Matrix(hdf5_file, 'features', start=N_train, end=N_train+N_val)
Y_val = HDF5Matrix(hdf5_file, 'targets', start=N_train, end=N_train+N_val)
X_test = HDF5Matrix(hdf5_file, 'features', start=N_train+N_val, end=N)
Y_test = HDF5Matrix(hdf5_file, 'targets', start=N_train+N_val, end=N)
return X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train
def load_data_hf5_memory(params,val_percent, test_percent, y_path, id2gt, X_meta = None, val_from_file = False):
if val_from_file:
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
index_train = f["index"][:]
index_train = np.delete(index_train, np.where(index_train == ""))
N_train = index_train.shape[0]
val_hdf5_file = common.PATCHES_DIR+"/patches_val_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f_val = h5py.File(val_hdf5_file,"r")
X_val = f_val['features'][:]
#Y_val = f_val['targets'][:]
factors_val = np.load(common.DATASETS_DIR+'/y_val_'+y_path+'.npy')
index_factors_val = open(common.DATASETS_DIR+'/items_index_val_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt_val = dict((index,factor) for (index,factor) in zip(index_factors_val,factors_val))
index_val = [i for i in f_val['index'][:] if i in id2gt_val]
X_val = np.delete(X_val, np.where(index_val == ""), axis=0)
index_val = np.delete(index_val, np.where(index_val == ""))
Y_val = np.asarray([id2gt_val[id] for id in index_val])
test_hdf5_file = common.PATCHES_DIR+"/patches_test_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f_test = h5py.File(test_hdf5_file,"r")
X_test = f_test['features'][:]
#Y_test = f_test['targets'][:]
factors_test = np.load(common.DATASETS_DIR+'/y_test_'+y_path+'.npy')
index_factors_test = open(common.DATASETS_DIR+'/items_index_test_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt_test = dict((index,factor) for (index,factor) in zip(index_factors_test,factors_test))
index_test = [i for i in f_test['index'][:] if i in id2gt_test]
X_test = np.delete(X_test, np.where(index_test == ""), axis=0)
index_test = np.delete(index_test, np.where(index_test == ""))
Y_test = np.asarray([id2gt_test[id] for id in index_test])
else:
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
index_all = f["index"][:]
N = index_all.shape[0]
train_percent = 1 - val_percent - test_percent
N_train = int(train_percent * N)
N_val = int(val_percent * N)
X_val = f['features'][N_train:N_train+N_val]
index_val = f['index'][N_train:N_train+N_val]
X_val = np.delete(X_val, np.where(index_val == ""), axis=0)
index_val = np.delete(index_val, np.where(index_val == ""))
Y_val = np.asarray([id2gt[id] for id in index_val])
X_test = f['features'][N_train+N_val:N]
index_test = f['index'][N_train+N_val:N]
print(index_test.shape)
print(X_test.shape)
X_test = np.delete(X_test, np.where(index_test == ""), axis=0)
index_test = np.delete(index_test, np.where(index_test == ""))
print(index_test.shape)
print(X_test.shape)
Y_test = np.asarray([id2gt[id] for id in index_test])
print(Y_test.shape)
index_train = f['index'][:N_train]
index_train = np.delete(index_train, np.where(index_train == ""))
N_train = index_train.shape[0]
    if X_meta is not None:
X_val = [X_val,X_meta[N_train:N_train+N_val]]
X_test = [X_test,X_meta[N_train+N_val:N]]
return X_val, Y_val, X_test, Y_test, N_train
def batch_block_generator(params, y_path, N_train, id2gt, X_meta=None,
val_from_file=False):
hdf5_file = common.PATCHES_DIR+"/patches_train_%s_%sx%s.hdf5" % (params['dataset']['dataset'],params['dataset']['npatches'],params['dataset']['window'])
f = h5py.File(hdf5_file,"r")
block_step = 50000
batch_size = params['training']['n_minibatch']
randomize = True
with_meta = False
    if X_meta is not None:
with_meta = True
while 1:
for i in range(0, N_train, block_step):
x_block = f['features'][i:min(N_train, i+block_step)]
index_block = f['index'][i:min(N_train, i+block_step)]
#y_block = f['targets'][i:min(N_train,i+block_step)]
x_block = np.delete(x_block, np.where(index_block == ""), axis=0)
index_block = np.delete(index_block, np.where(index_block == ""))
y_block = np.asarray([id2gt[id] for id in index_block])
if params['training']['normalize_y']:
normalize(y_block, copy=False)
            items_list = list(range(x_block.shape[0]))
if randomize:
random.shuffle(items_list)
for j in range(0, len(items_list), batch_size):
if j+batch_size <= x_block.shape[0]:
items_in_batch = items_list[j:j+batch_size]
x_batch = x_block[items_in_batch]
y_batch = y_block[items_in_batch]
if with_meta:
x_batch = [x_batch, X_meta[items_in_batch]]
yield (x_batch, y_batch)
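# batch_block_generator streams training patches from the HDF5 file in blocks of
# block_step rows, drops entries whose index is empty, looks the targets up in id2gt,
# optionally normalises and shuffles within the block, and yields (x_batch, y_batch)
# tuples of size n_minibatch for model.fit_generator.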
def process(params,with_predict=True,with_eval=True):
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
params['cnn']['n_out'] = int(params['dataset']['dim'])
#params['cnn']['n_frames'] = int(params['dataset']['window'] * SR / float(HR))
with_metadata = params['dataset']['with_metadata']
only_metadata = params['dataset']['only_metadata']
metadata_source = params['dataset']['meta-suffix']
if with_metadata:
if 'w2v' in metadata_source:
X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,params['dataset']['dataset']))[:,:int(params['cnn']['sequence_length'])]
params['cnn']['n_metafeatures'] = len(X_meta[0])
if 'meta-suffix2' in params['dataset']:
X_meta2 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = len(X_meta2[0])
if 'meta-suffix3' in params['dataset']:
X_meta3 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures4'] = len(X_meta4[0])
elif 'model' in metadata_source or not params['dataset']['sparse']:
X_meta = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (metadata_source,params['dataset']['dataset']))
params['cnn']['n_metafeatures'] = len(X_meta[0])
if 'meta-suffix2' in params['dataset']:
X_meta2 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = len(X_meta2[0])
if 'meta-suffix3' in params['dataset']:
X_meta3 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = np.load(common.TRAINDATA_DIR+'/X_train_%s_%s.npy' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures4'] = len(X_meta4[0])
else:
X_meta = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (metadata_source,params['dataset']['dataset'])).todense()
params['cnn']['n_metafeatures'] = X_meta.shape[1]
if 'meta-suffix2' in params['dataset']:
X_meta2 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix2'],params['dataset']['dataset']))
params['cnn']['n_metafeatures2'] = X_meta2.shape[1]
if 'meta-suffix3' in params['dataset']:
X_meta3 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix3'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta3[0])
if 'meta-suffix4' in params['dataset']:
X_meta4 = load_sparse_csr(common.TRAINDATA_DIR+'/X_train_%s_%s.npz' % (params['dataset']['meta-suffix4'],params['dataset']['dataset']))
params['cnn']['n_metafeatures3'] = len(X_meta4[0])
print(X_meta.shape)
else:
X_meta = None
config = Config(params)
model_dir = os.path.join(common.MODELS_DIR, config.model_id)
common.ensure_dir(common.MODELS_DIR)
common.ensure_dir(model_dir)
model_file = os.path.join(model_dir, config.model_id + common.MODEL_EXT)
logging.debug("Building Network...")
#model = build_model(config)
model = build_model(config)
print(model.summary())
#plot(model, to_file='model2.png', show_shapes=True)
trained_model = config.get_dict()
# Save model
#plot(model, to_file=os.path.join(model_dir, config.model_id + PLOT_EXT))
common.save_model(model, model_file)
logging.debug(trained_model["model_id"])
logging.debug("Loading Data...")
with_generator = True
if only_metadata:
X_train, Y_train, X_val, Y_val, X_test, Y_test = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, metadata_source)
if 'meta-suffix2' in params['dataset']:
X_train2, Y_train2, X_val2, Y_val2, X_test2, Y_test2 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix2'])
X_train = [X_train,X_train2]
X_val = [X_val,X_val2]
X_test = [X_test,X_test2]
print("X_train bi", len(X_train))
if 'meta-suffix3' in params['dataset']:
X_train3, Y_train3, X_val3, Y_val3, X_test3, Y_test3 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix3'])
X_train.append(X_train3)
X_val.append(X_val3)
X_test.append(X_test3)
print("X_train tri", len(X_train))
if 'meta-suffix4' in params['dataset']:
X_train4, Y_train4, X_val4, Y_val4, X_test4, Y_test4 = \
load_data_preprocesed(params, config.x_path, config.y_path, params['dataset']['dataset'], config.training_params["validation"],
config.training_params["test"], config.dataset_settings["nsamples"], with_metadata, only_metadata, params['dataset']['meta-suffix4'])
X_train.append(X_train4)
X_val.append(X_val4)
X_test.append(X_test4)
print("X_train four", len(X_train))
else:
if with_generator:
id2gt = dict()
factors = np.load(common.DATASETS_DIR+'/y_train_'+config.y_path+'.npy')
index_factors = open(common.DATASETS_DIR+'/items_index_train_'+params['dataset']['dataset']+'.tsv').read().splitlines()
id2gt = dict((index,factor) for (index,factor) in zip(index_factors,factors))
X_val, Y_val, X_test, Y_test, N_train = load_data_hf5_memory(params,config.training_params["validation"],config.training_params["test"],config.y_path,id2gt,X_meta,config.training_params["val_from_file"])
if params['dataset']['nsamples'] != 'all':
N_train = min(N_train,params['dataset']['nsamples'])
else:
X_train, Y_train, X_val, Y_val, X_test, Y_test, N_train = load_data_hf5(params,config.training_params["validation"],config.training_params["test"])
trained_model["whiten_scaler"] = common.TRAINDATA_DIR+'/scaler_%s.pk' % config.x_path
logging.debug("Training...")
if config.model_arch["final_activation"] == 'softmax':
monitor_metric = 'val_categorical_accuracy'
else:
monitor_metric = 'val_loss'
early_stopping = EarlyStopping(monitor=monitor_metric, patience=4)
if only_metadata:
epochs = model.fit(X_train, Y_train,
batch_size=config.training_params["n_minibatch"],
#shuffle='batch',
nb_epoch=config.training_params["n_epochs"],
verbose=1, validation_data=(X_val, Y_val),
callbacks=[early_stopping])
else:
if with_generator:
print(N_train)
epochs = model.fit_generator(batch_block_generator(params,config.y_path,N_train,id2gt,X_meta,config.training_params["val_from_file"]),
samples_per_epoch = N_train-(N_train % config.training_params["n_minibatch"]),
nb_epoch = config.training_params["n_epochs"],
verbose=1,
validation_data = (X_val, Y_val),
callbacks=[early_stopping])
else:
epochs = model.fit(X_train, Y_train,
batch_size=config.training_params["n_minibatch"],
shuffle='batch',
nb_epoch=config.training_params["n_epochs"],
verbose=1,
validation_data=(X_val, Y_val),
callbacks=[early_stopping])
model.save_weights(os.path.join(model_dir, config.model_id + common.WEIGHTS_EXT))
logging.debug("Saving trained model %s in %s..." %
(trained_model["model_id"], common.DEFAULT_TRAINED_MODELS_FILE))
common.save_trained_model(common.DEFAULT_TRAINED_MODELS_FILE, trained_model)
logging.debug("Evaluating...")
print(X_test[0].shape,X_test[1].shape)
preds=model.predict(X_test)
print(preds.shape)
if params["dataset"]["evaluation"] in ['binary','multiclass']:
y_pred = (preds > 0.5).astype('int32')
acc = accuracy_score(Y_test,y_pred)
prec = precision_score(Y_test,y_pred,average='macro')
recall = recall_score(Y_test,y_pred,average='macro')
f1 = f1_score(Y_test,y_pred,average='macro')
print('Accuracy', acc)
print("%.3f\t%.3f\t%.3f" % (prec,recall,f1))
if params["dataset"]["fact"] == 'class':
good_classes = np.nonzero(Y_test.sum(0))[0]
print(Y_test.shape,preds.shape)
#roc_auc=roc_auc_score(Y_test[:,good_classes],preds[:,good_classes])
#logging.debug('ROC-AUC '+str(roc_auc))
#pr_auc = average_precision_score(Y_test[:,good_classes],preds[:,good_classes])
#print('PR-AUC',pr_auc)
#r2 = roc_auc
elif params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
r2s = []
for i,pred in enumerate(preds):
r2 = r2_score(Y_test[i],pred)
r2s.append(r2)
r2 = np.asarray(r2s).mean()
logging.debug('R2 avg '+str(r2))
# Batch prediction
if X_test[1].shape == Y_test[1].shape:
score = model.evaluate(X_test, Y_test, verbose=0)
logging.debug(score)
logging.debug(model.metrics_names)
print(score)
trained_model["loss_score"] = score[0]
trained_model["mse"] = score[1]
if params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
trained_model["r2"] = r2
fw=open(common.DATA_DIR+'/results/train_results.txt','a')
fw.write(trained_model["model_id"]+'\n')
if params["training"]["loss_func"] == 'binary_crossentropy':
fw.write('ROC-AUC: '+str(roc_auc)+'\n')
print('ROC-AUC: '+str(roc_auc))
fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')
fw.write('MSE: '+str(score[1])+'\n')
elif params["dataset"]["evaluation"] not in ['binary','multiclass','multilabel']:
fw.write('R2 avg: '+str(r2)+'\n')
print('R2 avg: '+str(r2))
fw.write('Loss: '+str(score[0])+' ('+config.training_params["loss_func"]+')\n')
fw.write('MSE: '+str(score[1])+'\n')
fw.write(json.dumps(epochs.history)+"\n\n")
fw.close()
if with_predict:
trained_models = pd.read_csv(common.DEFAULT_TRAINED_MODELS_FILE, sep='\t')
model_config = trained_models[trained_models["model_id"] == trained_model["model_id"]]
model_config = model_config.to_dict(orient="list")
testset = open(common.DATASETS_DIR+'/items_index_test_%s.tsv' % (config.dataset_settings["dataset"])).read().splitlines()
if config.training_params["val_from_file"] and not only_metadata:
predictions, predictions_index = obtain_predictions(model_config, testset, trained_model["model_id"], config.predicting_params["trim_coeff"], model=model, with_metadata=with_metadata, only_metadata=only_metadata, metadata_source=metadata_source, with_patches=True)
else:
predictions, predictions_index = obtain_predictions(model_config, testset, trained_model["model_id"], config.predicting_params["trim_coeff"], model=model, with_metadata=with_metadata, only_metadata=only_metadata, metadata_source=metadata_source)
print("Predictions created")
if with_eval:
do_eval(trained_model["model_id"],get_roc=True,get_map=True,get_p=True,predictions=predictions,predictions_index=predictions_index)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Evaluates the model',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-p',
'--params',
dest="params_file",
help='JSON file with params',
default=False)
parser.add_argument('-pred',
'--predict',
dest="with_predict",
help='Predict factors',
action='store_true',
default=False)
parser.add_argument('-eval',
'--eval',
dest="with_eval",
help='Eval factors',
action='store_true',
default=False)
parser.add_argument('-m',
'--metadata',
dest="with_metadata",
help='Use metadata',
action='store_true',
default=False)
parser.add_argument('-om',
'--only_metadata',
dest="only_metadata",
help='Use only metadata',
action='store_true',
default=False)
parser.add_argument('-ms',
'--metadata_source',
dest="metadata_source",
type=str,
help='Suffix of metadata files',
default="rovi")
args = parser.parse_args()
params = models.params_1
if args.params_file:
params = json.load(open(args.params_file))
    process(params, with_predict=args.with_predict, with_eval=args.with_eval)
|
[
"logging.debug",
"pandas.read_csv",
"common.get_next_model_id",
"common.save_model",
"sklearn.metrics.precision_score",
"sklearn.metrics.recall_score",
"keras.optimizers.SGD",
"common.ensure_dir",
"sklearn.metrics.r2_score",
"argparse.ArgumentParser",
"numpy.where",
"json.dumps",
"numpy.asarray",
"eval.do_eval",
"theano.tensor.sqr",
"keras.callbacks.EarlyStopping",
"scipy.sparse.csr_matrix",
"keras.optimizers.Adam",
"common.save_trained_model",
"collections.OrderedDict",
"random.shuffle",
"h5py.File",
"numpy.finfo",
"sklearn.metrics.accuracy_score",
"logging.basicConfig",
"sklearn.metrics.f1_score",
"os.path.join",
"keras.utils.io_utils.HDF5Matrix",
"sklearn.preprocessing.normalize",
"numpy.load",
"predict.obtain_predictions"
] |
[((1981, 1998), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (1988, 1998), True, 'import numpy as np\n'), ((2010, 2103), 'scipy.sparse.csr_matrix', 'csr_matrix', (["(loader['data'], loader['indices'], loader['indptr'])"], {'shape': "loader['shape']"}), "((loader['data'], loader['indices'], loader['indptr']), shape=\n loader['shape'])\n", (2020, 2103), False, 'from scipy.sparse import csr_matrix\n'), ((2437, 2562), 'keras.optimizers.SGD', 'SGD', ([], {'lr': "t_params['learning_rate']", 'decay': "t_params['decay']", 'momentum': "t_params['momentum']", 'nesterov': "t_params['nesterov']"}), "(lr=t_params['learning_rate'], decay=t_params['decay'], momentum=\n t_params['momentum'], nesterov=t_params['nesterov'])\n", (2440, 2562), False, 'from keras.optimizers import SGD, Adam\n'), ((2583, 2638), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)', 'beta_1': '(0.9)', 'beta_2': '(0.999)', 'epsilon': '(1e-08)'}), '(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08)\n', (2587, 2638), False, 'from keras.optimizers import SGD, Adam\n'), ((3236, 3296), 'numpy.load', 'np.load', (["(common.DATASETS_DIR + '/y_train_' + Y_path + '.npy')"], {}), "(common.DATASETS_DIR + '/y_train_' + Y_path + '.npy')\n", (3243, 3296), True, 'import numpy as np\n'), ((6944, 6969), 'h5py.File', 'h5py.File', (['hdf5_file', '"""r"""'], {}), "(hdf5_file, 'r')\n", (6953, 6969), False, 'import h5py\n'), ((7148, 7203), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['hdf5_file', '"""features"""'], {'start': '(0)', 'end': 'N_train'}), "(hdf5_file, 'features', start=0, end=N_train)\n", (7158, 7203), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((7218, 7272), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['hdf5_file', '"""targets"""'], {'start': '(0)', 'end': 'N_train'}), "(hdf5_file, 'targets', start=0, end=N_train)\n", (7228, 7272), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((7285, 7354), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['hdf5_file', '"""features"""'], {'start': 'N_train', 'end': '(N_train + N_val)'}), "(hdf5_file, 'features', start=N_train, end=N_train + N_val)\n", (7295, 7354), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((7365, 7433), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['hdf5_file', '"""targets"""'], {'start': 'N_train', 'end': '(N_train + N_val)'}), "(hdf5_file, 'targets', start=N_train, end=N_train + N_val)\n", (7375, 7433), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((7445, 7508), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['hdf5_file', '"""features"""'], {'start': '(N_train + N_val)', 'end': 'N'}), "(hdf5_file, 'features', start=N_train + N_val, end=N)\n", (7455, 7508), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((7520, 7582), 'keras.utils.io_utils.HDF5Matrix', 'HDF5Matrix', (['hdf5_file', '"""targets"""'], {'start': '(N_train + N_val)', 'end': 'N'}), "(hdf5_file, 'targets', start=N_train + N_val, end=N)\n", (7530, 7582), False, 'from keras.utils.io_utils import HDF5Matrix\n'), ((11710, 11735), 'h5py.File', 'h5py.File', (['hdf5_file', '"""r"""'], {}), "(hdf5_file, 'r')\n", (11719, 11735), False, 'import h5py\n'), ((13089, 13163), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(asctime)s %(message)s', level=logging.DEBUG)\n", (13108, 13163), False, 'import logging\n'), ((16725, 16773), 'os.path.join', 'os.path.join', (['common.MODELS_DIR', 'config.model_id'], {}), '(common.MODELS_DIR, config.model_id)\n', 
(16737, 16773), False, 'import os\n'), ((16778, 16814), 'common.ensure_dir', 'common.ensure_dir', (['common.MODELS_DIR'], {}), '(common.MODELS_DIR)\n', (16795, 16814), False, 'import common\n'), ((16819, 16847), 'common.ensure_dir', 'common.ensure_dir', (['model_dir'], {}), '(model_dir)\n', (16836, 16847), False, 'import common\n'), ((16865, 16924), 'os.path.join', 'os.path.join', (['model_dir', '(config.model_id + common.MODEL_EXT)'], {}), '(model_dir, config.model_id + common.MODEL_EXT)\n', (16877, 16924), False, 'import os\n'), ((16929, 16965), 'logging.debug', 'logging.debug', (['"""Building Network..."""'], {}), "('Building Network...')\n", (16942, 16965), False, 'import logging\n'), ((17253, 17289), 'common.save_model', 'common.save_model', (['model', 'model_file'], {}), '(model, model_file)\n', (17270, 17289), False, 'import common\n'), ((17295, 17335), 'logging.debug', 'logging.debug', (["trained_model['model_id']"], {}), "(trained_model['model_id'])\n", (17308, 17335), False, 'import logging\n'), ((17341, 17373), 'logging.debug', 'logging.debug', (['"""Loading Data..."""'], {}), "('Loading Data...')\n", (17354, 17373), False, 'import logging\n'), ((20469, 20497), 'logging.debug', 'logging.debug', (['"""Training..."""'], {}), "('Training...')\n", (20482, 20497), False, 'import logging\n'), ((20677, 20726), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': 'monitor_metric', 'patience': '(4)'}), '(monitor=monitor_metric, patience=4)\n', (20690, 20726), False, 'from keras.callbacks import EarlyStopping\n'), ((22068, 22188), 'logging.debug', 'logging.debug', (["('Saving trained model %s in %s...' % (trained_model['model_id'], common.\n DEFAULT_TRAINED_MODELS_FILE))"], {}), "('Saving trained model %s in %s...' % (trained_model[\n 'model_id'], common.DEFAULT_TRAINED_MODELS_FILE))\n", (22081, 22188), False, 'import logging\n'), ((22206, 22282), 'common.save_trained_model', 'common.save_trained_model', (['common.DEFAULT_TRAINED_MODELS_FILE', 'trained_model'], {}), '(common.DEFAULT_TRAINED_MODELS_FILE, trained_model)\n', (22231, 22282), False, 'import common\n'), ((22288, 22318), 'logging.debug', 'logging.debug', (['"""Evaluating..."""'], {}), "('Evaluating...')\n", (22301, 22318), False, 'import logging\n'), ((26023, 26142), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Evaluates the model"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Evaluates the model', formatter_class=\n argparse.ArgumentDefaultsHelpFormatter)\n", (26046, 26142), False, 'import argparse\n'), ((898, 924), 'common.get_next_model_id', 'common.get_next_model_id', ([], {}), '()\n', (922, 924), False, 'import common\n'), ((1531, 1579), 'collections.OrderedDict', 'OrderedDict', (['{first_key: object_dict[first_key]}'], {}), '({first_key: object_dict[first_key]})\n', (1542, 1579), False, 'from collections import OrderedDict\n'), ((3451, 3512), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_' + X_path + '.npy')"], {}), "(common.TRAINDATA_DIR + '/X_train_' + X_path + '.npy')\n", (3458, 3512), True, 'import numpy as np\n'), ((4813, 4841), 'sklearn.preprocessing.normalize', 'normalize', (['all_Y'], {'copy': '(False)'}), '(all_Y, copy=False)\n', (4822, 4841), False, 'from sklearn.preprocessing import normalize\n'), ((4902, 4960), 'numpy.load', 'np.load', (["(common.DATASETS_DIR + '/y_val_' + Y_path + '.npy')"], {}), "(common.DATASETS_DIR + '/y_val_' + Y_path + '.npy')\n", (4909, 4960), True, 'import numpy as np\n'), ((4972, 5031), 
'numpy.load', 'np.load', (["(common.DATASETS_DIR + '/y_test_' + Y_path + '.npy')"], {}), "(common.DATASETS_DIR + '/y_test_' + Y_path + '.npy')\n", (4979, 5031), True, 'import numpy as np\n'), ((5764, 5815), 'logging.debug', 'logging.debug', (["('Training data points: %d' % N_train)"], {}), "('Training data points: %d' % N_train)\n", (5777, 5815), False, 'import logging\n'), ((5824, 5875), 'logging.debug', 'logging.debug', (["('Validation data points: %d' % N_val)"], {}), "('Validation data points: %d' % N_val)\n", (5837, 5875), False, 'import logging\n'), ((5884, 5945), 'logging.debug', 'logging.debug', (["('Test data points: %d' % (N - N_train - N_val))"], {}), "('Test data points: %d' % (N - N_train - N_val))\n", (5897, 5945), False, 'import logging\n'), ((7957, 7982), 'h5py.File', 'h5py.File', (['hdf5_file', '"""r"""'], {}), "(hdf5_file, 'r')\n", (7966, 7982), False, 'import h5py\n'), ((8311, 8340), 'h5py.File', 'h5py.File', (['val_hdf5_file', '"""r"""'], {}), "(val_hdf5_file, 'r')\n", (8320, 8340), False, 'import h5py\n'), ((8436, 8494), 'numpy.load', 'np.load', (["(common.DATASETS_DIR + '/y_val_' + y_path + '.npy')"], {}), "(common.DATASETS_DIR + '/y_val_' + y_path + '.npy')\n", (8443, 8494), True, 'import numpy as np\n'), ((8955, 9002), 'numpy.asarray', 'np.asarray', (['[id2gt_val[id] for id in index_val]'], {}), '([id2gt_val[id] for id in index_val])\n', (8965, 9002), True, 'import numpy as np\n'), ((9186, 9216), 'h5py.File', 'h5py.File', (['test_hdf5_file', '"""r"""'], {}), "(test_hdf5_file, 'r')\n", (9195, 9216), False, 'import h5py\n'), ((9317, 9376), 'numpy.load', 'np.load', (["(common.DATASETS_DIR + '/y_test_' + y_path + '.npy')"], {}), "(common.DATASETS_DIR + '/y_test_' + y_path + '.npy')\n", (9324, 9376), True, 'import numpy as np\n'), ((9852, 9901), 'numpy.asarray', 'np.asarray', (['[id2gt_test[id] for id in index_test]'], {}), '([id2gt_test[id] for id in index_test])\n', (9862, 9901), True, 'import numpy as np\n'), ((10085, 10110), 'h5py.File', 'h5py.File', (['hdf5_file', '"""r"""'], {}), "(hdf5_file, 'r')\n", (10094, 10110), False, 'import h5py\n'), ((10583, 10626), 'numpy.asarray', 'np.asarray', (['[id2gt[id] for id in index_val]'], {}), '([id2gt[id] for id in index_val])\n', (10593, 10626), True, 'import numpy as np\n'), ((11019, 11063), 'numpy.asarray', 'np.asarray', (['[id2gt[id] for id in index_test]'], {}), '([id2gt[id] for id in index_test])\n', (11029, 11063), True, 'import numpy as np\n'), ((22001, 22062), 'os.path.join', 'os.path.join', (['model_dir', '(config.model_id + common.WEIGHTS_EXT)'], {}), '(model_dir, config.model_id + common.WEIGHTS_EXT)\n', (22013, 22062), False, 'import os\n'), ((22554, 22584), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['Y_test', 'y_pred'], {}), '(Y_test, y_pred)\n', (22568, 22584), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, average_precision_score\n'), ((22599, 22647), 'sklearn.metrics.precision_score', 'precision_score', (['Y_test', 'y_pred'], {'average': '"""macro"""'}), "(Y_test, y_pred, average='macro')\n", (22614, 22647), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, average_precision_score\n'), ((22663, 22708), 'sklearn.metrics.recall_score', 'recall_score', (['Y_test', 'y_pred'], {'average': '"""macro"""'}), "(Y_test, y_pred, average='macro')\n", (22675, 22708), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, 
f1_score, accuracy_score, average_precision_score\n'), ((22720, 22761), 'sklearn.metrics.f1_score', 'f1_score', (['Y_test', 'y_pred'], {'average': '"""macro"""'}), "(Y_test, y_pred, average='macro')\n", (22728, 22761), False, 'from sklearn.metrics import roc_curve, auc, roc_auc_score, precision_score, recall_score, f1_score, accuracy_score, average_precision_score\n'), ((23669, 23689), 'logging.debug', 'logging.debug', (['score'], {}), '(score)\n', (23682, 23689), False, 'import logging\n'), ((23698, 23732), 'logging.debug', 'logging.debug', (['model.metrics_names'], {}), '(model.metrics_names)\n', (23711, 23732), False, 'import logging\n'), ((24821, 24878), 'pandas.read_csv', 'pd.read_csv', (['common.DEFAULT_TRAINED_MODELS_FILE'], {'sep': '"""\t"""'}), "(common.DEFAULT_TRAINED_MODELS_FILE, sep='\\t')\n", (24832, 24878), True, 'import pandas as pd\n'), ((25850, 25990), 'eval.do_eval', 'do_eval', (["trained_model['model_id']"], {'get_roc': '(True)', 'get_map': '(True)', 'get_p': '(True)', 'predictions': 'predictions', 'predictions_index': 'predictions_index'}), "(trained_model['model_id'], get_roc=True, get_map=True, get_p=True,\n predictions=predictions, predictions_index=predictions_index)\n", (25857, 25990), False, 'from eval import do_eval\n'), ((1683, 1692), 'theano.tensor.sqr', 'tt.sqr', (['x'], {}), '(x)\n', (1689, 1692), True, 'import theano.tensor as tt\n'), ((5362, 5441), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_val_%s_%s.npy' % (metadata_source, dataset))"], {}), "(common.TRAINDATA_DIR + '/X_val_%s_%s.npy' % (metadata_source, dataset))\n", (5369, 5441), True, 'import numpy as np\n'), ((5460, 5545), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_test_%s_%s.npy' % (metadata_source, dataset))"], {}), "(common.TRAINDATA_DIR + '/X_test_%s_%s.npy' % (metadata_source, dataset)\n )\n", (5467, 5545), True, 'import numpy as np\n'), ((8063, 8090), 'numpy.where', 'np.where', (["(index_train == '')"], {}), "(index_train == '')\n", (8071, 8090), True, 'import numpy as np\n'), ((8819, 8844), 'numpy.where', 'np.where', (["(index_val == '')"], {}), "(index_val == '')\n", (8827, 8844), True, 'import numpy as np\n'), ((8895, 8920), 'numpy.where', 'np.where', (["(index_val == '')"], {}), "(index_val == '')\n", (8903, 8920), True, 'import numpy as np\n'), ((9711, 9737), 'numpy.where', 'np.where', (["(index_test == '')"], {}), "(index_test == '')\n", (9719, 9737), True, 'import numpy as np\n'), ((9790, 9816), 'numpy.where', 'np.where', (["(index_test == '')"], {}), "(index_test == '')\n", (9798, 9816), True, 'import numpy as np\n'), ((10448, 10473), 'numpy.where', 'np.where', (["(index_val == '')"], {}), "(index_val == '')\n", (10456, 10473), True, 'import numpy as np\n'), ((10524, 10549), 'numpy.where', 'np.where', (["(index_val == '')"], {}), "(index_val == '')\n", (10532, 10549), True, 'import numpy as np\n'), ((10819, 10845), 'numpy.where', 'np.where', (["(index_test == '')"], {}), "(index_test == '')\n", (10827, 10845), True, 'import numpy as np\n'), ((10898, 10924), 'numpy.where', 'np.where', (["(index_test == '')"], {}), "(index_test == '')\n", (10906, 10924), True, 'import numpy as np\n'), ((11180, 11207), 'numpy.where', 'np.where', (["(index_train == '')"], {}), "(index_train == '')\n", (11188, 11207), True, 'import numpy as np\n'), ((12337, 12382), 'numpy.asarray', 'np.asarray', (['[id2gt[id] for id in index_block]'], {}), '([id2gt[id] for id in index_block])\n', (12347, 12382), True, 'import numpy as np\n'), ((19575, 19642), 'numpy.load', 'np.load', 
(["(common.DATASETS_DIR + '/y_train_' + config.y_path + '.npy')"], {}), "(common.DATASETS_DIR + '/y_train_' + config.y_path + '.npy')\n", (19582, 19642), True, 'import numpy as np\n'), ((25282, 25528), 'predict.obtain_predictions', 'obtain_predictions', (['model_config', 'testset', "trained_model['model_id']", "config.predicting_params['trim_coeff']"], {'model': 'model', 'with_metadata': 'with_metadata', 'only_metadata': 'only_metadata', 'metadata_source': 'metadata_source', 'with_patches': '(True)'}), "(model_config, testset, trained_model['model_id'], config\n .predicting_params['trim_coeff'], model=model, with_metadata=\n with_metadata, only_metadata=only_metadata, metadata_source=\n metadata_source, with_patches=True)\n", (25300, 25528), False, 'from predict import obtain_predictions\n'), ((25573, 25800), 'predict.obtain_predictions', 'obtain_predictions', (['model_config', 'testset', "trained_model['model_id']", "config.predicting_params['trim_coeff']"], {'model': 'model', 'with_metadata': 'with_metadata', 'only_metadata': 'only_metadata', 'metadata_source': 'metadata_source'}), "(model_config, testset, trained_model['model_id'], config\n .predicting_params['trim_coeff'], model=model, with_metadata=\n with_metadata, only_metadata=only_metadata, metadata_source=metadata_source\n )\n", (25591, 25800), False, 'from predict import obtain_predictions\n'), ((1780, 1797), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (1788, 1797), True, 'import numpy as np\n'), ((4016, 4101), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source, dataset))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source,\n dataset))\n", (4023, 4101), True, 'import numpy as np\n'), ((4238, 4323), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source, dataset))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source,\n dataset))\n", (4245, 4323), True, 'import numpy as np\n'), ((12200, 12227), 'numpy.where', 'np.where', (["(index_block == '')"], {}), "(index_block == '')\n", (12208, 12227), True, 'import numpy as np\n'), ((12286, 12313), 'numpy.where', 'np.where', (["(index_block == '')"], {}), "(index_block == '')\n", (12294, 12313), True, 'import numpy as np\n'), ((12449, 12479), 'sklearn.preprocessing.normalize', 'normalize', (['y_block'], {'copy': '(False)'}), '(y_block, copy=False)\n', (12458, 12479), False, 'from sklearn.preprocessing import normalize\n'), ((12571, 12597), 'random.shuffle', 'random.shuffle', (['items_list'], {}), '(items_list)\n', (12585, 12597), False, 'import random\n'), ((13552, 13658), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source, params[\n 'dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source,\n params['dataset']['dataset']))\n", (13559, 13658), True, 'import numpy as np\n'), ((13833, 13958), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix2'], params['dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix2'], params['dataset']['dataset']))\n", (13840, 13958), True, 'import numpy as np\n'), ((14096, 14221), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix3'], params['dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix3'], 
params['dataset']['dataset']))\n", (14103, 14221), True, 'import numpy as np\n'), ((14359, 14484), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix4'], params['dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix4'], params['dataset']['dataset']))\n", (14366, 14484), True, 'import numpy as np\n'), ((14641, 14747), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source, params[\n 'dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (metadata_source,\n params['dataset']['dataset']))\n", (14648, 14747), True, 'import numpy as np\n'), ((23408, 23433), 'sklearn.metrics.r2_score', 'r2_score', (['Y_test[i]', 'pred'], {}), '(Y_test[i], pred)\n', (23416, 23433), False, 'from sklearn.metrics import r2_score\n'), ((24720, 24746), 'json.dumps', 'json.dumps', (['epochs.history'], {}), '(epochs.history)\n', (24730, 24746), False, 'import json\n'), ((14880, 15005), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix2'], params['dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix2'], params['dataset']['dataset']))\n", (14887, 15005), True, 'import numpy as np\n'), ((15143, 15268), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix3'], params['dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix3'], params['dataset']['dataset']))\n", (15150, 15268), True, 'import numpy as np\n'), ((15406, 15531), 'numpy.load', 'np.load', (["(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix4'], params['dataset']['dataset']))"], {}), "(common.TRAINDATA_DIR + '/X_train_%s_%s.npy' % (params['dataset'][\n 'meta-suffix4'], params['dataset']['dataset']))\n", (15413, 15531), True, 'import numpy as np\n'), ((23473, 23488), 'numpy.asarray', 'np.asarray', (['r2s'], {}), '(r2s)\n', (23483, 23488), True, 'import numpy as np\n')]
|
import tensorflow as tf
from tensorflow.contrib import slim
import numpy as np
import os
import time
from utils import kde
from ops import *
tf.reset_default_graph()
os.environ['CUDA_VISIBLE_DEVICES'] = '6'
# Parameters
learning_rate = 1e-3
reg_param = 10.
batch_size = 128
x_dim = 2
z_dim = 2
sigma = 0.7
mu = 2
method = 'jare' # ['conopt', 'simgd', 'simregg', 'simregd', 'jare']
divergence = 'JS' # ['standard', 'JS', 'indicator', 'wgan']
opt_type = 'sgd' # ['sgd', 'rmsprop', 'adam']
outdir = os.path.join('affine_res', 'kde_Isotrlin', time.strftime("%Y%m%d"),
'{}_{}_bs{}_std{}_reg{}_lr{}_{}_mu{}'.format(method, divergence, batch_size, sigma,
reg_param, learning_rate, opt_type, mu))
sumdir = os.path.join('affine_res', 'summary_Isotrlin', time.strftime("%Y%m%d"),
'{}_{}_bs{}_std{}_reg{}_lr{}_{}_mu{}'.format(method, divergence, batch_size, sigma,
reg_param, learning_rate, opt_type, mu))
niter = 15000
n_save = 500
n_print = 100
bbox = [-2, 2, -2 + mu, 2 + mu]
# Target distribution
mus = np.vstack([[0, mu] for _ in range(batch_size)])
x_real = mus + sigma * tf.random_normal([batch_size, x_dim])
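# x_real: isotropic 2-D Gaussian samples with mean (0, mu) and standard deviation sigma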
generator = tf.make_template('generator', generator4Gaussian_func1)
discriminator = tf.make_template('discriminator', discriminator4Gaussian_func1)
# g and d output
z = sigma * tf.random_normal([batch_size, z_dim])
x_fake = generator(z, x_dim, mu)
d_out_real = discriminator(x_real)
d_out_fake = discriminator(x_fake)
d_loss, g_loss = compute_loss(d_out_real, d_out_fake, divergence)
# collect two sets of trainable variables
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
d_loss_tot, g_loss_tot, train_op, reg, d_grad_norm, g_grad_norm = \
compute_gradients(d_loss, d_vars, g_loss, g_vars, opt_type, learning_rate, reg_param, method)
summary_op = tf.summary.merge([
tf.summary.scalar("loss/d_loss", d_loss),
tf.summary.scalar("loss/g_loss", g_loss),
tf.summary.scalar("loss/reg", reg),
tf.summary.scalar("loss/d_loss_tot", d_loss_tot),
tf.summary.scalar("loss/g_loss_tot", g_loss_tot),
tf.summary.scalar("grad/d_grad_norm", d_grad_norm),
tf.summary.scalar("grad/g_grad_norm", g_grad_norm),
])
print("Using the optimizer: {}".format(method))
# initialize and run
sess = tf.Session()
train_writer = tf.summary.FileWriter(sumdir, sess.graph)
sess.run(tf.global_variables_initializer())
if not os.path.exists(outdir):
os.makedirs(outdir)
print('Training: {}_{}_bs{}_mu{}_std{}_reg{}_lr{}'.format(
method, divergence, batch_size, mu, sigma, reg_param, learning_rate))
ztest = [sigma * np.random.randn(batch_size, z_dim) for i in range(10)]
# generate real samples
x_real_out = np.concatenate([sess.run(x_real)])
init_g = sess.run(g_vars[0])
init_d = sess.run(d_vars[0])
print('initial theta: {}'.format(init_d))
print('initial phi: {}'.format(init_g))
kde(x_real_out[:, 0], x_real_out[:, 1], bbox=bbox, save_file=os.path.join(outdir, 'real.png'))
for i in range(niter):
if i % n_print == 0:
d_loss_out, g_loss_out, summary_str = sess.run([d_loss, g_loss, summary_op])
train_writer.add_summary(summary_str, i)
print('iters = %d, d_loss = %.4f, g_loss = %.4f' % (i, d_loss_out, g_loss_out))
if i % n_save == 0:
x_out = np.concatenate([sess.run(x_fake, feed_dict={z: zt}) for zt in ztest], axis=0)
kde(x_out[:, 0], x_out[:, 1], bbox=bbox, save_file=os.path.join(outdir, '%d.png' % i))
sess.run(train_op)
sess.close()
|
[
"os.path.exists",
"tensorflow.reset_default_graph",
"tensorflow.random_normal",
"os.makedirs",
"tensorflow.Session",
"time.strftime",
"os.path.join",
"tensorflow.global_variables_initializer",
"numpy.random.randn",
"tensorflow.summary.scalar",
"tensorflow.summary.FileWriter",
"tensorflow.make_template",
"tensorflow.get_collection"
] |
[((144, 168), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (166, 168), True, 'import tensorflow as tf\n'), ((1302, 1357), 'tensorflow.make_template', 'tf.make_template', (['"""generator"""', 'generator4Gaussian_func1'], {}), "('generator', generator4Gaussian_func1)\n", (1318, 1357), True, 'import tensorflow as tf\n'), ((1374, 1437), 'tensorflow.make_template', 'tf.make_template', (['"""discriminator"""', 'discriminator4Gaussian_func1'], {}), "('discriminator', discriminator4Gaussian_func1)\n", (1390, 1437), True, 'import tensorflow as tf\n'), ((1728, 1798), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""generator"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n", (1745, 1798), True, 'import tensorflow as tf\n'), ((1808, 1882), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""discriminator"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n", (1825, 1882), True, 'import tensorflow as tf\n'), ((2518, 2530), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2528, 2530), True, 'import tensorflow as tf\n'), ((2546, 2587), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['sumdir', 'sess.graph'], {}), '(sumdir, sess.graph)\n', (2567, 2587), True, 'import tensorflow as tf\n'), ((546, 569), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (559, 569), False, 'import time\n'), ((841, 864), 'time.strftime', 'time.strftime', (['"""%Y%m%d"""'], {}), "('%Y%m%d')\n", (854, 864), False, 'import time\n'), ((1468, 1505), 'tensorflow.random_normal', 'tf.random_normal', (['[batch_size, z_dim]'], {}), '([batch_size, z_dim])\n', (1484, 1505), True, 'import tensorflow as tf\n'), ((2597, 2630), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2628, 2630), True, 'import tensorflow as tf\n'), ((2640, 2662), 'os.path.exists', 'os.path.exists', (['outdir'], {}), '(outdir)\n', (2654, 2662), False, 'import os\n'), ((2668, 2687), 'os.makedirs', 'os.makedirs', (['outdir'], {}), '(outdir)\n', (2679, 2687), False, 'import os\n'), ((1251, 1288), 'tensorflow.random_normal', 'tf.random_normal', (['[batch_size, x_dim]'], {}), '([batch_size, x_dim])\n', (1267, 1288), True, 'import tensorflow as tf\n'), ((2087, 2127), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/d_loss"""', 'd_loss'], {}), "('loss/d_loss', d_loss)\n", (2104, 2127), True, 'import tensorflow as tf\n'), ((2133, 2173), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/g_loss"""', 'g_loss'], {}), "('loss/g_loss', g_loss)\n", (2150, 2173), True, 'import tensorflow as tf\n'), ((2179, 2213), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/reg"""', 'reg'], {}), "('loss/reg', reg)\n", (2196, 2213), True, 'import tensorflow as tf\n'), ((2219, 2267), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/d_loss_tot"""', 'd_loss_tot'], {}), "('loss/d_loss_tot', d_loss_tot)\n", (2236, 2267), True, 'import tensorflow as tf\n'), ((2273, 2321), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss/g_loss_tot"""', 'g_loss_tot'], {}), "('loss/g_loss_tot', g_loss_tot)\n", (2290, 2321), True, 'import tensorflow as tf\n'), ((2328, 2378), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""grad/d_grad_norm"""', 'd_grad_norm'], {}), "('grad/d_grad_norm', d_grad_norm)\n", (2345, 2378), True, 'import tensorflow as tf\n'), ((2384, 2434), 'tensorflow.summary.scalar', 
'tf.summary.scalar', (['"""grad/g_grad_norm"""', 'g_grad_norm'], {}), "('grad/g_grad_norm', g_grad_norm)\n", (2401, 2434), True, 'import tensorflow as tf\n'), ((2839, 2873), 'numpy.random.randn', 'np.random.randn', (['batch_size', 'z_dim'], {}), '(batch_size, z_dim)\n', (2854, 2873), True, 'import numpy as np\n'), ((3168, 3200), 'os.path.join', 'os.path.join', (['outdir', '"""real.png"""'], {}), "(outdir, 'real.png')\n", (3180, 3200), False, 'import os\n'), ((3649, 3683), 'os.path.join', 'os.path.join', (['outdir', "('%d.png' % i)"], {}), "(outdir, '%d.png' % i)\n", (3661, 3683), False, 'import os\n')]
|
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
import mindspore.nn as nn
import mindspore.ops as ops
import mindspore.context as context
from mindspore import Tensor
from mindspore.ops.operations import _inner_ops as inner
class Net(nn.Cell):
def __init__(self, op, axis):
super(Net, self).__init__()
if op == "Cummin":
self.op = inner.Cummin(axis)
elif op == "Cummax":
self.op = ops.Cummax(axis)
else:
raise ValueError("op value error.")
def construct(self, x):
return self.op(x)
def cum_minmax_compare(op, x, expected, axis, data_type):
net = Net(op, axis)
x = np.array(x).astype(data_type)
expected = (np.array(expected[0]).astype(data_type), np.array(expected[1]).astype(data_type))
# Pynative
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
output = net(Tensor(x))
assert np.allclose(output[0].asnumpy(), expected[0], equal_nan=True)
assert np.allclose(output[1].asnumpy(), expected[1])
# Graph
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
output = net(Tensor(x))
assert np.allclose(output[0].asnumpy(), expected[0], equal_nan=True)
assert np.allclose(output[1].asnumpy(), expected[1])
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.uint8, np.int8, np.int32, np.float16, np.float32])
def test_cummin_multi_dims(data_type):
"""
Feature: Op Cummin
    Description: test Cummin operator with multiple dimensions.
    Expectation: the result matches the expectation.
"""
op = "Cummin"
axis = 1
x = [[[14, 19, 18, 11, 6], [1, 4, 18, 6, 1], [15, 13, 12, 9, 19]],
[[16, 16, 17, 10, 15], [9, 7, 10, 9, 4], [6, 14, 16, 3, 2]],
[[1, 13, 15, 1, 6], [20, 6, 8, 19, 19], [3, 14, 20, 18, 19]],
[[20, 1, 14, 9, 3], [13, 11, 2, 17, 14], [0, 15, 13, 7, 10]]]
cummin_output = (
[[[14, 19, 18, 11, 6], [1, 4, 18, 6, 1], [1, 4, 12, 6, 1]],
[[16, 16, 17, 10, 15], [9, 7, 10, 9, 4], [6, 7, 10, 3, 2]],
[[1, 13, 15, 1, 6], [1, 6, 8, 1, 6], [1, 6, 8, 1, 6]], [[20, 1, 14, 9, 3], [13, 1, 2, 9, 3], [0, 1, 2, 7, 3]]],
[[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [1, 1, 2, 1, 1]], [[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 1, 1, 2, 2]],
[[0, 0, 0, 0, 0], [0, 1, 1, 0, 0], [0, 1, 1, 0, 0]], [[0, 0, 0, 0, 0], [1, 0, 1, 0, 0], [2, 0, 1, 2, 0]]])
cum_minmax_compare(op, x, cummin_output, axis, data_type)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.uint8, np.uint32, np.int8, np.int32, np.int64, np.float16, np.float32])
def test_cummax_multi_dims(data_type):
"""
Feature: Op Cummax
    Description: test Cummax operator with multiple dimensions.
    Expectation: the result matches the expectation.
"""
op = "Cummax"
axis = 1
x = [[[11, 11, 1, 7, 11], [1, 8, 18, 0, 9], [12, 1, 16, 11, 8]],
[[18, 8, 10, 17, 14], [4, 20, 8, 20, 11], [14, 1, 8, 5, 16]],
[[6, 13, 19, 14, 8], [17, 19, 11, 0, 7], [18, 4, 13, 14, 16]],
[[10, 7, 7, 7, 19], [15, 0, 15, 5, 14], [9, 7, 10, 4, 14]]]
cummax_output = ([[[11, 11, 1, 7, 11], [11, 11, 18, 7, 11], [12, 11, 18, 11, 11]],
[[18, 8, 10, 17, 14], [18, 20, 10, 20, 14], [18, 20, 10, 20, 16]],
[[6, 13, 19, 14, 8], [17, 19, 19, 14, 8], [18, 19, 19, 14, 16]],
[[10, 7, 7, 7, 19], [15, 7, 15, 7, 19], [15, 7, 15, 7, 19]]],
[[[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [2, 0, 1, 2, 0]],
[[0, 0, 0, 0, 0], [0, 1, 0, 1, 0], [0, 1, 0, 1, 2]],
[[0, 0, 0, 0, 0], [1, 1, 0, 0, 0], [2, 1, 0, 2, 2]],
[[0, 0, 0, 0, 0], [1, 0, 1, 0, 0], [1, 2, 1, 0, 0]]])
cum_minmax_compare(op, x, cummax_output, axis, data_type)
@pytest.mark.level0
@pytest.mark.env_onecard
@pytest.mark.platform_x86_gpu_training
@pytest.mark.parametrize("data_type", [np.float16, np.float32])
def test_cumminmax_nan(data_type):
"""
Feature: Op Cummin/Cummax
Description: test Cummin/Cummax operator with nan input.
    Expectation: the result matches the expectation.
"""
inf = float('inf')
nan = float('nan')
axis = 0
x = [4, inf, 1.5, -inf, 0, nan, 1]
cummin_output = ([4, 4, 1.5, -inf, -inf, nan, nan], [0, 0, 2, 3, 3, 5, 5])
cummax_output = ([4, inf, inf, inf, inf, nan, nan], [0, 1, 1, 1, 1, 5, 5])
cum_minmax_compare("Cummin", x, cummin_output, axis, data_type)
cum_minmax_compare("Cummax", x, cummax_output, axis, data_type)
|
[
"mindspore.ops.Cummax",
"mindspore.context.set_context",
"pytest.mark.parametrize",
"numpy.array",
"mindspore.ops.operations._inner_ops.Cummin",
"mindspore.Tensor"
] |
[((2017, 2113), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_type"""', '[np.uint8, np.int8, np.int32, np.float16, np.float32]'], {}), "('data_type', [np.uint8, np.int8, np.int32, np.\n float16, np.float32])\n", (2040, 2113), False, 'import pytest\n'), ((3272, 3389), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_type"""', '[np.uint8, np.uint32, np.int8, np.int32, np.int64, np.float16, np.float32]'], {}), "('data_type', [np.uint8, np.uint32, np.int8, np.\n int32, np.int64, np.float16, np.float32])\n", (3295, 3389), False, 'import pytest\n'), ((4683, 4745), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data_type"""', '[np.float16, np.float32]'], {}), "('data_type', [np.float16, np.float32])\n", (4706, 4745), False, 'import pytest\n'), ((1462, 1530), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.PYNATIVE_MODE', 'device_target': '"""GPU"""'}), "(mode=context.PYNATIVE_MODE, device_target='GPU')\n", (1481, 1530), True, 'import mindspore.context as context\n'), ((1706, 1771), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""GPU"""'}), "(mode=context.GRAPH_MODE, device_target='GPU')\n", (1725, 1771), True, 'import mindspore.context as context\n'), ((1548, 1557), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (1554, 1557), False, 'from mindspore import Tensor\n'), ((1789, 1798), 'mindspore.Tensor', 'Tensor', (['x'], {}), '(x)\n', (1795, 1798), False, 'from mindspore import Tensor\n'), ((1018, 1036), 'mindspore.ops.operations._inner_ops.Cummin', 'inner.Cummin', (['axis'], {}), '(axis)\n', (1030, 1036), True, 'from mindspore.ops.operations import _inner_ops as inner\n'), ((1314, 1325), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1322, 1325), True, 'import numpy as np\n'), ((1088, 1104), 'mindspore.ops.Cummax', 'ops.Cummax', (['axis'], {}), '(axis)\n', (1098, 1104), True, 'import mindspore.ops as ops\n'), ((1360, 1381), 'numpy.array', 'np.array', (['expected[0]'], {}), '(expected[0])\n', (1368, 1381), True, 'import numpy as np\n'), ((1401, 1422), 'numpy.array', 'np.array', (['expected[1]'], {}), '(expected[1])\n', (1409, 1422), True, 'import numpy as np\n')]
|
import random
import numpy as np
from mesa import Agent
# [STRATEGY_CHEATERS, STRATEGY_FAIR, STRATEGY_GENEROUS, STRATEGY_MARTYRS, STRATEGY_PRUDENT]
SMART_VAMPIRE_STRATEGIES_PROB = [0.25, 0.25, 0.125, 0.25, 0.125]
class Vampire(Agent):
def __init__(self, id, model, root_id):
super().__init__(id, model)
self.root_id = root_id
self.survival_time = 60
def step(self):
self.perform_hunt()
shared_food = self.perform_food_sharing()
self.perform_reproduction(shared_food)
self.survival_time -= 12
def get_root(self, root):
return [agent for agent in self.model.schedule.agents if agent.root_id == root]
def perform_hunt(self):
if random.random() < self.model.hunt_probability:
self.survival_time = 60
else:
self.survival_time -= 12
def perform_food_sharing(self):
if self.model.food_sharing:
if self.survival_time <= 24:
group = range(self.model.n_roots)
prob = np.ones(self.model.n_roots)
prob[self.root_id] = prob[self.root_id] * (self.model.n_roots - 1) * 9
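                # weight the vampire's own roost so that, after normalisation, ~90% of sharing requests stay within the same roost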
prob = prob / np.sum(prob)
group_id = np.random.choice(group, p=prob)
group_member = self.get_root(group_id)
if len(group_member) > 0:
other = random.choice(group_member)
return self.share_food(other)
return False
def perform_reproduction(self, shared_food):
if self.model.reproduction and shared_food:
if random.random() < self.model.reproduction_probability:
id = max([agent.unique_id[1] for agent in self.get_root(self.root_id)]) + 1
baby_vampire = self.model.vampire_type((self.root_id, id), self.model,
random.choice(range(self.model.n_roots)))
self.model.schedule.add(baby_vampire)
def is_dead(self):
return self.survival_time <= 0
def share_food(self, other):
raise NotImplementedError
class SimpleVampire(Vampire):
def share_food(self, other):
if other.survival_time >= 48:
other.survival_time -= 6
self.survival_time += 6
return True
return False
class SmartVampire(Vampire):
STRATEGY_CHEATERS = 'Cheater'
STRATEGY_FAIR = 'Fair'
STRATEGY_MARTYRS = 'Martyrs'
STRATEGY_GENEROUS = 'Generous'
STRATEGY_PRUDENT = 'Prudent'
STRATEGIES = [STRATEGY_CHEATERS, STRATEGY_FAIR, STRATEGY_GENEROUS, STRATEGY_MARTYRS, STRATEGY_PRUDENT]
def __init__(self, id, model, root_id):
super().__init__(id, model, root_id)
self.motivation = np.random.choice(self.STRATEGIES, p=self.model.smart_vampire_strategies_prob)
def share_food(self, other):
if other.motivation == other.STRATEGY_CHEATERS:
return False
elif other.motivation == other.STRATEGY_MARTYRS:
other.survival_time -= 12
self.survival_time += 12
return True
elif other.motivation == other.STRATEGY_FAIR:
if other.survival_time >= 48:
other.survival_time -= 12
self.survival_time += 12
return True
elif other.survival_time >= 24:
other.survival_time -= 6
self.survival_time += 6
return True
elif other.motivation == other.STRATEGY_GENEROUS:
if other.survival_time >= 48:
other.survival_time -= 24
self.survival_time += 24
return True
elif other.survival_time >= 24:
other.survival_time -= 12
self.survival_time += 12
return True
elif other.motivation == other.STRATEGY_PRUDENT:
if other.survival_time >= 48:
other.survival_time -= 6
self.survival_time += 6
return True
return False
class SmartDynamicVampire(Vampire):
def __init__(self, id, model, root_id, motivation=None):
super().__init__(id, model, root_id)
        self.motivation = np.random.randint(-4, 7) if motivation is None else motivation
def step(self):
self.perform_hunt()
shared_food = self.perform_food_sharing()
self.motivation = max(min(self.motivation, self.model.max_motivation), self.model.min_motivation)
self.perform_reproduction(shared_food)
self.survival_time -= 12
def share_food(self, other):
if other.motivation < -2: # Cheater
self.motivation -= 1
return False
elif -2 <= other.motivation < 0: # Prudent
if other.survival_time >= 48:
other.survival_time -= 6
self.survival_time += 6
self.motivation += 1
return True
elif 0 <= other.motivation <= 1: # Fair
if other.survival_time >= 48:
other.survival_time -= 12
self.survival_time += 12
self.motivation += 1
return True
elif other.survival_time >= 24:
other.survival_time -= 6
self.survival_time += 6
self.motivation += 1
return True
elif 1 < other.motivation <= 4: # Generous
if other.survival_time >= 48:
other.survival_time -= 24
self.survival_time += 24
self.motivation += 1
return True
elif other.survival_time >= 24:
other.survival_time -= 12
self.survival_time += 12
self.motivation += 1
return True
elif other.motivation > 4: # Martyr
other.survival_time -= 12
self.survival_time += 12
self.motivation += 1
return True
self.motivation -= 1
return False
def perform_reproduction(self, shared_food):
if self.model.reproduction and shared_food:
if random.random() < self.model.reproduction_probability:
id = max([agent.unique_id[1] for agent in self.get_root(self.root_id)]) + 1
baby_vampire = self.model.vampire_type((self.root_id, id), self.model,
random.choice(range(self.model.n_roots)), -2)
self.model.schedule.add(baby_vampire)
|
[
"random.choice",
"numpy.ones",
"numpy.random.choice",
"numpy.sum",
"numpy.random.randint",
"random.random"
] |
[((2752, 2829), 'numpy.random.choice', 'np.random.choice', (['self.STRATEGIES'], {'p': 'self.model.smart_vampire_strategies_prob'}), '(self.STRATEGIES, p=self.model.smart_vampire_strategies_prob)\n', (2768, 2829), True, 'import numpy as np\n'), ((720, 735), 'random.random', 'random.random', ([], {}), '()\n', (733, 735), False, 'import random\n'), ((4226, 4250), 'numpy.random.randint', 'np.random.randint', (['(-4)', '(7)'], {}), '(-4, 7)\n', (4243, 4250), True, 'import numpy as np\n'), ((1041, 1068), 'numpy.ones', 'np.ones', (['self.model.n_roots'], {}), '(self.model.n_roots)\n', (1048, 1068), True, 'import numpy as np\n'), ((1226, 1257), 'numpy.random.choice', 'np.random.choice', (['group'], {'p': 'prob'}), '(group, p=prob)\n', (1242, 1257), True, 'import numpy as np\n'), ((1599, 1614), 'random.random', 'random.random', ([], {}), '()\n', (1612, 1614), False, 'import random\n'), ((6154, 6169), 'random.random', 'random.random', ([], {}), '()\n', (6167, 6169), False, 'import random\n'), ((1186, 1198), 'numpy.sum', 'np.sum', (['prob'], {}), '(prob)\n', (1192, 1198), True, 'import numpy as np\n'), ((1383, 1410), 'random.choice', 'random.choice', (['group_member'], {}), '(group_member)\n', (1396, 1410), False, 'import random\n')]
|
from numpy import random
x = random.multinomial(n=6, pvals=[1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6])
print(x)
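# Editorial sketch: each entry of x counts one of the six equally likely outcomes
# over the n=6 trials, so the entries always sum to 6. Repeating the experiment
# many times shows the per-outcome frequency approaching 1/6:
many = random.multinomial(n=6, pvals=[1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6], size=10000)
print(many.sum(axis=0) / (6 * 10000))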
|
[
"numpy.random.multinomial"
] |
[((30, 103), 'numpy.random.multinomial', 'random.multinomial', ([], {'n': '(6)', 'pvals': '[1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6]'}), '(n=6, pvals=[1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1 / 6])\n', (48, 103), False, 'from numpy import random\n')]
|
# getLog(M): returns the natural log of the matrix M.
import numpy as np
from .isPSDMd import isPSD
__all__ = ['getLog']
def getLog(M, eps=1e-15):
r"""Takes as input a matrix M and returns the natural log of M.
Parameters
----------
M : numpy.ndarray
2-d array representing a hermitian matrix
eps : float
        Optional with default 1e-15, sets the tolerance for the smallest eigenvalue
Returns
----------
lgMt : numpy.ndarray
log of the input array
Notes
----------
    If any eigenvalue is smaller than eps, the spectrum is rescaled so that every
    eigenvalue lies between its actual value and 1.0 (weighted by eps).
"""
    try:
        (psd, val, vec) = isPSD(M, eps, flag=True)
    except Exception:
        raise ValueError('Input matrix is not square and hermitian')
    if not psd:
        raise ValueError('Eigenvalues of input matrix not sufficiently positive')
n = len(val)
#If any of the eigenvalues is smaller than eps, then rescale the spectrum
#to make all eigenvalues at least eps, this prevents log from complaining
if np.any(val<eps):
val = (1-eps)*val + eps*1.
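    # reconstruct log(M) = V diag(log(eigenvalues)) V^H from the eigendecomposition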
lgMt = np.dot(np.log(val)*vec,vec.conj().T)
return lgMt
|
[
"numpy.log",
"numpy.any"
] |
[((1135, 1152), 'numpy.any', 'np.any', (['(val < eps)'], {}), '(val < eps)\n', (1141, 1152), True, 'import numpy as np\n'), ((1211, 1222), 'numpy.log', 'np.log', (['val'], {}), '(val)\n', (1217, 1222), True, 'import numpy as np\n')]
|
# Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import numpy as np
###############################################################################################################
# Get the environment variables (calibration dataset, image names)
calib_image_dir = os.environ['CALIB_DATASET']
calib_image_list = os.environ['CALIB_IMAGE_LIST']
calib_batch_size = int(os.environ['BATCH_SIZE'])
input_node=os.environ['INPUT_NODE_NAME']
input_width=int(os.environ['INPUT_WIDTH'])
input_height=int(os.environ['INPUT_HEIGHT'])
size = (input_width, input_width)
###############################################################################################################
def preprocess(image):
"""
Resize the image to fit the model input size.
Normalize image from [0:255] pixel values to the range [0:1].
"""
# Resize the image to match the model requirements
image = cv2.resize(image, size, interpolation=cv2.INTER_NEAREST)
# Set the values to float type
image = np.asarray(image)
image = image.astype(np.float32)
# Scale image
    return image / 255.0  # TODO: do we really need to resize?
###############################################################################################################
def calib_input(iter):
"""
Input of the Yolo algorithm for calibration, using a batch of images.
"""
images = []
# Read content of the calibration image list
line = open(calib_image_list).readlines()
# Run a batch
for index in range(0, calib_batch_size):
# Get the image name to process
curline = line[iter * calib_batch_size + index]
calib_image_name = curline.strip()
# Open the corresponding image file
filename = os.path.join(calib_image_dir, calib_image_name)
image = cv2.imread(filename)
# Check whether the image is empty
if image is None :
raise TypeError("Image {} is empty.".format(filename))
# Resize and normalize image
image = preprocess(image)
# Append image to list of inputs
images.append(image)
print("Iteration number : {} and index number {} and file name {} ".format(iter, index, filename))
# Link input images to the input node name
return {input_node: images}
|
[
"os.path.join",
"cv2.resize",
"numpy.asarray",
"cv2.imread"
] |
[((1452, 1508), 'cv2.resize', 'cv2.resize', (['image', 'size'], {'interpolation': 'cv2.INTER_NEAREST'}), '(image, size, interpolation=cv2.INTER_NEAREST)\n', (1462, 1508), False, 'import cv2\n'), ((1551, 1568), 'numpy.asarray', 'np.asarray', (['image'], {}), '(image)\n', (1561, 1568), True, 'import numpy as np\n'), ((2223, 2270), 'os.path.join', 'os.path.join', (['calib_image_dir', 'calib_image_name'], {}), '(calib_image_dir, calib_image_name)\n', (2235, 2270), False, 'import os\n'), ((2281, 2301), 'cv2.imread', 'cv2.imread', (['filename'], {}), '(filename)\n', (2291, 2301), False, 'import cv2\n')]
|
# SCRAMBLE - Adit
import cv2
import numpy as np
from emb import *
def decryption2(p,key):
img = cv2.imread(p, cv2.IMREAD_GRAYSCALE)
i2 = np.zeros((258, 258), dtype="int")
i3 = np.zeros((258, 258), dtype="int")
i4 = np.zeros((258, 258), dtype="int")
i5 = np.zeros((258, 258), dtype="int")
key2=key[::-1]
k1=[]
k2=[]
for i in range(129):
k1.append(key[i] * -1)
k2.append(key2[i] * -1)
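    # the negated keys roll each row/column back by the amount it was shifted during encryption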
i2=img.transpose()
l=0
j=0
for i in range(1,258,2):
i3[i-1]=i2[i-1]
i3[i]=np.roll(i2[i],k2[l])
l+=1
i4=i3.transpose()
for i in range(0,258,2):
i5[i]=np.roll(i4[i],k1[j])
i5[i+1]=i4[i+1]
j+=1
i6,m=eject(i5)
g = "C:\\Users\\Adit\\Desktop\\gui\\ImageDecrypted2.jpg"
cv2.imwrite(g, i6)
with open("C:\\Users\\Adit\\Desktop\\gui\\HiddenMessage.txt","w") as f:
f.write(m)
|
[
"numpy.roll",
"cv2.imwrite",
"numpy.zeros",
"cv2.imread"
] |
[((101, 136), 'cv2.imread', 'cv2.imread', (['p', 'cv2.IMREAD_GRAYSCALE'], {}), '(p, cv2.IMREAD_GRAYSCALE)\n', (111, 136), False, 'import cv2\n'), ((146, 179), 'numpy.zeros', 'np.zeros', (['(258, 258)'], {'dtype': '"""int"""'}), "((258, 258), dtype='int')\n", (154, 179), True, 'import numpy as np\n'), ((189, 222), 'numpy.zeros', 'np.zeros', (['(258, 258)'], {'dtype': '"""int"""'}), "((258, 258), dtype='int')\n", (197, 222), True, 'import numpy as np\n'), ((232, 265), 'numpy.zeros', 'np.zeros', (['(258, 258)'], {'dtype': '"""int"""'}), "((258, 258), dtype='int')\n", (240, 265), True, 'import numpy as np\n'), ((275, 308), 'numpy.zeros', 'np.zeros', (['(258, 258)'], {'dtype': '"""int"""'}), "((258, 258), dtype='int')\n", (283, 308), True, 'import numpy as np\n'), ((783, 801), 'cv2.imwrite', 'cv2.imwrite', (['g', 'i6'], {}), '(g, i6)\n', (794, 801), False, 'import cv2\n'), ((542, 563), 'numpy.roll', 'np.roll', (['i2[i]', 'k2[l]'], {}), '(i2[i], k2[l])\n', (549, 563), True, 'import numpy as np\n'), ((641, 662), 'numpy.roll', 'np.roll', (['i4[i]', 'k1[j]'], {}), '(i4[i], k1[j])\n', (648, 662), True, 'import numpy as np\n')]
|
import pickle
import pytest
import numpy as np
from astropy import units as u
from astropy import modeling
from specutils.utils import QuantityModel
from ..utils.wcs_utils import refraction_index, vac_to_air, air_to_vac
wavelengths = [300, 500, 1000] * u.nm
data_index_refraction = {
'Griesen2006': np.array([3.07393068, 2.9434858 , 2.8925797 ]),
'Edlen1953': np.array([2.91557413, 2.78963801, 2.74148172]),
'Edlen1966': np.array([2.91554272, 2.7895973 , 2.74156098]),
'PeckReeder1972': np.array([2.91554211, 2.78960005, 2.74152561]),
'Morton2000': np.array([2.91568573, 2.78973402, 2.74169531]),
'Ciddor1996': np.array([2.91568633, 2.78973811, 2.74166131])
}
def test_quantity_model():
c = modeling.models.Chebyshev1D(3)
uc = QuantityModel(c, u.AA, u.km)
assert uc(10*u.nm).to(u.m) == 0*u.m
def test_pickle_quantity_model(tmp_path):
"""
Check that a QuantityModel can roundtrip through pickling, as it
would if fit in a multiprocessing pool.
"""
c = modeling.models.Chebyshev1D(3)
uc = QuantityModel(c, u.AA, u.km)
pkl_file = tmp_path / "qmodel.pkl"
with open(pkl_file, "wb") as f:
pickle.dump(uc, f)
with open(pkl_file, "rb") as f:
new_model = pickle.load(f)
assert new_model.input_units == uc.input_units
assert new_model.return_units == uc.return_units
assert type(new_model.unitless_model) == type(uc.unitless_model)
assert np.all(new_model.unitless_model.parameters == uc.unitless_model.parameters)
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_refraction_index(method):
tmp = (refraction_index(wavelengths, method) - 1) * 1e4
assert np.isclose(tmp, data_index_refraction[method], atol=1e-7).all()
@pytest.mark.parametrize("method", data_index_refraction.keys())
def test_air_to_vac(method):
tmp = refraction_index(wavelengths, method)
assert np.isclose(wavelengths.value * tmp,
air_to_vac(wavelengths, method=method, scheme='inversion').value,
rtol=1e-6).all()
assert np.isclose(wavelengths.value,
air_to_vac(vac_to_air(wavelengths, method=method),
method=method, scheme='iteration').value,
atol=1e-12).all()
|
[
"astropy.modeling.models.Chebyshev1D",
"pickle.dump",
"specutils.utils.QuantityModel",
"numpy.isclose",
"pickle.load",
"numpy.array",
"numpy.all"
] |
[((305, 349), 'numpy.array', 'np.array', (['[3.07393068, 2.9434858, 2.8925797]'], {}), '([3.07393068, 2.9434858, 2.8925797])\n', (313, 349), True, 'import numpy as np\n'), ((369, 415), 'numpy.array', 'np.array', (['[2.91557413, 2.78963801, 2.74148172]'], {}), '([2.91557413, 2.78963801, 2.74148172])\n', (377, 415), True, 'import numpy as np\n'), ((433, 478), 'numpy.array', 'np.array', (['[2.91554272, 2.7895973, 2.74156098]'], {}), '([2.91554272, 2.7895973, 2.74156098])\n', (441, 478), True, 'import numpy as np\n'), ((502, 548), 'numpy.array', 'np.array', (['[2.91554211, 2.78960005, 2.74152561]'], {}), '([2.91554211, 2.78960005, 2.74152561])\n', (510, 548), True, 'import numpy as np\n'), ((567, 613), 'numpy.array', 'np.array', (['[2.91568573, 2.78973402, 2.74169531]'], {}), '([2.91568573, 2.78973402, 2.74169531])\n', (575, 613), True, 'import numpy as np\n'), ((632, 678), 'numpy.array', 'np.array', (['[2.91568633, 2.78973811, 2.74166131]'], {}), '([2.91568633, 2.78973811, 2.74166131])\n', (640, 678), True, 'import numpy as np\n'), ((717, 747), 'astropy.modeling.models.Chebyshev1D', 'modeling.models.Chebyshev1D', (['(3)'], {}), '(3)\n', (744, 747), False, 'from astropy import modeling\n'), ((757, 785), 'specutils.utils.QuantityModel', 'QuantityModel', (['c', 'u.AA', 'u.km'], {}), '(c, u.AA, u.km)\n', (770, 785), False, 'from specutils.utils import QuantityModel\n'), ((1008, 1038), 'astropy.modeling.models.Chebyshev1D', 'modeling.models.Chebyshev1D', (['(3)'], {}), '(3)\n', (1035, 1038), False, 'from astropy import modeling\n'), ((1048, 1076), 'specutils.utils.QuantityModel', 'QuantityModel', (['c', 'u.AA', 'u.km'], {}), '(c, u.AA, u.km)\n', (1061, 1076), False, 'from specutils.utils import QuantityModel\n'), ((1438, 1513), 'numpy.all', 'np.all', (['(new_model.unitless_model.parameters == uc.unitless_model.parameters)'], {}), '(new_model.unitless_model.parameters == uc.unitless_model.parameters)\n', (1444, 1513), True, 'import numpy as np\n'), ((1162, 1180), 'pickle.dump', 'pickle.dump', (['uc', 'f'], {}), '(uc, f)\n', (1173, 1180), False, 'import pickle\n'), ((1238, 1252), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1249, 1252), False, 'import pickle\n'), ((1686, 1744), 'numpy.isclose', 'np.isclose', (['tmp', 'data_index_refraction[method]'], {'atol': '(1e-07)'}), '(tmp, data_index_refraction[method], atol=1e-07)\n', (1696, 1744), True, 'import numpy as np\n')]
|
# Example of numerical differentiation
import numpy as np
from common_function import function_1, numerical_diff
import matplotlib.pylab as plt
def tangent_line(f, x):
d = numerical_diff(f, x)
print(d)
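    # tangent line through (x, f(x)) with slope d: t -> d*t + (f(x) - d*x)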
y = f(x) - d*x
return lambda t: d*t + y
x = np.arange(0.0, 20.0, 0.1)  # x array from 0 to 20 in steps of 0.1
y = function_1(x)
plt.xlabel("x")
plt.ylabel("f(x)")
tf = tangent_line(function_1, 5)
y2 = tf(x)
plt.plot(x, y)
plt.plot(x, y2)
plt.show()
|
[
"matplotlib.pylab.xlabel",
"common_function.function_1",
"matplotlib.pylab.show",
"common_function.numerical_diff",
"matplotlib.pylab.plot",
"numpy.arange",
"matplotlib.pylab.ylabel"
] |
[((234, 259), 'numpy.arange', 'np.arange', (['(0.0)', '(20.0)', '(0.1)'], {}), '(0.0, 20.0, 0.1)\n', (243, 259), True, 'import numpy as np\n'), ((282, 295), 'common_function.function_1', 'function_1', (['x'], {}), '(x)\n', (292, 295), False, 'from common_function import function_1, numerical_diff\n'), ((297, 312), 'matplotlib.pylab.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (307, 312), True, 'import matplotlib.pylab as plt\n'), ((313, 331), 'matplotlib.pylab.ylabel', 'plt.ylabel', (['"""f(x)"""'], {}), "('f(x)')\n", (323, 331), True, 'import matplotlib.pylab as plt\n'), ((378, 392), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (386, 392), True, 'import matplotlib.pylab as plt\n'), ((393, 408), 'matplotlib.pylab.plot', 'plt.plot', (['x', 'y2'], {}), '(x, y2)\n', (401, 408), True, 'import matplotlib.pylab as plt\n'), ((409, 419), 'matplotlib.pylab.show', 'plt.show', ([], {}), '()\n', (417, 419), True, 'import matplotlib.pylab as plt\n'), ((146, 166), 'common_function.numerical_diff', 'numerical_diff', (['f', 'x'], {}), '(f, x)\n', (160, 166), False, 'from common_function import function_1, numerical_diff\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from perlin import generate_perlin
def gaussian_2d_fast(size, amp, mu_x, mu_y, sigma):
x = np.arange(0, 1, 1/size[0])
y = np.arange(0, 1, 1/size[1])
xs, ys = np.meshgrid(x,y)
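    # wrap-around (toroidal) distances on the unit square: a point near 0 is also close to a point near 1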
dxs = np.minimum(np.abs(xs-mu_x), 1-np.abs(xs-mu_x))
dys = np.minimum(np.abs(ys-mu_y), 1-np.abs(ys-mu_y))
heat_map = amp*np.exp(-(dxs**2+dys**2)/(2*sigma**2))
return heat_map
def excitability_matrix(sigma_e, sigma_i, perlin_scale, grid_offset,
p_e=0.05, p_i=0.05, we=0.22, g=4,
n_row_e=120, n_row_i=60, mu_gwn=0, multiple_connections=True,
expected_connectivity=True, is_plot=True):
n_pop_e = n_row_e**2
n_pop_i = n_row_i**2
gL = 25 * 1e-9 # Siemens
p_max_e = p_e / (2 * np.pi * sigma_e**2)
p_max_i = p_i / (2 * np.pi * sigma_i**2)
# Two landscapes: e and i. The contribution of each neuron is stored separately in the n_row_e**2 matrices
e_landscape = np.zeros((n_row_e**2, n_row_e, n_row_e))
i_landscape = np.zeros((n_row_i**2, n_row_e, n_row_e))
perlin = generate_perlin(n_row_e, perlin_scale, seed_value=0)
x = np.arange(0,1,1/n_row_e)
y = np.arange(0,1,1/n_row_e)
X, Y = np.meshgrid(x,y)
U = np.cos(perlin)
V = np.sin(perlin)
# Excitatory
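    # each excitatory source connects with a Gaussian profile whose centre is shifted along the local Perlin-flow direction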
mu_xs = np.arange(0,1,1/n_row_e)
mu_ys = np.arange(0,1,1/n_row_e)
counter = 0
for i, mu_x in enumerate(mu_xs):
for j, mu_y in enumerate(mu_ys):
x_offset = grid_offset / n_row_e * np.cos(perlin[i,j])
y_offset = grid_offset / n_row_e * np.sin(perlin[i,j])
mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_e, mu_x+x_offset, mu_y+y_offset, sigma_e)
if not multiple_connections:
#clip probabilities at 1
e_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
else:
e_landscape[counter] = mh
counter += 1
# Inhibitory
mu_xs = np.arange(1/n_row_e,1+1/n_row_e,1/n_row_i)
mu_ys = np.arange(1/n_row_e,1+1/n_row_e,1/n_row_i)
counter = 0
for mu_x in mu_xs:
for mu_y in mu_ys:
mh = gaussian_2d_fast((n_row_e, n_row_e), p_max_i, mu_x, mu_y, sigma_i)
if not multiple_connections:
#clip probabilities at 1
i_landscape[counter] = np.minimum(mh, np.ones(mh.shape))
else:
i_landscape[counter] = mh
counter += 1
    # in total there should be n_pop_e * (n_pop_e * p_e) = 10 368 000 e-connections
    # and n_pop_i * (n_pop_e * p_i) = 2 592 000 i-connections
num_e_connections = np.sum(e_landscape)
num_i_connections = np.sum(i_landscape)
if multiple_connections:
e_calibration = 1
i_calibration = 1
else:
e_calibration = n_pop_e * n_pop_e * p_e / num_e_connections
i_calibration = n_pop_i * n_pop_e * p_i / num_i_connections
print('e_calibration is ', e_calibration)
print('i_calibration is ', i_calibration)
if expected_connectivity:
# calculate expected number of connections
e_landscape = n_row_e**2*np.mean(e_landscape, axis=0)
i_landscape = n_row_i**2*np.mean(i_landscape, axis=0)
else: # we sample
sample_e_landscape = np.zeros((n_row_e, n_row_e))
for i in range(n_row_e):
for j in range(n_row_e):
neuron = e_landscape[:, i, j]
random_numbers = np.random.random(n_row_e**2)
num_connected = len(np.where(random_numbers<neuron)[0])
sample_e_landscape[i, j] = num_connected
sample_i_landscape = np.zeros((n_row_e, n_row_e))
for i in range(n_row_e):
for j in range(n_row_e):
neuron = i_landscape[:, i, j]
random_numbers = np.random.random(n_row_i**2)
num_connected = len(np.where(random_numbers<neuron)[0])
sample_i_landscape[i, j] = num_connected
e_landscape = sample_e_landscape
i_landscape = sample_i_landscape
# Now we fill a landscape with physical units (mV)
rest_pot = -70 # mV
thres_pot = -55 # mV
ext_pot = mu_gwn / gL * 1e3 #mV
no_activity_pot = rest_pot + ext_pot # -56 mV when mu_gwn = 350 pA
landscape = no_activity_pot * np.ones((n_row_e, n_row_e))
# Synapse strengths
we = we * e_calibration #mV
wi = -g * we * i_calibration / e_calibration #mV
landscape += we * e_landscape
landscape += wi * i_landscape
# scale X and Y quiver according to values in ei_landscape. first normalize landscape
norm_landscape = np.copy(landscape)
norm_landscape -= np.amin(norm_landscape)
norm_landscape /= np.amax(norm_landscape)
U = 0.5*np.multiply(U, norm_landscape)
V = 0.5*np.multiply(V, norm_landscape)
if is_plot:
# Plot
plt.figure(figsize=(8,8))
if expected_connectivity:
mode = 'Expected '
else:
mode = 'Sampled '
plt.title(mode+'EI landscape')
plt.imshow(landscape, origin='lower', extent=[0,1,0,1])
norm = mpl.colors.Normalize(vmin=round(np.amin(landscape)), vmax=round(np.amax(landscape)))
plt.colorbar(mpl.cm.ScalarMappable(norm=norm), label='mV')
plt.quiver(X, Y, U, V, units='xy', scale=50)
plt.suptitle(r'$\sigma_e=$'+str(sigma_e)+r', $\sigma_i=$'+str(sigma_i)+', perlin scale='+str(perlin_scale)+', g='+str(g),
fontsize=15)
plt.show()
# Plot binary landscape (below/above threshold)
        above_thres = np.where(np.reshape(landscape, n_row_e**2) > thres_pot)
        binary_landscape = np.zeros(n_row_e**2)
        binary_landscape[above_thres] = 1
        binary_landscape = np.reshape(binary_landscape, (n_row_e, n_row_e))
plt.figure(figsize=(8,8))
plt.title(mode+'EI landscape (binary)')
plt.imshow(binary_landscape, origin='lower', extent=[0,1,0,1])
plt.quiver(X, Y, U, V, units='xy', scale=50)
plt.suptitle(r'$\sigma_e=$'+str(sigma_e)+r', $\sigma_i=$'+str(sigma_i)+', perlin scale='+str(perlin_scale)+', g='+str(g),
fontsize=15)
plt.show()
return landscape, X, Y, U, V
|
[
"numpy.sin",
"numpy.arange",
"matplotlib.pyplot.imshow",
"numpy.mean",
"numpy.multiply",
"numpy.reshape",
"numpy.random.random",
"numpy.where",
"numpy.exp",
"matplotlib.cm.ScalarMappable",
"perlin.generate_perlin",
"numpy.meshgrid",
"numpy.abs",
"matplotlib.pyplot.quiver",
"numpy.amin",
"numpy.ones",
"numpy.cos",
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"numpy.copy",
"numpy.sum",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.amax"
] |
[((172, 200), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / size[0])'], {}), '(0, 1, 1 / size[0])\n', (181, 200), True, 'import numpy as np\n'), ((207, 235), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / size[1])'], {}), '(0, 1, 1 / size[1])\n', (216, 235), True, 'import numpy as np\n'), ((247, 264), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (258, 264), True, 'import numpy as np\n'), ((1046, 1088), 'numpy.zeros', 'np.zeros', (['(n_row_e ** 2, n_row_e, n_row_e)'], {}), '((n_row_e ** 2, n_row_e, n_row_e))\n', (1054, 1088), True, 'import numpy as np\n'), ((1105, 1147), 'numpy.zeros', 'np.zeros', (['(n_row_i ** 2, n_row_e, n_row_e)'], {}), '((n_row_i ** 2, n_row_e, n_row_e))\n', (1113, 1147), True, 'import numpy as np\n'), ((1159, 1211), 'perlin.generate_perlin', 'generate_perlin', (['n_row_e', 'perlin_scale'], {'seed_value': '(0)'}), '(n_row_e, perlin_scale, seed_value=0)\n', (1174, 1211), False, 'from perlin import generate_perlin\n'), ((1220, 1248), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1229, 1248), True, 'import numpy as np\n'), ((1253, 1281), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1262, 1281), True, 'import numpy as np\n'), ((1289, 1306), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (1300, 1306), True, 'import numpy as np\n'), ((1314, 1328), 'numpy.cos', 'np.cos', (['perlin'], {}), '(perlin)\n', (1320, 1328), True, 'import numpy as np\n'), ((1337, 1351), 'numpy.sin', 'np.sin', (['perlin'], {}), '(perlin)\n', (1343, 1351), True, 'import numpy as np\n'), ((1382, 1410), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1391, 1410), True, 'import numpy as np\n'), ((1419, 1447), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(1 / n_row_e)'], {}), '(0, 1, 1 / n_row_e)\n', (1428, 1447), True, 'import numpy as np\n'), ((2047, 2099), 'numpy.arange', 'np.arange', (['(1 / n_row_e)', '(1 + 1 / n_row_e)', '(1 / n_row_i)'], {}), '(1 / n_row_e, 1 + 1 / n_row_e, 1 / n_row_i)\n', (2056, 2099), True, 'import numpy as np\n'), ((2102, 2154), 'numpy.arange', 'np.arange', (['(1 / n_row_e)', '(1 + 1 / n_row_e)', '(1 / n_row_i)'], {}), '(1 / n_row_e, 1 + 1 / n_row_e, 1 / n_row_i)\n', (2111, 2154), True, 'import numpy as np\n'), ((2713, 2732), 'numpy.sum', 'np.sum', (['e_landscape'], {}), '(e_landscape)\n', (2719, 2732), True, 'import numpy as np\n'), ((2757, 2776), 'numpy.sum', 'np.sum', (['i_landscape'], {}), '(i_landscape)\n', (2763, 2776), True, 'import numpy as np\n'), ((4719, 4737), 'numpy.copy', 'np.copy', (['landscape'], {}), '(landscape)\n', (4726, 4737), True, 'import numpy as np\n'), ((4760, 4783), 'numpy.amin', 'np.amin', (['norm_landscape'], {}), '(norm_landscape)\n', (4767, 4783), True, 'import numpy as np\n'), ((4806, 4829), 'numpy.amax', 'np.amax', (['norm_landscape'], {}), '(norm_landscape)\n', (4813, 4829), True, 'import numpy as np\n'), ((286, 303), 'numpy.abs', 'np.abs', (['(xs - mu_x)'], {}), '(xs - mu_x)\n', (292, 303), True, 'import numpy as np\n'), ((343, 360), 'numpy.abs', 'np.abs', (['(ys - mu_y)'], {}), '(ys - mu_y)\n', (349, 360), True, 'import numpy as np\n'), ((399, 448), 'numpy.exp', 'np.exp', (['(-(dxs ** 2 + dys ** 2) / (2 * sigma ** 2))'], {}), '(-(dxs ** 2 + dys ** 2) / (2 * sigma ** 2))\n', (405, 448), True, 'import numpy as np\n'), ((3364, 3392), 'numpy.zeros', 'np.zeros', (['(n_row_e, n_row_e)'], {}), '((n_row_e, n_row_e))\n', (3372, 3392), True, 'import numpy as np\n'), 
((3730, 3758), 'numpy.zeros', 'np.zeros', (['(n_row_e, n_row_e)'], {}), '((n_row_e, n_row_e))\n', (3738, 3758), True, 'import numpy as np\n'), ((4399, 4426), 'numpy.ones', 'np.ones', (['(n_row_e, n_row_e)'], {}), '((n_row_e, n_row_e))\n', (4406, 4426), True, 'import numpy as np\n'), ((4843, 4873), 'numpy.multiply', 'np.multiply', (['U', 'norm_landscape'], {}), '(U, norm_landscape)\n', (4854, 4873), True, 'import numpy as np\n'), ((4886, 4916), 'numpy.multiply', 'np.multiply', (['V', 'norm_landscape'], {}), '(V, norm_landscape)\n', (4897, 4916), True, 'import numpy as np\n'), ((4957, 4983), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4967, 4983), True, 'import matplotlib.pyplot as plt\n'), ((5100, 5132), 'matplotlib.pyplot.title', 'plt.title', (["(mode + 'EI landscape')"], {}), "(mode + 'EI landscape')\n", (5109, 5132), True, 'import matplotlib.pyplot as plt\n'), ((5139, 5197), 'matplotlib.pyplot.imshow', 'plt.imshow', (['landscape'], {'origin': '"""lower"""', 'extent': '[0, 1, 0, 1]'}), "(landscape, origin='lower', extent=[0, 1, 0, 1])\n", (5149, 5197), True, 'import matplotlib.pyplot as plt\n'), ((5370, 5414), 'matplotlib.pyplot.quiver', 'plt.quiver', (['X', 'Y', 'U', 'V'], {'units': '"""xy"""', 'scale': '(50)'}), "(X, Y, U, V, units='xy', scale=50)\n", (5380, 5414), True, 'import matplotlib.pyplot as plt\n'), ((5578, 5588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5586, 5588), True, 'import matplotlib.pyplot as plt\n'), ((5744, 5759), 'numpy.zeros', 'np.zeros', (['(14400)'], {}), '(14400)\n', (5752, 5759), True, 'import numpy as np\n'), ((5829, 5869), 'numpy.reshape', 'np.reshape', (['binary_landscape', '(120, 120)'], {}), '(binary_landscape, (120, 120))\n', (5839, 5869), True, 'import numpy as np\n'), ((5877, 5903), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (5887, 5903), True, 'import matplotlib.pyplot as plt\n'), ((5911, 5952), 'matplotlib.pyplot.title', 'plt.title', (["(mode + 'EI landscape (binary)')"], {}), "(mode + 'EI landscape (binary)')\n", (5920, 5952), True, 'import matplotlib.pyplot as plt\n'), ((5959, 6024), 'matplotlib.pyplot.imshow', 'plt.imshow', (['binary_landscape'], {'origin': '"""lower"""', 'extent': '[0, 1, 0, 1]'}), "(binary_landscape, origin='lower', extent=[0, 1, 0, 1])\n", (5969, 6024), True, 'import matplotlib.pyplot as plt\n'), ((6030, 6074), 'matplotlib.pyplot.quiver', 'plt.quiver', (['X', 'Y', 'U', 'V'], {'units': '"""xy"""', 'scale': '(50)'}), "(X, Y, U, V, units='xy', scale=50)\n", (6040, 6074), True, 'import matplotlib.pyplot as plt\n'), ((6238, 6248), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6246, 6248), True, 'import matplotlib.pyplot as plt\n'), ((305, 322), 'numpy.abs', 'np.abs', (['(xs - mu_x)'], {}), '(xs - mu_x)\n', (311, 322), True, 'import numpy as np\n'), ((362, 379), 'numpy.abs', 'np.abs', (['(ys - mu_y)'], {}), '(ys - mu_y)\n', (368, 379), True, 'import numpy as np\n'), ((3219, 3247), 'numpy.mean', 'np.mean', (['e_landscape'], {'axis': '(0)'}), '(e_landscape, axis=0)\n', (3226, 3247), True, 'import numpy as np\n'), ((3281, 3309), 'numpy.mean', 'np.mean', (['i_landscape'], {'axis': '(0)'}), '(i_landscape, axis=0)\n', (3288, 3309), True, 'import numpy as np\n'), ((5316, 5348), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'norm': 'norm'}), '(norm=norm)\n', (5337, 5348), True, 'import matplotlib as mpl\n'), ((1587, 1607), 'numpy.cos', 'np.cos', (['perlin[i, j]'], {}), '(perlin[i, j])\n', (1593, 
1607), True, 'import numpy as np\n'), ((1654, 1674), 'numpy.sin', 'np.sin', (['perlin[i, j]'], {}), '(perlin[i, j])\n', (1660, 1674), True, 'import numpy as np\n'), ((3542, 3572), 'numpy.random.random', 'np.random.random', (['(n_row_e ** 2)'], {}), '(n_row_e ** 2)\n', (3558, 3572), True, 'import numpy as np\n'), ((3908, 3938), 'numpy.random.random', 'np.random.random', (['(n_row_i ** 2)'], {}), '(n_row_i ** 2)\n', (3924, 3938), True, 'import numpy as np\n'), ((5677, 5705), 'numpy.reshape', 'np.reshape', (['landscape', '(14400)'], {}), '(landscape, 14400)\n', (5687, 5705), True, 'import numpy as np\n'), ((1913, 1930), 'numpy.ones', 'np.ones', (['mh.shape'], {}), '(mh.shape)\n', (1920, 1930), True, 'import numpy as np\n'), ((2432, 2449), 'numpy.ones', 'np.ones', (['mh.shape'], {}), '(mh.shape)\n', (2439, 2449), True, 'import numpy as np\n'), ((5242, 5260), 'numpy.amin', 'np.amin', (['landscape'], {}), '(landscape)\n', (5249, 5260), True, 'import numpy as np\n'), ((5274, 5292), 'numpy.amax', 'np.amax', (['landscape'], {}), '(landscape)\n', (5281, 5292), True, 'import numpy as np\n'), ((3607, 3640), 'numpy.where', 'np.where', (['(random_numbers < neuron)'], {}), '(random_numbers < neuron)\n', (3615, 3640), True, 'import numpy as np\n'), ((3973, 4006), 'numpy.where', 'np.where', (['(random_numbers < neuron)'], {}), '(random_numbers < neuron)\n', (3981, 4006), True, 'import numpy as np\n')]
|
"""
Module: libfmp.c8.c8s2_salience
Author: <NAME>, <NAME>
License: The MIT license, https://opensource.org/licenses/MIT
This file is part of the FMP Notebooks (https://www.audiolabs-erlangen.de/FMP)
"""
import numpy as np
import librosa
from scipy import ndimage
from numba import jit
import libfmp.b
import libfmp.c8
@jit(nopython=True)
def principal_argument(v):
"""Principal argument function
| Notebook: C6/C6S1_NoveltyPhase.ipynb, see also
| Notebook: C8/C8S2_InstantFreqEstimation.ipynb
Args:
v (float or np.ndarray): Value (or vector of values)
Returns:
        w (float or np.ndarray): Principal value of v
"""
w = np.mod(v + 0.5, 1) - 0.5
return w
@jit(nopython=True)
def compute_if(X, Fs, N, H):
    """Instantaneous frequency (IF) estimation
| Notebook: C8/C8S2_InstantFreqEstimation.ipynb, see also
| Notebook: C6/C6S1_NoveltyPhase.ipynb
Args:
X (np.ndarray): STFT
Fs (scalar): Sampling rate
N (int): Window size in samples
H (int): Hop size in samples
Returns:
F_coef_IF (np.ndarray): Matrix of IF values
"""
phi_1 = np.angle(X[:, 0:-1]) / (2 * np.pi)
phi_2 = np.angle(X[:, 1:]) / (2 * np.pi)
K = X.shape[0]
index_k = np.arange(0, K).reshape(-1, 1)
# Bin offset (FMP, Eq. (8.45))
kappa = (N / H) * principal_argument(phi_2 - phi_1 - index_k * H / N)
# Instantaneous frequencies (FMP, Eq. (8.44))
F_coef_IF = (index_k + kappa) * Fs / N
# Extend F_coef_IF by copying first column to match dimensions of X
F_coef_IF = np.hstack((np.copy(F_coef_IF[:, 0]).reshape(-1, 1), F_coef_IF))
return F_coef_IF
@jit(nopython=True)
def f_coef(k, Fs, N):
"""STFT center frequency
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
k (int): Coefficient number
Fs (scalar): Sampling rate in Hz
N (int): Window length in samples
Returns:
freq (float): STFT center frequency
"""
return k * Fs / N
@jit(nopython=True)
def frequency_to_bin_index(F, R=10.0, F_ref=55.0):
"""| Binning function with variable frequency resolution
| Note: Indexing starts with 0 (opposed to [FMP, Eq. (8.49)])
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
F (float): Frequency in Hz
R (float): Frequency resolution in cents (Default value = 10.0)
F_ref (float): Reference frequency in Hz (Default value = 55.0)
Returns:
bin_index (int): Index for bin (starting with index 0)
"""
bin_index = np.floor((1200 / R) * np.log2(F / F_ref) + 0.5).astype(np.int64)
return bin_index
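# Worked example (added note): with the defaults R = 10 cents and F_ref = 55 Hz,
# the frequency F = 110 Hz (one octave above the reference) maps to bin
# floor((1200 / 10) * log2(110 / 55) + 0.5) = floor(120.5) = 120, i.e.
# frequency_to_bin_index(np.array([110.0]))[0] == 120.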
@jit(nopython=True)
def p_bin(b, freq, R=10.0, F_ref=55.0):
"""Computes binning mask [FMP, Eq. (8.50)]
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
b (int): Bin index
freq (float): Center frequency
R (float): Frequency resolution in cents (Default value = 10.0)
F_ref (float): Reference frequency in Hz (Default value = 55.0)
Returns:
mask (float): Binning mask
"""
mask = frequency_to_bin_index(freq, R, F_ref) == b
mask = mask.reshape(-1, 1)
return mask
@jit(nopython=True)
def compute_y_lf_bin(Y, Fs, N, R=10.0, F_min=55.0, F_max=1760.0):
"""Log-frequency Spectrogram with variable frequency resolution using binning
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
Y (np.ndarray): Magnitude spectrogram
Fs (scalar): Sampling rate in Hz
N (int): Window length in samples
R (float): Frequency resolution in cents (Default value = 10.0)
F_min (float): Lower frequency bound (reference frequency) (Default value = 55.0)
F_max (float): Upper frequency bound (is included) (Default value = 1760.0)
Returns:
Y_LF_bin (np.ndarray): Binned log-frequency spectrogram
F_coef_hertz (np.ndarray): Frequency axis in Hz
F_coef_cents (np.ndarray): Frequency axis in cents
"""
# [FMP, Eq. (8.51)]
B = frequency_to_bin_index(np.array([F_max]), R, F_min)[0] + 1
F_coef_hertz = 2 ** (np.arange(0, B) * R / 1200) * F_min
F_coef_cents = np.arange(0, B*R, R)
Y_LF_bin = np.zeros((B, Y.shape[1]))
K = Y.shape[0]
freq = f_coef(np.arange(0, K), Fs, N)
freq_lim_idx = np.where(np.logical_and(freq >= F_min, freq <= F_max))[0]
freq_lim = freq[freq_lim_idx]
Y_lim = Y[freq_lim_idx, :]
for b in range(B):
coef_mask = p_bin(b, freq_lim, R, F_min)
Y_LF_bin[b, :] = (Y_lim*coef_mask).sum(axis=0)
return Y_LF_bin, F_coef_hertz, F_coef_cents
@jit(nopython=True)
def p_bin_if(b, F_coef_IF, R=10.0, F_ref=55.0):
"""Computes binning mask for instantaneous frequency binning [FMP, Eq. (8.52)]
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
b (int): Bin index
F_coef_IF (float): Instantaneous frequencies
R (float): Frequency resolution in cents (Default value = 10.0)
F_ref (float): Reference frequency in Hz (Default value = 55.0)
Returns:
mask (np.ndarray): Binning mask
"""
mask = frequency_to_bin_index(F_coef_IF, R, F_ref) == b
return mask
@jit(nopython=True)
def compute_y_lf_if_bin(X, Fs, N, H, R=10, F_min=55.0, F_max=1760.0, gamma=0.0):
"""Binned Log-frequency Spectrogram with variable frequency resolution based on instantaneous frequency
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
X (np.ndarray): Complex spectrogram
Fs (scalar): Sampling rate in Hz
N (int): Window length in samples
H (int): Hopsize in samples
R (float): Frequency resolution in cents (Default value = 10)
F_min (float): Lower frequency bound (reference frequency) (Default value = 55.0)
F_max (float): Upper frequency bound (Default value = 1760.0)
gamma (float): Logarithmic compression factor (Default value = 0.0)
Returns:
Y_LF_IF_bin (np.ndarray): Binned log-frequency spectrogram using instantaneous frequency
F_coef_hertz (np.ndarray): Frequency axis in Hz
F_coef_cents (np.ndarray): Frequency axis in cents
"""
# Compute instantaneous frequencies
F_coef_IF = libfmp.c8.compute_if(X, Fs, N, H)
freq_lim_mask = np.logical_and(F_coef_IF >= F_min, F_coef_IF < F_max)
F_coef_IF = F_coef_IF * freq_lim_mask
    # Initialize output array and compute frequency axis
B = frequency_to_bin_index(np.array([F_max]), R, F_min)[0] + 1
F_coef_hertz = 2 ** (np.arange(0, B) * R / 1200) * F_min
F_coef_cents = np.arange(0, B*R, R)
Y_LF_IF_bin = np.zeros((B, X.shape[1]))
# Magnitude binning
if gamma == 0:
Y = np.abs(X) ** 2
else:
Y = np.log(1 + np.float32(gamma)*np.abs(X))
for b in range(B):
coef_mask = p_bin_if(b, F_coef_IF, R, F_min)
Y_LF_IF_bin[b, :] = (Y * coef_mask).sum(axis=0)
return Y_LF_IF_bin, F_coef_hertz, F_coef_cents
@jit(nopython=True)
def harmonic_summation(Y, num_harm=10, alpha=1.0):
"""Harmonic summation for spectrogram [FMP, Eq. (8.54)]
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
Y (np.ndarray): Magnitude spectrogram
num_harm (int): Number of harmonics (Default value = 10)
alpha (float): Weighting parameter (Default value = 1.0)
Returns:
Y_HS (np.ndarray): Spectrogram after harmonic summation
"""
Y_HS = np.zeros(Y.shape)
Y_zero_pad = np.vstack((Y, np.zeros((Y.shape[0]*num_harm, Y.shape[1]))))
K = Y.shape[0]
for k in range(K):
harm_idx = np.arange(1, num_harm+1)*(k)
weights = alpha ** (np.arange(1, num_harm+1) - 1).reshape(-1, 1)
Y_HS[k, :] = (Y_zero_pad[harm_idx, :] * weights).sum(axis=0)
return Y_HS
@jit(nopython=True)
def harmonic_summation_lf(Y_LF_bin, R, num_harm=10, alpha=1.0):
"""Harmonic summation for log-frequency spectrogram [FMP, Eq. (8.55)]
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
Y_LF_bin (np.ndarray): Log-frequency spectrogram
R (float): Frequency resolution in cents
num_harm (int): Number of harmonics (Default value = 10)
alpha (float): Weighting parameter (Default value = 1.0)
Returns:
Y_LF_bin_HS (np.ndarray): Log-frequency spectrogram after harmonic summation
"""
Y_LF_bin_HS = np.zeros(Y_LF_bin.shape)
pad_len = int(np.floor(np.log2(num_harm) * 1200 / R))
Y_LF_bin_zero_pad = np.vstack((Y_LF_bin, np.zeros((pad_len, Y_LF_bin.shape[1]))))
B = Y_LF_bin.shape[0]
for b in range(B):
harmonics = np.arange(1, num_harm+1)
harm_idx = b + np.floor(np.log2(harmonics) * 1200 / R).astype(np.int64)
weights = alpha ** (np.arange(1, num_harm+1) - 1).reshape(-1, 1)
Y_LF_bin_HS[b, :] = (Y_LF_bin_zero_pad[harm_idx, :] * weights).sum(axis=0)
return Y_LF_bin_HS
def compute_salience_rep(x, Fs, N, H, R, F_min=55.0, F_max=1760.0, num_harm=10, freq_smooth_len=11, alpha=1.0,
gamma=0.0):
"""Salience representation [FMP, Eq. (8.56)]
Notebook: C8/C8S2_SalienceRepresentation.ipynb
Args:
x (np.ndarray): Audio signal
Fs (scalar): Sampling frequency
N (int): Window length in samples
H (int): Hopsize in samples
R (float): Frequency resolution in cents
F_min (float): Lower frequency bound (reference frequency) (Default value = 55.0)
F_max (float): Upper frequency bound (Default value = 1760.0)
num_harm (int): Number of harmonics (Default value = 10)
freq_smooth_len (int): Filter length for vertical smoothing (Default value = 11)
alpha (float): Weighting parameter (Default value = 1.0)
gamma (float): Logarithmic compression factor (Default value = 0.0)
Returns:
Z (np.ndarray): Salience representation
F_coef_hertz (np.ndarray): Frequency axis in Hz
F_coef_cents (np.ndarray): Frequency axis in cents
"""
X = librosa.stft(x, n_fft=N, hop_length=H, win_length=N, pad_mode='constant')
Y_LF_IF_bin, F_coef_hertz, F_coef_cents = compute_y_lf_if_bin(X, Fs, N, H, R, F_min, F_max, gamma=gamma)
# smoothing
Y_LF_IF_bin = ndimage.filters.convolve1d(Y_LF_IF_bin, np.hanning(freq_smooth_len), axis=0, mode='constant')
Z = harmonic_summation_lf(Y_LF_IF_bin, R=R, num_harm=num_harm, alpha=alpha)
return Z, F_coef_hertz, F_coef_cents
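if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original module). The file
    # name 'example.wav' and the parameter values below are placeholder
    # assumptions that only illustrate how compute_salience_rep is called.
    x, Fs = librosa.load('example.wav', sr=22050)
    Z, F_coef_hertz, F_coef_cents = compute_salience_rep(
        x, Fs, N=1024, H=128, R=10.0, F_min=55.0, F_max=1760.0,
        num_harm=10, freq_smooth_len=11, alpha=0.9, gamma=100.0)
    print('Salience representation shape:', Z.shape)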
|
[
"numpy.hanning",
"numpy.abs",
"numpy.copy",
"numpy.logical_and",
"numpy.angle",
"numpy.array",
"numpy.zeros",
"numba.jit",
"librosa.stft",
"numpy.log2",
"numpy.mod",
"numpy.float32",
"numpy.arange"
] |
[((325, 343), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (328, 343), False, 'from numba import jit\n'), ((709, 727), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (712, 727), False, 'from numba import jit\n'), ((1672, 1690), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1675, 1690), False, 'from numba import jit\n'), ((2016, 2034), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2019, 2034), False, 'from numba import jit\n'), ((2645, 2663), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2648, 2663), False, 'from numba import jit\n'), ((3186, 3204), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (3189, 3204), False, 'from numba import jit\n'), ((4608, 4626), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (4611, 4626), False, 'from numba import jit\n'), ((5186, 5204), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (5189, 5204), False, 'from numba import jit\n'), ((6955, 6973), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (6958, 6973), False, 'from numba import jit\n'), ((7767, 7785), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (7770, 7785), False, 'from numba import jit\n'), ((4163, 4185), 'numpy.arange', 'np.arange', (['(0)', '(B * R)', 'R'], {}), '(0, B * R, R)\n', (4172, 4185), True, 'import numpy as np\n'), ((4199, 4224), 'numpy.zeros', 'np.zeros', (['(B, Y.shape[1])'], {}), '((B, Y.shape[1]))\n', (4207, 4224), True, 'import numpy as np\n'), ((6270, 6323), 'numpy.logical_and', 'np.logical_and', (['(F_coef_IF >= F_min)', '(F_coef_IF < F_max)'], {}), '(F_coef_IF >= F_min, F_coef_IF < F_max)\n', (6284, 6323), True, 'import numpy as np\n'), ((6570, 6592), 'numpy.arange', 'np.arange', (['(0)', '(B * R)', 'R'], {}), '(0, B * R, R)\n', (6579, 6592), True, 'import numpy as np\n'), ((6609, 6634), 'numpy.zeros', 'np.zeros', (['(B, X.shape[1])'], {}), '((B, X.shape[1]))\n', (6617, 6634), True, 'import numpy as np\n'), ((7421, 7438), 'numpy.zeros', 'np.zeros', (['Y.shape'], {}), '(Y.shape)\n', (7429, 7438), True, 'import numpy as np\n'), ((8348, 8372), 'numpy.zeros', 'np.zeros', (['Y_LF_bin.shape'], {}), '(Y_LF_bin.shape)\n', (8356, 8372), True, 'import numpy as np\n'), ((9984, 10057), 'librosa.stft', 'librosa.stft', (['x'], {'n_fft': 'N', 'hop_length': 'H', 'win_length': 'N', 'pad_mode': '"""constant"""'}), "(x, n_fft=N, hop_length=H, win_length=N, pad_mode='constant')\n", (9996, 10057), False, 'import librosa\n'), ((668, 686), 'numpy.mod', 'np.mod', (['(v + 0.5)', '(1)'], {}), '(v + 0.5, 1)\n', (674, 686), True, 'import numpy as np\n'), ((1147, 1167), 'numpy.angle', 'np.angle', (['X[:, 0:-1]'], {}), '(X[:, 0:-1])\n', (1155, 1167), True, 'import numpy as np\n'), ((1194, 1212), 'numpy.angle', 'np.angle', (['X[:, 1:]'], {}), '(X[:, 1:])\n', (1202, 1212), True, 'import numpy as np\n'), ((4263, 4278), 'numpy.arange', 'np.arange', (['(0)', 'K'], {}), '(0, K)\n', (4272, 4278), True, 'import numpy as np\n'), ((8586, 8612), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (8595, 8612), True, 'import numpy as np\n'), ((10241, 10268), 'numpy.hanning', 'np.hanning', (['freq_smooth_len'], {}), '(freq_smooth_len)\n', (10251, 10268), True, 'import numpy as np\n'), ((1261, 1276), 'numpy.arange', 'np.arange', (['(0)', 'K'], {}), '(0, K)\n', (1270, 1276), True, 'import numpy as np\n'), ((4315, 4359), 'numpy.logical_and', 
'np.logical_and', (['(freq >= F_min)', '(freq <= F_max)'], {}), '(freq >= F_min, freq <= F_max)\n', (4329, 4359), True, 'import numpy as np\n'), ((6691, 6700), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (6697, 6700), True, 'import numpy as np\n'), ((7470, 7515), 'numpy.zeros', 'np.zeros', (['(Y.shape[0] * num_harm, Y.shape[1])'], {}), '((Y.shape[0] * num_harm, Y.shape[1]))\n', (7478, 7515), True, 'import numpy as np\n'), ((7577, 7603), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (7586, 7603), True, 'import numpy as np\n'), ((8476, 8514), 'numpy.zeros', 'np.zeros', (['(pad_len, Y_LF_bin.shape[1])'], {}), '((pad_len, Y_LF_bin.shape[1]))\n', (8484, 8514), True, 'import numpy as np\n'), ((4047, 4064), 'numpy.array', 'np.array', (['[F_max]'], {}), '([F_max])\n', (4055, 4064), True, 'import numpy as np\n'), ((6454, 6471), 'numpy.array', 'np.array', (['[F_max]'], {}), '([F_max])\n', (6462, 6471), True, 'import numpy as np\n'), ((1594, 1618), 'numpy.copy', 'np.copy', (['F_coef_IF[:, 0]'], {}), '(F_coef_IF[:, 0])\n', (1601, 1618), True, 'import numpy as np\n'), ((4108, 4123), 'numpy.arange', 'np.arange', (['(0)', 'B'], {}), '(0, B)\n', (4117, 4123), True, 'import numpy as np\n'), ((6515, 6530), 'numpy.arange', 'np.arange', (['(0)', 'B'], {}), '(0, B)\n', (6524, 6530), True, 'import numpy as np\n'), ((6739, 6756), 'numpy.float32', 'np.float32', (['gamma'], {}), '(gamma)\n', (6749, 6756), True, 'import numpy as np\n'), ((6757, 6766), 'numpy.abs', 'np.abs', (['X'], {}), '(X)\n', (6763, 6766), True, 'import numpy as np\n'), ((8400, 8417), 'numpy.log2', 'np.log2', (['num_harm'], {}), '(num_harm)\n', (8407, 8417), True, 'import numpy as np\n'), ((2578, 2596), 'numpy.log2', 'np.log2', (['(F / F_ref)'], {}), '(F / F_ref)\n', (2585, 2596), True, 'import numpy as np\n'), ((7634, 7660), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (7643, 7660), True, 'import numpy as np\n'), ((8719, 8745), 'numpy.arange', 'np.arange', (['(1)', '(num_harm + 1)'], {}), '(1, num_harm + 1)\n', (8728, 8745), True, 'import numpy as np\n'), ((8643, 8661), 'numpy.log2', 'np.log2', (['harmonics'], {}), '(harmonics)\n', (8650, 8661), True, 'import numpy as np\n')]
|
from pandas import Series
from igraph import *
from numba import jit
import numpy as np
import os
# import time
# Gather all the files.
files = os.listdir('timeseries/')
# Concatenate (or stack) all the files.
# Approx 12.454981 seconds
i = 0
for f in files:
if i == 0:
ts_matrix = np.loadtxt('timeseries/' + f).T
i += 1
else:
new_ts = np.loadtxt('timeseries/' + f).T
ts_matrix = np.hstack((ts_matrix, new_ts))
"""
Compute the correlation matrix
"""
corr_mat = np.corrcoef(ts_matrix.T)
# Save in .npz file
# np.savez_compressed('corr_mat.npz', corr_mat=corr_mat)
# X = np.load('corr_mat.npz')
# X = X['corr_mat']
# a flatten function optimized by numba
@jit
def fast_flatten(X):
k = 0
length = X.shape[0] * X.shape[1]
    X_flat = np.empty(length)
for i in range(X.shape[0]):
for j in range(X.shape[1]):
X_flat[k] = X[i, j]
k += 1
return X_flat
# Helper that returns the cutoff correlation value: the smallest of the top
# int(total number of entries * threshold) unique values in the matrix
def min_thresh_val(X, threshold):
X_flat = fast_flatten(X)
index = int(len(X_flat) * threshold)
    return np.unique(np.sort(X_flat))[::-1][:index].min()
# Computes the threshold matrix without killing the python kernel
@jit
def thresh_mat(X, threshold):
min_val = min_thresh_val(X, threshold)
print("Done with min_thresh_val")
# M = zeros((X.shape[0], X.shape[1]))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
# if X[i, j] >= min_val:
# M[i, j] = X[i, j]
if X[i, j] < min_val:
X[i, j] = 0
X = corr_mat  # threshold the correlation matrix computed above
thresh_mat(X, .01)
print("Finished Threshold Matrix")
# savez_compressed('threshold_mat.npz', threshold_mat=X)
# from: http://stackoverflow.com/questions/29655111/igraph-graph-from-numpy-or-pandas-adjacency-matrix
# get the row, col indices of the non-zero elements in your adjacency matrix
conn_indices = np.where(X)
# get the weights corresponding to these indices
weights = X[conn_indices]
# a sequence of (i, j) tuples, each corresponding to an edge from i -> j
edges = zip(*conn_indices)
# initialize the graph from the edge sequence
G = Graph(edges=edges, directed=False)
# assign node names and weights to be attributes of the vertices and edges
# respectively
G.vs['label'] = np.arange(X.shape[0])
G.es['weight'] = weights
# get the vertex clustering corresponding to the best modularity
cm = G.community_multilevel()
# save the cluster membership of each node in a csv file
Series(cm.membership).to_csv('mem.csv', index=False)
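# Optional sanity check (added sketch): report how many nodes ended up in each
# detected community.
from collections import Counter
print('Community sizes:', dict(Counter(cm.membership)))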
|
[
"pandas.Series",
"os.listdir",
"numpy.hstack",
"numpy.where",
"numpy.corrcoef",
"numpy.loadtxt",
"numpy.arange"
] |
[((145, 170), 'os.listdir', 'os.listdir', (['"""timeseries/"""'], {}), "('timeseries/')\n", (155, 170), False, 'import os\n'), ((504, 528), 'numpy.corrcoef', 'np.corrcoef', (['ts_matrix.T'], {}), '(ts_matrix.T)\n', (515, 528), True, 'import numpy as np\n'), ((1929, 1940), 'numpy.where', 'np.where', (['X'], {}), '(X)\n', (1937, 1940), True, 'import numpy as np\n'), ((2311, 2332), 'numpy.arange', 'np.arange', (['X.shape[0]'], {}), '(X.shape[0])\n', (2320, 2332), True, 'import numpy as np\n'), ((422, 452), 'numpy.hstack', 'np.hstack', (['(ts_matrix, new_ts)'], {}), '((ts_matrix, new_ts))\n', (431, 452), True, 'import numpy as np\n'), ((2512, 2533), 'pandas.Series', 'Series', (['cm.membership'], {}), '(cm.membership)\n', (2518, 2533), False, 'from pandas import Series\n'), ((296, 325), 'numpy.loadtxt', 'np.loadtxt', (["('timeseries/' + f)"], {}), "('timeseries/' + f)\n", (306, 325), True, 'import numpy as np\n'), ((370, 399), 'numpy.loadtxt', 'np.loadtxt', (["('timeseries/' + f)"], {}), "('timeseries/' + f)\n", (380, 399), True, 'import numpy as np\n')]
|
import numpy as np
# Part 1
data = np.loadtxt('data.csv')
def get_number_of_times_count_increased(data):
increased_counter = 0
for i in range(len(data)-1):
if data[i+1]>data[i]:
increased_counter +=1
return increased_counter
data = np.loadtxt('data.csv')
increased_counter = get_number_of_times_count_increased(data)
print(f'The measurement is larger than the previous one {increased_counter} times')
# Part II
window_size = 3
window_sums = []
for i in range(len(data) - window_size + 1):
print(data[i: i + window_size])
window_sum = np.sum(data[i: i+window_size])
print(window_sum)
window_sums.append(window_sum)
increased_counter_window = get_number_of_times_count_increased(window_sums)
print(f'The sliding-window sum is larger than the previous one {increased_counter_window} times')
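# Vectorized alternative (added sketch): the same sliding-window sums can be
# obtained without the explicit loop by convolving with a window of ones.
window_sums_vec = np.convolve(data, np.ones(window_size), mode='valid')
assert np.allclose(window_sums_vec, window_sums)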
|
[
"numpy.sum",
"numpy.loadtxt"
] |
[((37, 59), 'numpy.loadtxt', 'np.loadtxt', (['"""data.csv"""'], {}), "('data.csv')\n", (47, 59), True, 'import numpy as np\n'), ((268, 290), 'numpy.loadtxt', 'np.loadtxt', (['"""data.csv"""'], {}), "('data.csv')\n", (278, 290), True, 'import numpy as np\n'), ((568, 599), 'numpy.sum', 'np.sum', (['data[i:i + window_size]'], {}), '(data[i:i + window_size])\n', (574, 599), True, 'import numpy as np\n')]
|
# Copyright (c) 2019 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM,OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import logging
import cv2
import numpy as np
import time
from datetime import datetime
from random import uniform
"""Visual trigger for PCB anomaly detection.
"""
class Udf:
"""PCB anomaly detection trigger object.
"""
def __init__(self, n_right_px, n_left_px,
n_total_px, training_mode, scale_ratio):
"""Udf constructor
:param n_right_px: minimum number of pixels
to the right of PCB mask (default 1000)
:type n_right_px: int
:param n_left_px: minimum number of pixels
to the left of the PCB mask (default 1000)
:type n_left_px: int
:param n_total_px: minimum number of pixels
in the PCB mask (default 300000)
:type n_total_px: int
:param training_mode: flag to save image ROI's
for training (default false)
:type training_mode: bool
"""
self.log = logging.getLogger('PCB_FILTER')
self.log.debug("In ctor")
self.ratio = scale_ratio
# Initialize background subtractor
self.fgbg = cv2.createBackgroundSubtractorMOG2()
# Total white pixel # on MOG applied
# frame after morphological operations
self.n_total_px = n_total_px/(self.ratio*self.ratio)
# Total white pixel # on left edge of MOG
# applied frame after morphological operations
self.n_left_px = n_left_px/(self.ratio*self.ratio)
# Total white pixel # on right edge of MOG
# applied frame after morphological operations
self.n_right_px = n_right_px/(self.ratio*self.ratio)
# Flag to lock trigger from forwarding frames to classifier
self.filter_lock = False
self.training_mode = training_mode
self.profiling = False
self.count = 0
self.lock_frame_count = 0
self.threads = 0
def _check_frame(self, frame):
"""Determines if the given frame is the key frame of interest for
further processing or not
:param frame: frame blob
:type frame: numpy.ndarray
:return: True if the given frame is a key frame, else False
:rtype: bool
"""
# Apply background subtractor on frame
fgmask = self.fgbg.apply(frame)
rows, columns = fgmask.shape
if self.filter_lock is False:
# Applying morphological operations
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
ret, thresh = cv2.threshold(fgmask, 0, 255,
cv2.THRESH_BINARY+cv2.THRESH_OTSU)
thresh = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel)
# Crop left and right edges of frame
left = thresh[:, 0:10]
right = thresh[:, (columns - 10):(columns)]
# Count the # of white pixels in thresh
n_total = np.sum(thresh == 255)
n_left = np.sum(left == 255)
n_right = np.sum(right == 255)
# If the PCB is in view of camera & is not
# touching the left, right edge of frame
if (n_total > self.n_total_px) & \
(n_left < self.n_left_px) & \
(n_right < self.n_right_px):
# Find the PCB contour
contours, hier = cv2.findContours(thresh.copy(),
cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_NONE)
if len(contours) != 0:
# Contour with largest area would be bounding the PCB
c = max(contours, key=cv2.contourArea)
# Obtain the bounding rectangle
# for the contour and calculate the center
x, y, w, h = cv2.boundingRect(c)
cX = int(x + (w / 2))
# If the rectangle bounding the
# PCB doesn't touch the left or right edge
# of frame and the center x lies within
if (x != 0) & ((x + w) != columns) & \
((columns/2 - (100/self.ratio)) <= cX and
cX <= (columns/2 + (100/self.ratio))):
return True
else:
return False
return False
def process(self, frame, metadata):
"""Processes every frame it receives based on the filter logic used
:param frame: frame blob
:type frame: numpy.ndarray
:param metadata: frame's metadata
:type metadata: str
:return: (should the frame be dropped, has the frame been updated,
new metadata for the frame if any)
:rtype: (bool, numpy.ndarray, str)
"""
frame_height, frame_width = frame.shape[:-1]
resized_frame = cv2.resize(frame, (int(frame_width/self.ratio),
int(frame_height/self.ratio)))
if self.training_mode is True:
self.count = self.count + 1
cv2.imwrite("/EII/test_videos/"+str(self.count)+".png", frame)
return True, None, None
else:
if self.filter_lock is False:
if self._check_frame(resized_frame):
self.filter_lock = True
# Re-initialize frame count during trigger lock to 0
self.lock_frame_count = 0
return False, None, metadata
else:
return True, None, None
else:
# Continue applying background subtractor to
# keep track of PCB positions
self._check_frame(resized_frame)
# Increment frame count during trigger lock phase
self.lock_frame_count = self.lock_frame_count + 1
if self.lock_frame_count == 7:
# Clear trigger lock after timeout
# period (measured in frame count here)
self.filter_lock = False
return True, None, None
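if __name__ == '__main__':
    # Minimal usage sketch (added; not part of the original UDF). The pixel
    # thresholds reuse the defaults quoted in the constructor docstring; the
    # scale ratio of 4 and the all-black test frame are placeholder assumptions.
    udf = Udf(n_right_px=1000, n_left_px=1000, n_total_px=300000,
              training_mode=False, scale_ratio=4)
    dummy_frame = np.zeros((480, 640, 3), dtype=np.uint8)
    drop, new_frame, new_meta = udf.process(dummy_frame, {'frame_id': 0})
    print('Drop frame:', drop)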
|
[
"logging.getLogger",
"cv2.createBackgroundSubtractorMOG2",
"cv2.threshold",
"cv2.morphologyEx",
"numpy.sum",
"cv2.getStructuringElement",
"cv2.boundingRect"
] |
[((2080, 2111), 'logging.getLogger', 'logging.getLogger', (['"""PCB_FILTER"""'], {}), "('PCB_FILTER')\n", (2097, 2111), False, 'import logging\n'), ((2242, 2278), 'cv2.createBackgroundSubtractorMOG2', 'cv2.createBackgroundSubtractorMOG2', ([], {}), '()\n', (2276, 2278), False, 'import cv2\n'), ((3565, 3616), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_RECT', '(20, 20)'], {}), '(cv2.MORPH_RECT, (20, 20))\n', (3590, 3616), False, 'import cv2\n'), ((3643, 3709), 'cv2.threshold', 'cv2.threshold', (['fgmask', '(0)', '(255)', '(cv2.THRESH_BINARY + cv2.THRESH_OTSU)'], {}), '(fgmask, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)\n', (3656, 3709), False, 'import cv2\n'), ((3769, 3818), 'cv2.morphologyEx', 'cv2.morphologyEx', (['thresh', 'cv2.MORPH_CLOSE', 'kernel'], {}), '(thresh, cv2.MORPH_CLOSE, kernel)\n', (3785, 3818), False, 'import cv2\n'), ((4034, 4055), 'numpy.sum', 'np.sum', (['(thresh == 255)'], {}), '(thresh == 255)\n', (4040, 4055), True, 'import numpy as np\n'), ((4077, 4096), 'numpy.sum', 'np.sum', (['(left == 255)'], {}), '(left == 255)\n', (4083, 4096), True, 'import numpy as np\n'), ((4119, 4139), 'numpy.sum', 'np.sum', (['(right == 255)'], {}), '(right == 255)\n', (4125, 4139), True, 'import numpy as np\n'), ((4957, 4976), 'cv2.boundingRect', 'cv2.boundingRect', (['c'], {}), '(c)\n', (4973, 4976), False, 'import cv2\n')]
|
import numpy as np
from scipy.sparse import diags
from sklearn.metrics import pairwise_distances
from fclsp.reshaping_utils import vec_hollow_sym
def get_lap_coef(V, w, var_type, shape):
"""
    Computes the Laplacian coefficient vector
TODO: finish documenting
Parameters
----------
V: array-like
w: array-like
var_type: str
Type of the variable. Must be one of ['hollow_sym', 'rect', 'multi'].
shape: tuple of ints
Shape of the variable.
Output
------
lap_coef:
"""
assert var_type in ['hollow_sym', 'rect', 'multi']
if var_type == 'hollow_sym':
return get_lap_coef_hollow_sym(V=V, w=w)
elif var_type == 'rect':
return get_lap_coef_rect(V=V, w=w, shape=shape)
elif var_type == 'multi':
return get_lap_coef_multi(V=V, w=w, shape=shape)
def get_lap_coef_hollow_sym(V, w):
"""
    Returns the Laplacian coefficient for an adjacency matrix.
    Let A(x) in R^{d x d} be an adjacency matrix parameterized by its edges x in R^{d choose 2}. Also let V in R^{d times K} and w in R^K for K <= d.
    The Laplacian coefficient M(V, w) in R^{d choose 2} is the vector such that
M(V, w)^T x = Tr(V^T Laplacian(A(x)) V diag(w))
Parameters
----------
V: array-like, (n_nodes, K)
The input matrix.
w: array-like, (K, )
The input vector.
Output
-------
M(V, w): array-like, (n_nodes choose 2, )
The Laplacian coefficient vector.
"""
assert V.shape[1] == len(w)
coef = pairwise_distances(V @ diags(np.sqrt(w)), metric='euclidean',
n_jobs=None) # TODO: give option
coef = vec_hollow_sym(coef) ** 2
return coef
def get_lap_coef_rect(V, w, shape):
"""
    Returns the Laplacian coefficient for a rectangular matrix.
Parameters
----------
V: array-like, (n_nodes, K)
The input matrix.
w: array-like, (K, )
The input vector.
shape: tuple of two ints
        Size of the rectangular matrix.
Output
-------
M(V, w): array-like, (sum(shape), )
The Laplacian coefficient vector.
"""
raise NotImplementedError
def get_lap_coef_multi(V, w, shape):
"""
    Returns the Laplacian coefficient for a multi-array.
Parameters
----------
V: array-like, (n_nodes, K)
The input matrix.
w: array-like, (K, )
The input vector.
    shape: tuple of ints
        Shape of the multi-array.
Output
-------
M(V, w): array-like
The Laplacian coefficient vector.
"""
raise NotImplementedError
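if __name__ == '__main__':
    # Minimal usage sketch (added; V and w below are arbitrary examples): for a
    # graph on 5 nodes embedded in K=2 dimensions, the docstring above implies
    # a coefficient vector with one entry per node pair, i.e. shape (10,).
    V = np.random.randn(5, 2)
    w = np.array([1.0, 0.5])
    coef = get_lap_coef(V, w, var_type='hollow_sym', shape=(5, 5))
    print(coef.shape)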
|
[
"fclsp.reshaping_utils.vec_hollow_sym",
"numpy.sqrt"
] |
[((1688, 1708), 'fclsp.reshaping_utils.vec_hollow_sym', 'vec_hollow_sym', (['coef'], {}), '(coef)\n', (1702, 1708), False, 'from fclsp.reshaping_utils import vec_hollow_sym\n'), ((1579, 1589), 'numpy.sqrt', 'np.sqrt', (['w'], {}), '(w)\n', (1586, 1589), True, 'import numpy as np\n')]
|
# See ciDifference.ipynb for derivation, implementation notes, and test
def cidifference(datagen, umin, umax, wmin, wmax, alpha=0.05,
rmin=0, rmax=1, raiseonerr=False):
import numpy as np
from cvxopt import solvers, matrix
from math import log, exp
from scipy.stats import f
from .estimatediff import estimatediff
assert umin >= 0
assert umin < 1
assert umax > 1
assert wmin >= 0
assert wmin < 1
assert wmax > 1
assert rmax >= rmin
_, mle = estimatediff(datagen, umin, umax, wmin, wmax, rmin, rmax, raiseonerr=raiseonerr)
num = mle['num']
Delta = 0.5 * f.isf(q=alpha, dfn=1, dfd=num-1)
phi = Delta - mle['primal']
rscale = max(1.0, rmax - rmin)
def dualobjective(p, sign):
beta, gamma, tau = p
logcost = -phi
n = 0
for c, u, w, r in datagen():
if c > 0:
n += c
denom = beta + gamma * u + tau * w + sign * (u - w) * r
logcost += c * log(denom)
logcost /= n
cost = exp(logcost)
return (- beta - gamma - tau + n * cost) / rscale
def jacdualobjective(p, sign):
beta, gamma, tau = p
logcost = -phi
jaclogcost = np.zeros(3)
n = 0
for c, u, w, r in datagen():
if c > 0:
n += c
denom = beta + gamma * u + tau * w + sign * (u - w) * r
logcost += c * log(denom)
gradlogcost = c / denom
jaclogcost[0] += gradlogcost
jaclogcost[1] += u * gradlogcost
jaclogcost[2] += w * gradlogcost
logcost /= n
cost = exp(logcost)
return (-np.ones(3) + exp(logcost) * jaclogcost) / rscale
def hessdualobjective(p, sign):
beta, gamma, tau = p
logcost = -phi
jaclogcost = np.zeros(3)
hesslogcost = np.zeros((3,3))
n = 0
for c, u, w, r in datagen():
if c > 0:
n += c
denom = beta + gamma * u + tau * w + sign * (u - w) * r
logcost += c * log(denom)
gradlogcost = c / denom
jaclogcost[0] += gradlogcost
jaclogcost[1] += u * gradlogcost
jaclogcost[2] += w * gradlogcost
gradgradlogcost = -c / denom**2
hesslogcost[0, 0] += gradgradlogcost
hesslogcost[0, 1] += gradgradlogcost * u
hesslogcost[0, 2] += gradgradlogcost * w
hesslogcost[1, 1] += gradgradlogcost * u**2
hesslogcost[1, 2] += gradgradlogcost * u * w
hesslogcost[2, 2] += gradgradlogcost * w**2
logcost /= n
cost = exp(logcost)
hesslogcost[1, 0] = hesslogcost[0, 1]
hesslogcost[2, 0] = hesslogcost[0, 2]
hesslogcost[2, 1] = hesslogcost[1, 2]
return (cost * (hesslogcost + np.outer(jaclogcost, jaclogcost) / n)) / rscale
# solve
consE = np.array([
[ 1, u, w ]
for u in (umin, umax)
for w in (wmin, wmax)
for r in (rmin, rmax)
], dtype='float64')
retvals = []
easybounds = [ (mle['deltavmin'] <= (rmin - rmax) + 1e-4, rmin - rmax),
(mle['deltavmax'] >= (rmax - rmin) - 1e-4, rmax - rmin) ]
for what in range(2):
if easybounds[what][0]:
retvals.append((easybounds[what][1], None))
continue
sign = 1 - 2 * what
x0 = np.array([num, -rmin, rmax] if sign > 0 else [num, rmax, -rmin],
dtype='float64')
d = np.array([ -sign * (u - w) * r + 1e-4
for u in (umin, umax)
for w in (wmin, wmax)
for r in (rmin, rmax)
], dtype='float64')
# from .gradcheck import gradcheck, hesscheck
# gradcheck(f=lambda p: dualobjective(p, sign),
# jac=lambda p: jacdualobjective(p, sign),
# x=x0,
# what='dualobjective')
# hesscheck(jac=lambda p: jacdualobjective(p, sign),
# hess=lambda p: hessdualobjective(p, sign),
# x=x0,
# what='jacdualobjective')
def F(x=None, z=None):
if x is None: return 0, matrix(x0)
f = -dualobjective(x, sign)
jf = -jacdualobjective(x, sign)
Df = matrix(jf).T
if z is None: return f, Df
hf = -z[0] * hessdualobjective(x, sign)
H = matrix(hf, hf.shape)
return f, Df, H
soln = solvers.cp(F,
G=-matrix(consE, consE.shape),
h=-matrix(d),
options={'show_progress': False})
if raiseonerr:
from pprint import pformat
assert soln['status'] == 'optimal', pformat({
'soln': soln,
'phi': phi,
'mle': mle,
})
betastar, gammastar, taustar = soln['x']
fstar = -rscale * soln['primal objective']
kappastar = (fstar + betastar + gammastar + taustar) / num
qfunc = lambda c, u, w, r, kappa=kappastar, beta=betastar, gamma=gammastar, tau=taustar: c*kappa / (beta + gamma * u + tau * w + (u - w) * r)
vbound = sign * fstar
retvals.append( ( vbound,
{
'kappastar': kappastar,
'betastar': betastar,
'gammastar': gammastar,
'taustar': taustar,
'qfunc': qfunc,
'phi': phi,
'mle': mle,
}
) )
return (retvals[0][0], retvals[1][0]), (retvals[0][1], retvals[1][1])
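# Usage sketch (added; not part of the original module). `datagen` is expected
# to be a zero-argument callable that can be iterated repeatedly and yields
# (count, u, w, r) tuples, for example:
#
#     def datagen():
#         yield 10, 0.5, 2.0, 1.0
#         yield 5, 2.0, 0.5, 0.0
#
#     (lower, upper), _ = cidifference(datagen, umin=0.1, umax=10.0,
#                                      wmin=0.1, wmax=10.0)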
|
[
"scipy.stats.f.isf",
"numpy.ones",
"pprint.pformat",
"math.log",
"numpy.array",
"numpy.zeros",
"numpy.outer",
"cvxopt.matrix",
"math.exp"
] |
[((3035, 3144), 'numpy.array', 'np.array', (['[[1, u, w] for u in (umin, umax) for w in (wmin, wmax) for r in (rmin, rmax)]'], {'dtype': '"""float64"""'}), "([[1, u, w] for u in (umin, umax) for w in (wmin, wmax) for r in (\n rmin, rmax)], dtype='float64')\n", (3043, 3144), True, 'import numpy as np\n'), ((633, 667), 'scipy.stats.f.isf', 'f.isf', ([], {'q': 'alpha', 'dfn': '(1)', 'dfd': '(num - 1)'}), '(q=alpha, dfn=1, dfd=num - 1)\n', (638, 667), False, 'from scipy.stats import f\n'), ((1076, 1088), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (1079, 1088), False, 'from math import log, exp\n'), ((1258, 1269), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1266, 1269), True, 'import numpy as np\n'), ((1701, 1713), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (1704, 1713), False, 'from math import log, exp\n'), ((1892, 1903), 'numpy.zeros', 'np.zeros', (['(3)'], {}), '(3)\n', (1900, 1903), True, 'import numpy as np\n'), ((1926, 1942), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (1934, 1942), True, 'import numpy as np\n'), ((2770, 2782), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (2773, 2782), False, 'from math import log, exp\n'), ((3529, 3615), 'numpy.array', 'np.array', (['([num, -rmin, rmax] if sign > 0 else [num, rmax, -rmin])'], {'dtype': '"""float64"""'}), "([num, -rmin, rmax] if sign > 0 else [num, rmax, -rmin], dtype=\n 'float64')\n", (3537, 3615), True, 'import numpy as np\n'), ((3646, 3776), 'numpy.array', 'np.array', (['[(-sign * (u - w) * r + 0.0001) for u in (umin, umax) for w in (wmin, wmax) for\n r in (rmin, rmax)]'], {'dtype': '"""float64"""'}), "([(-sign * (u - w) * r + 0.0001) for u in (umin, umax) for w in (\n wmin, wmax) for r in (rmin, rmax)], dtype='float64')\n", (3654, 3776), True, 'import numpy as np\n'), ((4604, 4624), 'cvxopt.matrix', 'matrix', (['hf', 'hf.shape'], {}), '(hf, hf.shape)\n', (4610, 4624), False, 'from cvxopt import solvers, matrix\n'), ((4951, 4998), 'pprint.pformat', 'pformat', (["{'soln': soln, 'phi': phi, 'mle': mle}"], {}), "({'soln': soln, 'phi': phi, 'mle': mle})\n", (4958, 4998), False, 'from pprint import pformat\n'), ((4484, 4494), 'cvxopt.matrix', 'matrix', (['jf'], {}), '(jf)\n', (4490, 4494), False, 'from cvxopt import solvers, matrix\n'), ((1028, 1038), 'math.log', 'log', (['denom'], {}), '(denom)\n', (1031, 1038), False, 'from math import log, exp\n'), ((1470, 1480), 'math.log', 'log', (['denom'], {}), '(denom)\n', (1473, 1480), False, 'from math import log, exp\n'), ((1732, 1742), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (1739, 1742), True, 'import numpy as np\n'), ((1745, 1757), 'math.exp', 'exp', (['logcost'], {}), '(logcost)\n', (1748, 1757), False, 'from math import log, exp\n'), ((2142, 2152), 'math.log', 'log', (['denom'], {}), '(denom)\n', (2145, 2152), False, 'from math import log, exp\n'), ((4372, 4382), 'cvxopt.matrix', 'matrix', (['x0'], {}), '(x0)\n', (4378, 4382), False, 'from cvxopt import solvers, matrix\n'), ((4712, 4738), 'cvxopt.matrix', 'matrix', (['consE', 'consE.shape'], {}), '(consE, consE.shape)\n', (4718, 4738), False, 'from cvxopt import solvers, matrix\n'), ((4769, 4778), 'cvxopt.matrix', 'matrix', (['d'], {}), '(d)\n', (4775, 4778), False, 'from cvxopt import solvers, matrix\n'), ((2961, 2993), 'numpy.outer', 'np.outer', (['jaclogcost', 'jaclogcost'], {}), '(jaclogcost, jaclogcost)\n', (2969, 2993), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
filename = "experiment_data/012522-16_29_48-data.csv"
data = np.genfromtxt(filename, delimiter=',', skip_header=2)
timestamps = data[:, 0]
timestamps -= timestamps[0]
cf1_actual_position = data[:, 18:21]
human_1_position = data[:,25:28]
def init_animation():
cf1_line.set_data([],[])
human1_line.set_data([],[])
human2_line.set_data([],[])
return cf1_line, human1_line, human2_line
def update_animation(frame):
cf1_line.set_data(cf1_actual_position[0:frame, 0],
cf1_actual_position[0:frame, 1])
cf1_line.set_3d_properties(cf1_actual_position[0:frame, 2])
return cf1_line, human1_line, human2_line
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlim3d([-2.0, 2.0])
ax.set_xlabel('X')
ax.set_ylim3d([-2.0, 2.0])
ax.set_ylabel('Y')
ax.set_zlim3d([-2.0, 2.0])
ax.set_zlabel('Z')
cf1_line = ax.plot([],[],[], label="CF1 Position")[0]
human1_line = ax.plot([],[],[])[0]
human2_line = ax.plot([],[],[])[0]
line_ani = animation.FuncAnimation(fig,
update_animation,
init_func=init_animation,
frames=len(timestamps),
interval=50,
blit=False)
plt.show()
|
[
"mpl_toolkits.mplot3d.axes3d.Axes3D",
"matplotlib.pyplot.figure",
"numpy.genfromtxt",
"matplotlib.pyplot.show"
] |
[((197, 250), 'numpy.genfromtxt', 'np.genfromtxt', (['filename'], {'delimiter': '""","""', 'skip_header': '(2)'}), "(filename, delimiter=',', skip_header=2)\n", (210, 250), True, 'import numpy as np\n'), ((832, 844), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (842, 844), True, 'import matplotlib.pyplot as plt\n'), ((850, 864), 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'p3.Axes3D', (['fig'], {}), '(fig)\n', (859, 864), True, 'import mpl_toolkits.mplot3d.axes3d as p3\n'), ((1439, 1449), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1447, 1449), True, 'import matplotlib.pyplot as plt\n')]
|
# ////////////////////////////////////////////////////////////////////////////
# // This file is part of NIID-Net. For more information
# // see <https://github.com/zju3dv/NIID-Net>.
# // If you use this code, please cite the corresponding publications as
# // listed on the above website.
# //
# // Copyright (c) ZJU-SenseTime Joint Lab of 3D Vision. All Rights Reserved.
# //
# // Permission to use, copy, modify and distribute this software and its
# // documentation for educational, research and non-profit purposes only.
# //
# // The above copyright notice and this permission notice shall be included in all
# // copies or substantial portions of the Software.
# //
# // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# // SOFTWARE.
# ////////////////////////////////////////////////////////////////////////////
import random
import numpy as np
import torch
def set_(with_random=True, determine=False, SEED=999):
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = False
if not with_random:
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
torch.cuda.manual_seed_all(SEED)
np.random.seed(SEED)
random.seed(SEED)
# torch.cuda.set_device(opt.gpu_devices[0])
# torch.multiprocessing.set_sharing_strategy('file_system')
torch.backends.cudnn.deterministic = determine
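# Usage note (added): calling set_(with_random=False, determine=True, SEED=42)
# once at program start seeds torch (CPU and CUDA), numpy and random, and makes
# cuDNN deterministic for reproducible runs.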
|
[
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"random.seed",
"numpy.random.seed",
"torch.cuda.manual_seed"
] |
[((1479, 1502), 'torch.manual_seed', 'torch.manual_seed', (['SEED'], {}), '(SEED)\n', (1496, 1502), False, 'import torch\n'), ((1511, 1539), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['SEED'], {}), '(SEED)\n', (1533, 1539), False, 'import torch\n'), ((1548, 1580), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['SEED'], {}), '(SEED)\n', (1574, 1580), False, 'import torch\n'), ((1589, 1609), 'numpy.random.seed', 'np.random.seed', (['SEED'], {}), '(SEED)\n', (1603, 1609), True, 'import numpy as np\n'), ((1618, 1635), 'random.seed', 'random.seed', (['SEED'], {}), '(SEED)\n', (1629, 1635), False, 'import random\n')]
|
from typing import Dict, Generator, Optional, Tuple, Union
import numpy as np
from joblib import ( # type: ignore
delayed,
Parallel,
)
from numpy import linalg
from sklearn.metrics import accuracy_score
from sklearn.base import BaseEstimator
from libifbtsvm.functions import (
fuzzy_membership,
train_model,
)
from libifbtsvm.models.ifbtsvm import (
ClassificationModel,
FuzzyMembership,
Hyperparameters,
Hyperplane,
)
TrainingSet = Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]
DAGSubSet = Union[TrainingSet, Generator[TrainingSet, None, None]]
class iFBTSVM(BaseEstimator):
def __init__(self, parameters: Hyperparameters, n_jobs=1):
super().__init__()
self.parameters = parameters
self._classifiers: Dict = {}
self.n_jobs = n_jobs
self.kernel = parameters.kernel
@classmethod
def _compute_score(cls, score, c):
"""
:param score:
:param c:
:return:
"""
if score is None:
score = np.asarray(c)
sc = np.ones(len(c))
score = np.array((score, sc))
else:
res, indices_score, indices_c = np.intersect1d(score[0], np.asarray(c), return_indices=True)
score[1][indices_score] += 1
diff = np.setdiff1d(np.asarray(c), score[0])
if diff.any():
_zdiff = np.ones(len(diff))
new_score = np.array((diff, _zdiff))
score_0 = np.append(score[0], [new_score[0]])
score_1 = np.append(score[1], [new_score[1]])
score = np.asarray([score_0, score_1])
return score
@staticmethod
def _decrement(candidates, score, alphas, fuzzy, data) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
:return:
"""
sco0 = np.delete(score[0], candidates)
sco1 = np.delete(score[1], candidates)
score = np.asarray([sco0, sco1])
alphas = np.delete(alphas, candidates)
fuzzy = np.delete(fuzzy, candidates)
data = np.delete(data, candidates, axis=0)
return score, alphas, fuzzy, data
@staticmethod
def _filter_gradients(weights: np.ndarray, gradients: np.ndarray, data:
np.ndarray, label: np.ndarray) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
"""
Filters data points based on the projected gradients.
Kept data will include only values for which the projected gradients that will expand the support
vectors, meaning that are outside boundaries of current support vectors of the classifier.
:param gradients: The gradients with which to perform the computation
:param data: Data to filter
:return: Filtered data
"""
_data = np.append(data, np.ones((len(data), 1)), axis=1)
_new_grads = np.matmul(-_data, weights) - 1
_del = np.argwhere(np.logical_or(_new_grads <= min(gradients), _new_grads >= max(gradients)))
index = np.reshape(_del, newshape=_del.shape[0],)
if not len(index):
return data, label
_data = np.delete(data, index, axis=0)
_label = np.delete(label, index)
return _data, _label
@classmethod
def _fit_dag_step(cls, subset: TrainingSet, parameters: Hyperparameters) -> ClassificationModel:
"""
Trains a classifier based on a sub-set of data, as a step in the DAG classifier algorithm.
:param subset: Sub-set of data containing the training data for this DAG step
:param parameters: The classifier hyperparameters
:returns: A classification model for this subset
"""
# Features (x_p) of the current "positive" class
x_p = subset[0]
y_p = subset[1]
# Features (x_n) of the current "negative" class
x_n = subset[2]
y_n = subset[3]
# Calculate fuzzy membership for points
membership: FuzzyMembership = fuzzy_membership(params=parameters, class_p=x_p, class_n=x_n)
# Build H matrix which is [X_p/n, e] where "e" is an extra column of ones ("1") appended at the end of the
# matrix
# i.e.
#
# if X_p = | 1 2 3 | and e = | 1 | then H_p = | 1 2 3 1 |
# | 4 5 6 | | 1 | | 4 5 6 1 |
# | 7 8 9 | | 1 | | 7 8 9 1 |
#
H_p = np.append(x_p, np.ones((x_p.shape[0], 1)), axis=1)
H_n = np.append(x_n, np.ones((x_n.shape[0], 1)), axis=1)
_C1 = parameters.C1 * membership.sn
_C3 = parameters.C3 * membership.sp
_C2 = parameters.C2
_C4 = parameters.C4
# Train the model using the algorithm described by (de Mello et al. 2019)
# Python
hyperplane_p: Hyperplane = train_model(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)
hyperplane_n: Hyperplane = train_model(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)
hyperplane_n.weights = -hyperplane_n.weights
return ClassificationModel(class_p=y_p[0],
class_n=y_n[0],
fuzzy=membership,
weights_p=hyperplane_p,
weights_n=hyperplane_n,
data_p=x_p,
data_n=x_n)
@classmethod
def _increment_dag_step(cls, subset: TrainingSet, parameters: Hyperparameters,
classifier: ClassificationModel) -> ClassificationModel:
"""
Increment already trained DAG models
:param subset: Sub-set of data containing the update data for this DAG step
:param parameters: The classifier hyperparameters
:param classifier: The classifier to update
:return: The updated classifier
"""
# Features (x_p) of the current "positive" class
x_p = subset[0]
y_p = subset[1]
# Features (x_n) of the current "negative" class
x_n = subset[2]
y_n = subset[3]
_batch_xp, _batch_yp = cls._filter_gradients(weights=classifier.p.weights,
gradients=classifier.p.projected_gradients,
data=x_p, label=y_p)
if _batch_xp is None:
return classifier
_batch_xn, _batch_yn = None, None
if x_n.any() and y_n.any():
_batch_xn, _batch_yn = cls._filter_gradients(weights=classifier.p.weights,
gradients=classifier.p.projected_gradients,
data=x_n, label=y_n)
_data_xp = classifier.data_p
if _batch_xp is not None and _batch_xp.any():
_data_xp = np.concatenate((_data_xp, _batch_xp)) if _batch_xp is not None else classifier.data_p
_data_xn = classifier.data_n
if _batch_xn is not None and _batch_xn.any():
_data_xn = np.concatenate((_data_xn, _batch_xn)) if _batch_xn is not None else classifier.data_n
# Calculate fuzzy membership for points
membership: FuzzyMembership = fuzzy_membership(params=parameters, class_p=_data_xp, class_n=_data_xn)
# Build H matrix which is [X_p/n, e] where "e" is an extra column of ones ("1") appended at the end of the
# matrix
# i.e.
#
# if X_p = | 1 2 3 | and e = | 1 | then H_p = | 1 2 3 1 |
# | 4 5 6 | | 1 | | 4 5 6 1 |
# | 7 8 9 | | 1 | | 7 8 9 1 |
#
H_p = np.append(_data_xp, np.ones((_data_xp.shape[0], 1)), axis=1)
H_n = np.append(_data_xn, np.ones((_data_xn.shape[0], 1)), axis=1)
_C1 = parameters.C1 * membership.sn
_C3 = parameters.C3 * membership.sp
_C2 = parameters.C2
_C4 = parameters.C4
# Recompute the training with the update data
hyperplane_p: Hyperplane = train_model(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)
hyperplane_n: Hyperplane = train_model(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)
hyperplane_n.weights = -hyperplane_n.weights
classifier.p = hyperplane_p
classifier.n = hyperplane_n
classifier.fuzzy_membership = membership
classifier.data_p = _data_xp
classifier.data_n = _data_xn
c_pos = np.argwhere(classifier.p.alpha <= parameters.phi)
c_pos = np.reshape(c_pos, newshape=(c_pos.shape[0],))
c_neg = np.argwhere(classifier.n.alpha <= parameters.phi)
c_neg = np.reshape(c_neg, newshape=(c_neg.shape[0],))
classifier.score_p = cls._compute_score(classifier.score_p, c_pos)
classifier.score_n = cls._compute_score(classifier.score_n, c_neg)
_candidates_p = np.argwhere(classifier.score_p[1] >= parameters.forget_score)
_candidates_p = np.reshape(_candidates_p, newshape=(_candidates_p.shape[0], ))
_candidates_n = np.argwhere(classifier.score_n[1] >= parameters.forget_score)
_candidates_n = np.reshape(_candidates_n, newshape=(_candidates_n.shape[0], ))
if _candidates_p.any():
score, alpha, fuzzy, data = cls._decrement(candidates=_candidates_p,
score=classifier.score_p,
alphas=classifier.p.alpha,
fuzzy=classifier.fuzzy_membership.sp,
data=_data_xp)
classifier.p.alpha = alpha
classifier.fuzzy_membership.sp = fuzzy
classifier.data_p = data
classifier.score_p = score
if _candidates_n.any():
score, alpha, fuzzy, data = cls._decrement(candidates=_candidates_n,
score=classifier.score_n,
alphas=classifier.n.alpha,
fuzzy=classifier.fuzzy_membership.sn,
data=_data_xn)
classifier.n.alpha = alpha
classifier.fuzzy_membership.sn = fuzzy
classifier.data_n = data
classifier.score_n = score
return classifier
@classmethod
def _generate_sub_sets(cls, X: np.ndarray, y: np.ndarray) -> DAGSubSet:
"""
Generates sub-data sets based on the DAG classification principle.
Example, for 4 classes, the function will return the following:
[0]: Values and labels of Class 1 and 2
[1]: Values and labels of Class 1 and 3
[2]: Values and labels of Class 1 and 4
[3]: Values and labels of Class 2 and 3
[4]: Values and labels of Class 2 and 4
[5]: Values and labels of Class 3 and 4
:param X: The full training set
:param y: The full training labels set
:return: Generator of tuple containing values and labels for positive and negative class
based on the current iteration in the classification DAG.
- [0] Values for current X positive
- [1] Labels for current X positive
- [2] Values for current X negative
- [3] Labels for current X negative
"""
classes = np.unique(y)
if len(classes) == 1:
return X[classes[0]], y[classes[0]], np.ndarray(), np.ndarray()
for _p in range(classes.size):
for _n in range(_p + 1, classes.size):
_index_p = np.where(y == classes[_p])[0]
_index_n = np.where(y == classes[_n])[0]
yield X[_index_p], y[_index_p], X[_index_n], y[_index_n]
def decision_function(self, X):
"""
        Evaluates the decision function over X.
:param X: Array of features to evaluate the decision on.
:return: Array of decision evaluation.
"""
pass
def fit(self, X: np.ndarray, y: np.ndarray, sample_weight=None):
"""
Trains a iFBTSVM model
:param X: The training samples
:param y: The class labels for each training sample
:param sample_weight: (Not supported)
"""
X = self.kernel.fit_transform(X=X, y=y) if self.kernel else X # type: ignore
# Train the DAG models in parallel
trained_hyperplanes = Parallel(n_jobs=self.n_jobs, prefer='processes')(
delayed(self._fit_dag_step)(subset, self.parameters) for subset in self._generate_sub_sets(X, y)
)
# Create the DAG Model here
for hypp in trained_hyperplanes:
_clsf = self._classifiers.get(hypp.class_p, {})
_clsf[hypp.class_n] = hypp
self._classifiers[hypp.class_p] = _clsf
def update(self, X: np.ndarray, y: np.ndarray, batch_size: int = None):
"""
Update an already trained classifier
:param X: The training data with which to update the models.
:param y: The training labels with which to update the models.
:param batch_size: The batch size for updating models
"""
if not batch_size:
batch_size = len(y)
i = 0
while i < len(X):
batch_x = X[i: i + batch_size]
batch_y = y[i: i + batch_size]
batch_x = self.kernel.transform(X=batch_x) if self.kernel else batch_x # type: ignore
# Update the DAG models in parallel
updated_hyperplanes = Parallel(n_jobs=self.n_jobs, prefer='processes')(
delayed(self._increment_dag_step)
(
subset,
self.parameters,
self._classifiers[subset[1][0]][subset[3][0]] # Get classifier for ClassP/ClassN of this subset
)
for subset in self._generate_sub_sets(batch_x, batch_y)
)
# Create the DAG Model here
for hypp in updated_hyperplanes:
_clsf = self._classifiers.get(hypp.class_p, {})
_clsf[hypp.class_n] = hypp
self._classifiers[hypp.class_p] = _clsf
i += batch_size
def predict(self, X):
"""
Performs classification X.
:param X: Array of features to classify
:return: Array of classification result
"""
X = self.kernel.transform(X=X) if self.kernel else X
lh_keys = list(set(self._classifiers.keys()))
rh_keys = set()
for value in self._classifiers.values():
for key, _ in value.items():
rh_keys.add(key)
rh_keys = list(rh_keys)
classes = []
for row in X:
_dag_index_p = 0
_dag_index_n = 0
f_pos = 0
f_neg = 0
class_p = None
class_n = None
while True:
try:
class_p = lh_keys[_dag_index_p]
class_n = rh_keys[_dag_index_n]
model: ClassificationModel = self._classifiers[class_p][class_n]
f_pos = np.divide(np.matmul(row, model.p.weights[:-1]) + model.p.weights[-1],
linalg.norm(model.p.weights[:-1]))
f_neg = np.divide(np.matmul(row, model.n.weights[:-1]) + model.n.weights[-1],
linalg.norm(model.n.weights[:-1]))
if abs(f_pos) < abs(f_neg):
_dag_index_p = _dag_index_n + 1
_dag_index_n += 1
else:
_dag_index_n += 1
except (StopIteration, IndexError):
if abs(f_pos) < abs(f_neg):
classes.append(class_n)
else:
classes.append(class_p)
break
return classes
def score(self, X, y, sample_weight=None):
"""
Returns the accuracy of a classification.
:param X: Array of features to classify
:param y: 1-D Array of truth values for the features
:param sample_weight: Not supported
:return: Accuracy score of the classification
"""
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
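# Usage sketch (added; the exact constructor signature of Hyperparameters is an
# assumption -- only the attributes referenced above, C1-C4, phi, forget_score
# and kernel, are known from this file):
#
#     params = Hyperparameters(C1=8, C2=2, C3=8, C4=2, phi=1e-09,
#                              forget_score=10, kernel=None)
#     clf = iFBTSVM(parameters=params, n_jobs=4)
#     clf.fit(X_train, y_train)                        # initial training
#     clf.update(X_stream, y_stream, batch_size=200)   # incremental update
#     print(clf.score(X_test, y_test))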
|
[
"numpy.reshape",
"numpy.unique",
"numpy.ones",
"libifbtsvm.functions.train_model",
"numpy.delete",
"numpy.where",
"numpy.asarray",
"joblib.Parallel",
"numpy.array",
"numpy.append",
"numpy.argwhere",
"numpy.matmul",
"numpy.ndarray",
"numpy.concatenate",
"numpy.linalg.norm",
"joblib.delayed",
"libifbtsvm.functions.fuzzy_membership",
"libifbtsvm.models.ifbtsvm.ClassificationModel"
] |
[((1870, 1901), 'numpy.delete', 'np.delete', (['score[0]', 'candidates'], {}), '(score[0], candidates)\n', (1879, 1901), True, 'import numpy as np\n'), ((1917, 1948), 'numpy.delete', 'np.delete', (['score[1]', 'candidates'], {}), '(score[1], candidates)\n', (1926, 1948), True, 'import numpy as np\n'), ((1966, 1990), 'numpy.asarray', 'np.asarray', (['[sco0, sco1]'], {}), '([sco0, sco1])\n', (1976, 1990), True, 'import numpy as np\n'), ((2009, 2038), 'numpy.delete', 'np.delete', (['alphas', 'candidates'], {}), '(alphas, candidates)\n', (2018, 2038), True, 'import numpy as np\n'), ((2055, 2083), 'numpy.delete', 'np.delete', (['fuzzy', 'candidates'], {}), '(fuzzy, candidates)\n', (2064, 2083), True, 'import numpy as np\n'), ((2099, 2134), 'numpy.delete', 'np.delete', (['data', 'candidates'], {'axis': '(0)'}), '(data, candidates, axis=0)\n', (2108, 2134), True, 'import numpy as np\n'), ((3059, 3099), 'numpy.reshape', 'np.reshape', (['_del'], {'newshape': '_del.shape[0]'}), '(_del, newshape=_del.shape[0])\n', (3069, 3099), True, 'import numpy as np\n'), ((3177, 3207), 'numpy.delete', 'np.delete', (['data', 'index'], {'axis': '(0)'}), '(data, index, axis=0)\n', (3186, 3207), True, 'import numpy as np\n'), ((3225, 3248), 'numpy.delete', 'np.delete', (['label', 'index'], {}), '(label, index)\n', (3234, 3248), True, 'import numpy as np\n'), ((4021, 4082), 'libifbtsvm.functions.fuzzy_membership', 'fuzzy_membership', ([], {'params': 'parameters', 'class_p': 'x_p', 'class_n': 'x_n'}), '(params=parameters, class_p=x_p, class_n=x_n)\n', (4037, 4082), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((4890, 4954), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_n', 'G': 'H_p', 'C': '_C4', 'CCx': '_C3'}), '(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)\n', (4901, 4954), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((4990, 5054), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_p', 'G': 'H_n', 'C': '_C2', 'CCx': '_C1'}), '(parameters=parameters, H=H_p, G=H_n, C=_C2, CCx=_C1)\n', (5001, 5054), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((5124, 5269), 'libifbtsvm.models.ifbtsvm.ClassificationModel', 'ClassificationModel', ([], {'class_p': 'y_p[0]', 'class_n': 'y_n[0]', 'fuzzy': 'membership', 'weights_p': 'hyperplane_p', 'weights_n': 'hyperplane_n', 'data_p': 'x_p', 'data_n': 'x_n'}), '(class_p=y_p[0], class_n=y_n[0], fuzzy=membership,\n weights_p=hyperplane_p, weights_n=hyperplane_n, data_p=x_p, data_n=x_n)\n', (5143, 5269), False, 'from libifbtsvm.models.ifbtsvm import ClassificationModel, FuzzyMembership, Hyperparameters, Hyperplane\n'), ((7327, 7398), 'libifbtsvm.functions.fuzzy_membership', 'fuzzy_membership', ([], {'params': 'parameters', 'class_p': '_data_xp', 'class_n': '_data_xn'}), '(params=parameters, class_p=_data_xp, class_n=_data_xn)\n', (7343, 7398), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((8181, 8245), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_n', 'G': 'H_p', 'C': '_C4', 'CCx': '_C3'}), '(parameters=parameters, H=H_n, G=H_p, C=_C4, CCx=_C3)\n', (8192, 8245), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((8281, 8345), 'libifbtsvm.functions.train_model', 'train_model', ([], {'parameters': 'parameters', 'H': 'H_p', 'G': 'H_n', 'C': '_C2', 'CCx': '_C1'}), '(parameters=parameters, H=H_p, G=H_n, 
C=_C2, CCx=_C1)\n', (8292, 8345), False, 'from libifbtsvm.functions import fuzzy_membership, train_model\n'), ((8613, 8662), 'numpy.argwhere', 'np.argwhere', (['(classifier.p.alpha <= parameters.phi)'], {}), '(classifier.p.alpha <= parameters.phi)\n', (8624, 8662), True, 'import numpy as np\n'), ((8679, 8724), 'numpy.reshape', 'np.reshape', (['c_pos'], {'newshape': '(c_pos.shape[0],)'}), '(c_pos, newshape=(c_pos.shape[0],))\n', (8689, 8724), True, 'import numpy as np\n'), ((8741, 8790), 'numpy.argwhere', 'np.argwhere', (['(classifier.n.alpha <= parameters.phi)'], {}), '(classifier.n.alpha <= parameters.phi)\n', (8752, 8790), True, 'import numpy as np\n'), ((8807, 8852), 'numpy.reshape', 'np.reshape', (['c_neg'], {'newshape': '(c_neg.shape[0],)'}), '(c_neg, newshape=(c_neg.shape[0],))\n', (8817, 8852), True, 'import numpy as np\n'), ((9029, 9090), 'numpy.argwhere', 'np.argwhere', (['(classifier.score_p[1] >= parameters.forget_score)'], {}), '(classifier.score_p[1] >= parameters.forget_score)\n', (9040, 9090), True, 'import numpy as np\n'), ((9115, 9176), 'numpy.reshape', 'np.reshape', (['_candidates_p'], {'newshape': '(_candidates_p.shape[0],)'}), '(_candidates_p, newshape=(_candidates_p.shape[0],))\n', (9125, 9176), True, 'import numpy as np\n'), ((9202, 9263), 'numpy.argwhere', 'np.argwhere', (['(classifier.score_n[1] >= parameters.forget_score)'], {}), '(classifier.score_n[1] >= parameters.forget_score)\n', (9213, 9263), True, 'import numpy as np\n'), ((9288, 9349), 'numpy.reshape', 'np.reshape', (['_candidates_n'], {'newshape': '(_candidates_n.shape[0],)'}), '(_candidates_n, newshape=(_candidates_n.shape[0],))\n', (9298, 9349), True, 'import numpy as np\n'), ((11603, 11615), 'numpy.unique', 'np.unique', (['y'], {}), '(y)\n', (11612, 11615), True, 'import numpy as np\n'), ((1043, 1056), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (1053, 1056), True, 'import numpy as np\n'), ((1110, 1131), 'numpy.array', 'np.array', (['(score, sc)'], {}), '((score, sc))\n', (1118, 1131), True, 'import numpy as np\n'), ((2908, 2934), 'numpy.matmul', 'np.matmul', (['(-_data)', 'weights'], {}), '(-_data, weights)\n', (2917, 2934), True, 'import numpy as np\n'), ((4508, 4534), 'numpy.ones', 'np.ones', (['(x_p.shape[0], 1)'], {}), '((x_p.shape[0], 1))\n', (4515, 4534), True, 'import numpy as np\n'), ((4573, 4599), 'numpy.ones', 'np.ones', (['(x_n.shape[0], 1)'], {}), '((x_n.shape[0], 1))\n', (4580, 4599), True, 'import numpy as np\n'), ((7829, 7860), 'numpy.ones', 'np.ones', (['(_data_xp.shape[0], 1)'], {}), '((_data_xp.shape[0], 1))\n', (7836, 7860), True, 'import numpy as np\n'), ((7904, 7935), 'numpy.ones', 'np.ones', (['(_data_xn.shape[0], 1)'], {}), '((_data_xn.shape[0], 1))\n', (7911, 7935), True, 'import numpy as np\n'), ((12669, 12717), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'prefer': '"""processes"""'}), "(n_jobs=self.n_jobs, prefer='processes')\n", (12677, 12717), False, 'from joblib import delayed, Parallel\n'), ((1216, 1229), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (1226, 1229), True, 'import numpy as np\n'), ((1325, 1338), 'numpy.asarray', 'np.asarray', (['c'], {}), '(c)\n', (1335, 1338), True, 'import numpy as np\n'), ((1450, 1474), 'numpy.array', 'np.array', (['(diff, _zdiff)'], {}), '((diff, _zdiff))\n', (1458, 1474), True, 'import numpy as np\n'), ((1502, 1537), 'numpy.append', 'np.append', (['score[0]', '[new_score[0]]'], {}), '(score[0], [new_score[0]])\n', (1511, 1537), True, 'import numpy as np\n'), ((1564, 1599), 'numpy.append', 'np.append', 
(['score[1]', '[new_score[1]]'], {}), '(score[1], [new_score[1]])\n', (1573, 1599), True, 'import numpy as np\n'), ((1624, 1654), 'numpy.asarray', 'np.asarray', (['[score_0, score_1]'], {}), '([score_0, score_1])\n', (1634, 1654), True, 'import numpy as np\n'), ((6953, 6990), 'numpy.concatenate', 'np.concatenate', (['(_data_xp, _batch_xp)'], {}), '((_data_xp, _batch_xp))\n', (6967, 6990), True, 'import numpy as np\n'), ((7154, 7191), 'numpy.concatenate', 'np.concatenate', (['(_data_xn, _batch_xn)'], {}), '((_data_xn, _batch_xn))\n', (7168, 7191), True, 'import numpy as np\n'), ((11695, 11707), 'numpy.ndarray', 'np.ndarray', ([], {}), '()\n', (11705, 11707), True, 'import numpy as np\n'), ((11709, 11721), 'numpy.ndarray', 'np.ndarray', ([], {}), '()\n', (11719, 11721), True, 'import numpy as np\n'), ((13786, 13834), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': 'self.n_jobs', 'prefer': '"""processes"""'}), "(n_jobs=self.n_jobs, prefer='processes')\n", (13794, 13834), False, 'from joblib import delayed, Parallel\n'), ((11841, 11867), 'numpy.where', 'np.where', (['(y == classes[_p])'], {}), '(y == classes[_p])\n', (11849, 11867), True, 'import numpy as np\n'), ((11898, 11924), 'numpy.where', 'np.where', (['(y == classes[_n])'], {}), '(y == classes[_n])\n', (11906, 11924), True, 'import numpy as np\n'), ((12731, 12758), 'joblib.delayed', 'delayed', (['self._fit_dag_step'], {}), '(self._fit_dag_step)\n', (12738, 12758), False, 'from joblib import delayed, Parallel\n'), ((13852, 13885), 'joblib.delayed', 'delayed', (['self._increment_dag_step'], {}), '(self._increment_dag_step)\n', (13859, 13885), False, 'from joblib import delayed, Parallel\n'), ((15524, 15557), 'numpy.linalg.norm', 'linalg.norm', (['model.p.weights[:-1]'], {}), '(model.p.weights[:-1])\n', (15535, 15557), False, 'from numpy import linalg\n'), ((15695, 15728), 'numpy.linalg.norm', 'linalg.norm', (['model.n.weights[:-1]'], {}), '(model.n.weights[:-1])\n', (15706, 15728), False, 'from numpy import linalg\n'), ((15426, 15462), 'numpy.matmul', 'np.matmul', (['row', 'model.p.weights[:-1]'], {}), '(row, model.p.weights[:-1])\n', (15435, 15462), True, 'import numpy as np\n'), ((15597, 15633), 'numpy.matmul', 'np.matmul', (['row', 'model.n.weights[:-1]'], {}), '(row, model.n.weights[:-1])\n', (15606, 15633), True, 'import numpy as np\n')]
|
# Simple Python script to compute feature importance using univariate statistical analysis, recursive feature elimination, and elastic net
# by <NAME> and <NAME>, 2018
import numpy as np
import sys
# feature selection methods
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import GenericUnivariateSelect
from sklearn.feature_selection import RFECV
from sklearn.feature_selection import RFE
# this is a common feature selection method from bio-informatics that exploits ElasticNet
from sklearn.linear_model import ElasticNetCV
# other functions from sklearn
from sklearn.svm import SVC
# these are a few useful functions
from pandas import read_csv # incredibly useful!
from datetime import datetime
# local functions
import genericFunctions
def main() :
# hard-coded constants
methodologies = dict()
methodologies["univariate"] = SelectKBest(k=100)
# WARNING: RFE can take A LOT of time to complete. Be patient (or comment the following line)
methodologies["recursive-feature-elimination-svc"] = RFE( SVC(kernel='linear'), n_features_to_select=100, verbose=1 )
methodologies["elastic-net"] = ElasticNetCV()
featuresEFSFile = "../results/feature-importance-efs.csv"
print("Loading dataset...")
X, y, biomarkerNames = genericFunctions.loadTCGADataset()
for methodName in methodologies :
start_time = datetime.now()
print("\nComputing most relevant features using methodology \"" + methodName + "\"...")
featureSelectionMethod = methodologies[methodName]
featureSelectionMethod.fit(X, y)
delta_time = datetime.now() - start_time
# create list of tuples
sortedFeatures = None
if methodName.find("select-from-model") != -1 or methodName.find("recursive-feature-elimination") != -1 :
featureIndices = featureSelectionMethod.get_support(indices=True)
sortedFeatures = [ (1.0, biomarkerNames[i]) for i in featureIndices ]
elif methodName.find("elastic-net") != -1 :
coefficients = featureSelectionMethod.coef_
sortedFeatures = list( zip( list(coefficients), biomarkerNames ) )
else :
sortedFeatures = list( zip( list(featureSelectionMethod.scores_), biomarkerNames ) )
# remove all 'nan' values and sort on first element
sortedFeatures = [ x for x in sortedFeatures if not np.isnan(x[0]) ]
sortedFeatures = sorted( sortedFeatures, key=lambda x : x[0], reverse=True )
# save everything to file
outputFile = "feature-importance-" + methodName + ".csv"
with open(outputFile, "w") as fp :
for score, feature in sortedFeatures :
print(feature + ": " + str(score))
fp.write(feature + "," + str(score) + "\n")
# also, try a comparison with the features obtained through ML
featuresML = []
with open(featuresEFSFile, "r") as fp :
lines = fp.readlines()
lines.pop(0)
featuresML = [ lines[i].rstrip().split(',')[0] for i in range(0,100) ]
logFile = "feature-importance-" + methodName + ".log"
with open(logFile, "w") as fp :
commonFeatures = 0
for f in sortedFeatures[:100] :
if f[1] in featuresML :
commonFeatures += 1
string = "Feature \"" + f[1] + "\" is common to both ML and univariate feature selection."
print(string)
fp.write(string + "\n")
string = "\nA total of " + str(commonFeatures) + " features are common to method \"" + methodName + "\" and ensemble ML feature selection."
print(string)
fp.write(string + "\n")
string = "Total time taken by method \"" + methodName + "\": " + str(delta_time)
print(string)
fp.write(string + "\n")
return
if __name__ == "__main__" :
sys.exit( main() )
|
[
"sklearn.linear_model.ElasticNetCV",
"sklearn.feature_selection.SelectKBest",
"datetime.datetime.now",
"genericFunctions.loadTCGADataset",
"numpy.isnan",
"sklearn.svm.SVC"
] |
[((876, 894), 'sklearn.feature_selection.SelectKBest', 'SelectKBest', ([], {'k': '(100)'}), '(k=100)\n', (887, 894), False, 'from sklearn.feature_selection import SelectKBest\n'), ((1141, 1155), 'sklearn.linear_model.ElasticNetCV', 'ElasticNetCV', ([], {}), '()\n', (1153, 1155), False, 'from sklearn.linear_model import ElasticNetCV\n'), ((1271, 1305), 'genericFunctions.loadTCGADataset', 'genericFunctions.loadTCGADataset', ([], {}), '()\n', (1303, 1305), False, 'import genericFunctions\n'), ((1049, 1069), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (1052, 1069), False, 'from sklearn.svm import SVC\n'), ((1361, 1375), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1373, 1375), False, 'from datetime import datetime\n'), ((1576, 1590), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1588, 1590), False, 'from datetime import datetime\n'), ((2278, 2292), 'numpy.isnan', 'np.isnan', (['x[0]'], {}), '(x[0])\n', (2286, 2292), True, 'import numpy as np\n')]
|
"""Collage renderer."""
import itertools
import logger
import numpy
from PIL import Image, ImageEnhance
from distance_matrix import imageMSE
ENABLE_POST_OPTIMIZATION = True
def adjustImage(image, parameters):
"""Adjusts the brightness, contrast, and saturation of the given image."""
(brightness, contrast, saturation) = parameters
newImage = ImageEnhance.Brightness(image).enhance(brightness)
newImage = ImageEnhance.Contrast(newImage).enhance(contrast)
newImage = ImageEnhance.Color(newImage).enhance(saturation)
return newImage
def postOptimize(image, goalImage):
"""Adjusts the brightness, contrast, and saturation of the given image in such
a way that the MSE between the adjusted image and the goal image is minimized."""
if not ENABLE_POST_OPTIMIZATION:
return (1, 1, 1)
# Vary brightness, saturation, contrast to better match the goal image
brightnessSet = numpy.arange(0.6, 1.3, 0.05)
contrastSet = numpy.arange(0.9, 1.2, 0.05)
saturationSet = numpy.arange(1.0, 1.3, 0.05)
settings = itertools.product(brightnessSet, contrastSet, saturationSet)
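# (editorial note) this is an exhaustive grid search: roughly 14 x 6 x 6 (about 500)
# parameter combinations are rendered and scored with imageMSE below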
bestMSE = None
bestParameters = None
for parameters in settings:
newImage = adjustImage(image, parameters)
MSE = imageMSE(newImage, goalImage)
if bestMSE is None or MSE < bestMSE:
bestMSE = MSE
bestParameters = parameters
if not bestParameters:
raise Exception("Post-optimization failed")
return bestParameters
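# (editorial sketch) imageMSE is imported from the local distance_matrix module, which is
# not part of this file. A minimal stand-in with the assumed behaviour (mean squared error
# over the raw pixel values of two equally-sized images) could look like the hypothetical
# helper below; the real implementation may differ.
def _image_mse_sketch(imageA, imageB):
    """Mean squared error between two equally-sized PIL images."""
    a = numpy.asarray(imageA, dtype=numpy.float64)
    b = numpy.asarray(imageB, dtype=numpy.float64)
    return numpy.mean((a - b) ** 2)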
def renderCollage(solution, grid, sampleGrid, imageLibrary, outputFile, cheatFactor=0):
"""Post-optimizes the solution and renders the output."""
logger.info("Post-optimizing ...")
optimalParameters = {}
for i in range(grid.imageCountX):
logger.progress(i, grid.imageCountX)
for j in range(grid.imageCountY):
imageIndex = solution[i, j]
image = imageLibrary.images[imageIndex]
sampleImage = image.get(sampleGrid.imageWidth, sampleGrid.imageHeight).get()
optimalParameters[i, j] = postOptimize(sampleImage, sampleGrid[i, j].get())
logger.info("Rendering collage ...")
background = Image.new("RGB", grid.size, "white")
collage = Image.new("RGB", grid.size, "white")
for i in range(grid.imageCountX):
logger.progress(i, grid.imageCountX)
for j in range(grid.imageCountY):
offset = (i * grid.imageWidth, j * grid.imageHeight)
imageIndex = solution[i, j]
image = imageLibrary.images[imageIndex]
subImage = image.get(grid.imageWidth, grid.imageHeight).get()
image = adjustImage(subImage, optimalParameters[i, j])
background.paste(grid[i, j].get(), offset)
collage.paste(image, offset)
logger.info("Saving ...")
output = Image.blend(collage, background, cheatFactor)
output.save(outputFile)
|
[
"distance_matrix.imageMSE",
"PIL.Image.new",
"PIL.Image.blend",
"itertools.product",
"logger.info",
"PIL.ImageEnhance.Brightness",
"PIL.ImageEnhance.Contrast",
"PIL.ImageEnhance.Color",
"logger.progress",
"numpy.arange"
] |
[((898, 926), 'numpy.arange', 'numpy.arange', (['(0.6)', '(1.3)', '(0.05)'], {}), '(0.6, 1.3, 0.05)\n', (910, 926), False, 'import numpy\n'), ((943, 971), 'numpy.arange', 'numpy.arange', (['(0.9)', '(1.2)', '(0.05)'], {}), '(0.9, 1.2, 0.05)\n', (955, 971), False, 'import numpy\n'), ((990, 1018), 'numpy.arange', 'numpy.arange', (['(1.0)', '(1.3)', '(0.05)'], {}), '(1.0, 1.3, 0.05)\n', (1002, 1018), False, 'import numpy\n'), ((1032, 1092), 'itertools.product', 'itertools.product', (['brightnessSet', 'contrastSet', 'saturationSet'], {}), '(brightnessSet, contrastSet, saturationSet)\n', (1049, 1092), False, 'import itertools\n'), ((1569, 1603), 'logger.info', 'logger.info', (['"""Post-optimizing ..."""'], {}), "('Post-optimizing ...')\n", (1580, 1603), False, 'import logger\n'), ((1992, 2028), 'logger.info', 'logger.info', (['"""Rendering collage ..."""'], {}), "('Rendering collage ...')\n", (2003, 2028), False, 'import logger\n'), ((2044, 2080), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'grid.size', '"""white"""'], {}), "('RGB', grid.size, 'white')\n", (2053, 2080), False, 'from PIL import Image, ImageEnhance\n'), ((2093, 2129), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'grid.size', '"""white"""'], {}), "('RGB', grid.size, 'white')\n", (2102, 2129), False, 'from PIL import Image, ImageEnhance\n'), ((2600, 2625), 'logger.info', 'logger.info', (['"""Saving ..."""'], {}), "('Saving ...')\n", (2611, 2625), False, 'import logger\n'), ((2637, 2682), 'PIL.Image.blend', 'Image.blend', (['collage', 'background', 'cheatFactor'], {}), '(collage, background, cheatFactor)\n', (2648, 2682), False, 'from PIL import Image, ImageEnhance\n'), ((1197, 1226), 'distance_matrix.imageMSE', 'imageMSE', (['newImage', 'goalImage'], {}), '(newImage, goalImage)\n', (1205, 1226), False, 'from distance_matrix import imageMSE\n'), ((1669, 1705), 'logger.progress', 'logger.progress', (['i', 'grid.imageCountX'], {}), '(i, grid.imageCountX)\n', (1684, 1705), False, 'import logger\n'), ((2170, 2206), 'logger.progress', 'logger.progress', (['i', 'grid.imageCountX'], {}), '(i, grid.imageCountX)\n', (2185, 2206), False, 'import logger\n'), ((353, 383), 'PIL.ImageEnhance.Brightness', 'ImageEnhance.Brightness', (['image'], {}), '(image)\n', (376, 383), False, 'from PIL import Image, ImageEnhance\n'), ((417, 448), 'PIL.ImageEnhance.Contrast', 'ImageEnhance.Contrast', (['newImage'], {}), '(newImage)\n', (438, 448), False, 'from PIL import Image, ImageEnhance\n'), ((480, 508), 'PIL.ImageEnhance.Color', 'ImageEnhance.Color', (['newImage'], {}), '(newImage)\n', (498, 508), False, 'from PIL import Image, ImageEnhance\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 29 16:39:41 2021
@author: harsh
"""
import numpy as np
import math as mm
import opensees as op
import time as tt
##################################################################
# #
# Effective stress site response analysis for a layered #
# soil profile located on a 2% slope and underlain by an #
# elastic half-space. 9-node quadUP elements are used. #
# The finite rigidity of the elastic half space is #
# considered through the use of a viscous damper at the #
# base. #
# #
# Converted to openseespy by: <NAME> #
# The University of Manchester #
# #
# Created by: <NAME> #
# <NAME> #
# <NAME> #
# <NAME> #
# --University of Washington-- #
# #
# ---> Basic units are kN and m (unless specified) #
# #
##################################################################
#-----------------------------------------------------------------------------------------
# 1. DEFINE SOIL AND MESH GEOMETRY
#-----------------------------------------------------------------------------------------
op.wipe()
nodes_dict = dict()
#---SOIL GEOMETRY
# thicknesses of soil profile (m)
soilThick = 30.0
# number of soil layers
numLayers = 3
# layer thicknesses
layerThick=[20.0,8.0,2.0]
# depth of water table
waterTable = 2.0
# define layer boundaries
layerBound=np.zeros((numLayers,1))
layerBound[0]=layerThick[0];
for i in range(1,numLayers):
layerBound[i]=layerBound[i-1]+layerThick[i]
#---MESH GEOMETRY
# number of elements in horizontal direction
nElemX = 1
# number of nodes in horizontal direction
nNodeX =2 * nElemX+1
# horizontal element size (m)
sElemX = 2.0
# number of elements in vertical direction for each layer
nElemY = [40,16,4]
# total number of elements in vertical direction
nElemT = 60
sElemY = np.zeros((numLayers,1))
# vertical element size in each layer
for i in range(numLayers):
sElemY[i] = layerThick[i]/nElemY[i]
print('size:',sElemY[i])
# number of nodes in vertical direction
nNodeY = 2 * nElemT+1
# total number of nodes
nNodeT = nNodeX * nNodeY
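# (editorial check) with nElemX = 1 and nElemT = 40 + 16 + 4 = 60 this gives
# nNodeX = 3, nNodeY = 121 and nNodeT = 363 nodes in the 9-node quadUP mesh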
#-----------------------------------------------------------------------------------------
# 2. CREATE PORE PRESSURE NODES AND FIXITIES
#-----------------------------------------------------------------------------------------
op.model('basic', '-ndm', 2, '-ndf', 3)
count = 1
layerNodeCount = 0
dry_Node=np.zeros((500,1))
node_save=np.zeros((500,1))
# loop over soil layers
for k in range(1,numLayers+1):
# loop in horizontal direction
for i in range(1,nNodeX+1,2):
if k==1:
bump = 1
else:
bump = 0
j_end=2 * nElemY[k-1] + bump
for j in range(1,j_end+1,2):
xCoord = (i-1) * (sElemX/2)
yctr = j + layerNodeCount
yCoord = (yctr-1) * (np.float(sElemY[k-1]))/2
nodeNum = i + ((yctr-1) * nNodeX)
op.node(nodeNum, xCoord, yCoord)
# output nodal information to data file
nodes_dict[nodeNum] = (nodeNum, xCoord, yCoord)
node_save[nodeNum] = np.int(nodeNum)
# designate nodes above water table
waterHeight = soilThick - waterTable
if yCoord >= waterHeight:
dry_Node[count] = nodeNum
count = count+1
layerNodeCount = yctr + 1
dryNode=np.trim_zeros(dry_Node)
Node_d=np.unique(node_save)
Node_d=np.trim_zeros(Node_d)
np.savetxt('Node_record.txt',Node_d)
print('Finished creating all -ndf 3 nodes')
print('Number of Dry Nodes:',len(dryNode))
# define fixities for pore pressure nodes above water table
for i in range(count-1):
n_dryNode=np.int(dryNode[i])
op.fix(n_dryNode, 0, 0, 1)
op.fix(1, 0, 1, 0)
op.fix(3, 0, 1, 0)
print('Finished creating all -ndf 3 boundary conditions...')
# define equal degrees of freedom for pore pressure nodes
for i in range(1,((3*nNodeY)-2),6):
op.equalDOF(i, i+2, 1, 2)
print("Finished creating equalDOF for pore pressure nodes...")
#-----------------------------------------------------------------------------------------
# 3. CREATE INTERIOR NODES AND FIXITIES
#-----------------------------------------------------------------------------------------
op.model('basic', '-ndm', 2, '-ndf', 2)
xCoord = np.float(sElemX/2)
# loop over soil layers
layerNodeCount = 0
for k in range(1,numLayers+1):
if k==1:
bump = 1
else:
bump = 0
j_end=2 * nElemY[k-1] + bump
for j in range(1,j_end+1,1):
yctr = j + layerNodeCount
yCoord = (yctr-1) * (np.float(sElemY[k-1]))/2
nodeNum = (3*yctr) - 1
op.node(nodeNum, xCoord, yCoord)
# output nodal information to data file
nodes_dict[nodeNum] = (nodeNum, xCoord, yCoord)
layerNodeCount = yctr
# interior nodes on the element edges
# loop over layers
layerNodeCount = 0
for k in range(1,numLayers+1):
# loop in vertical direction
for j in range(1,((nElemY[k-1])+1)):
yctr = j + layerNodeCount;
yCoord = np.float(sElemY[k-1])*(yctr-0.5)
nodeNumL = (6*yctr) - 2
nodeNumR = nodeNumL + 2
op.node(nodeNumL ,0.0, yCoord)
op.node(nodeNumR , sElemX, yCoord)
# output nodal information to data file
nodes_dict[nodeNumL] = (nodeNumL ,0.0, yCoord)
nodes_dict[nodeNumR] = (nodeNumR , sElemX, yCoord)
layerNodeCount = yctr
print("Finished creating all -ndf 2 nodes...")
# define fixities for interior nodes at base of soil column
op.fix(2, 0, 1)
print('Finished creating all -ndf 2 boundary conditions...')
# define equal degrees of freedom which have not yet been defined
for i in range(1,((3*nNodeY)-6),6):
op.equalDOF(i , i+1, 1, 2)
op.equalDOF(i+3, i+4, 1, 2)
op.equalDOF(i+3, i+5, 1, 2)
op.equalDOF(nNodeT-2, nNodeT-1, 1, 2)
print('Finished creating equalDOF constraints...')
#-----------------------------------------------------------------------------------------
# 4. CREATE SOIL MATERIALS
#-----------------------------------------------------------------------------------------
# define grade of slope (%)
grade = 2.0
slope = mm.atan(grade/100.0)
g = -9.81
xwgt_var = g * (mm.sin(slope))
ywgt_var = g * (mm.cos(slope))
thick = [1.0,1.0,1.0]
xWgt = [xwgt_var, xwgt_var, xwgt_var]
yWgt = [ywgt_var, ywgt_var, ywgt_var]
uBulk = [6.88E6, 5.06E6, 5.0E-6]
hPerm = [1.0E-4, 1.0E-4, 1.0E-4]
vPerm = [1.0E-4, 1.0E-4, 1.0E-4]
# nDMaterial PressureDependMultiYield02
# nDMaterial('PressureDependMultiYield02', matTag, nd, rho, refShearModul, refBulkModul,\
# frictionAng, peakShearStra, refPress, pressDependCoe, PTAng,\
# contrac[0], contrac[2], dilat[0], dilat[2], noYieldSurf=20.0,\
# *yieldSurf=[], contrac[1]=5.0, dilat[1]=3.0, *liquefac=[1.0,0.0],e=0.6, \
# *params=[0.9, 0.02, 0.7, 101.0], c=0.1)
op.nDMaterial('PressureDependMultiYield02',3, 2, 1.8, 9.0e4, 2.2e5, 32, 0.1, \
101.0, 0.5, 26, 0.067, 0.23, 0.06, \
0.27, 20, 5.0, 3.0, 1.0, \
0.0, 0.77, 0.9, 0.02, 0.7, 101.0)
op.nDMaterial('PressureDependMultiYield02', 2, 2, 2.24, 9.0e4, 2.2e5, 32, 0.1, \
101.0, 0.5, 26, 0.067, 0.23, 0.06, \
0.27, 20, 5.0, 3.0, 1.0, \
0.0, 0.77, 0.9, 0.02, 0.7, 101.0)
op.nDMaterial('PressureDependMultiYield02',1, 2, 2.45, 1.3e5, 2.6e5, 39, 0.1, \
101.0, 0.5, 26, 0.010, 0.0, 0.35, \
0.0, 20, 5.0, 3.0, 1.0, \
0.0, 0.47, 0.9, 0.02, 0.7, 101.0)
print("Finished creating all soil materials...")
#-----------------------------------------------------------------------------------------
# 5. CREATE SOIL ELEMENTS
#-----------------------------------------------------------------------------------------
for j in range(1,nElemT+1):
nI = ( 6*j) - 5
nJ = nI + 2
nK = nI + 8
nL = nI + 6
nM = nI + 1
nN = nI + 5
nP = nI + 7
nQ = nI + 3
nR = nI + 4
lowerBound = 0.0
for i in range(1,numLayers+1):
if j * sElemY[i-1] <= layerBound[i-1] and j * sElemY[i-1] > lowerBound:
# permeabilities are initially set at 1.0 m/s for gravity analysis,
op.element('9_4_QuadUP', j, nI, nJ, nK, nL, nM, nN, nP, nQ, nR, \
thick[i-1], i, uBulk[i-1], 1.0, 1.0, 1.0, xWgt[i-1], yWgt[i-1])
lowerBound = layerBound[i-1]
print("Finished creating all soil elements...")
#-----------------------------------------------------------------------------------------
# 6. LYSMER DASHPOT
#-----------------------------------------------------------------------------------------
# define dashpot nodes
dashF = nNodeT+1
dashS = nNodeT+2
op.node(dashF, 0.0, 0.0)
op.node(dashS, 0.0, 0.0)
# define fixities for dashpot nodes
op.fix(dashF, 1, 1)
op.fix(dashS, 0, 1)
# define equal DOF for dashpot and base soil node
op.equalDOF(1, dashS, 1)
print('Finished creating dashpot nodes and boundary conditions...')
# define dashpot material
colArea = sElemX * thick[0]
rockVS = 700.0
rockDen = 2.5
dashpotCoeff = rockVS * rockDen
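# (editorial note) this is the Lysmer-Kuhlemeyer absorbing-boundary coefficient c = rho*Vs;
# assuming rockDen is in Mg/m^3, c = 2.5*700 = 1750 kN*s/m^3, which is multiplied by the
# 2.0 m^2 column area below to obtain the dashpot constant C = 3500 kN*s/m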
#uniaxialMaterial('Viscous', matTag, C, alpha)
op.uniaxialMaterial('Viscous', numLayers+1, dashpotCoeff * colArea, 1)
# define dashpot element
op.element('zeroLength', nElemT+1, dashF, dashS, '-mat', numLayers+1, '-dir', 1)
print("Finished creating dashpot material and element...")
#-----------------------------------------------------------------------------------------
# 7. CREATE GRAVITY RECORDERS
#-----------------------------------------------------------------------------------------
# create list for pore pressure nodes
load_nodeList3=np.loadtxt('Node_record.txt')
nodeList3=[]
for i in range(len(load_nodeList3)):
nodeList3.append(np.int(load_nodeList3[i]))
# record nodal displacment, acceleration, and porepressure
op.recorder('Node','-file','Gdisplacement.txt','-time','-node',*nodeList3,'-dof', 1, 2, 'disp')
op.recorder('Node','-file','Gacceleration.txt','-time','-node',*nodeList3,'-dof', 1, 2, 'accel')
op.recorder('Node','-file','GporePressure.txt','-time','-node',*nodeList3,'-dof', 3, 'vel')
# record elemental stress and strain (files are named to reflect GiD gp numbering)
op.recorder('Element','-file','Gstress1.txt','-time','-eleRange', 1,nElemT,'material','1','stress')
op.recorder('Element','-file','Gstress2.txt','-time','-eleRange', 1,nElemT,'material','2','stress')
op.recorder('Element','-file','Gstress3.txt','-time','-eleRange', 1,nElemT,'material','3','stress')
op.recorder('Element','-file','Gstress4.txt','-time','-eleRange', 1,nElemT,'material','4','stress')
op.recorder('Element','-file','Gstress9.txt','-time','-eleRange', 1,nElemT,'material','9','stress')
op.recorder('Element','-file','Gstrain1.txt','-time','-eleRange', 1,nElemT,'material','1','strain')
op.recorder('Element','-file','Gstrain2.txt','-time','-eleRange', 1,nElemT,'material','2','strain')
op.recorder('Element','-file','Gstrain3.txt','-time','-eleRange', 1,nElemT,'material','3','strain')
op.recorder('Element','-file','Gstrain4.txt','-time','-eleRange', 1,nElemT,'material','4','strain')
op.recorder('Element','-file','Gstrain9.txt','-time','-eleRange', 1,nElemT,'material','9','strain')
print("Finished creating gravity recorders...")
#-----------------------------------------------------------------------------------------
# 8. DEFINE ANALYSIS PARAMETERS
#-----------------------------------------------------------------------------------------
#---GROUND MOTION PARAMETERS
# time step in ground motion record
motionDT = 0.005
# number of steps in ground motion record
motionSteps = 7990
#---RAYLEIGH DAMPING PARAMETERS
# damping ratio
damp = 0.02
# lower frequency
omega1 = 2 * np.pi * 0.2
# upper frequency
omega2 = 2 * np.pi * 20
# damping coefficients
a0 = 2*damp*omega1*omega2/(omega1 + omega2)
a1 = 2*damp/(omega1 + omega2)
print("Damping Coefficients: a_0 = $a0; a_1 = $a1")
#---DETERMINE STABLE ANALYSIS TIME STEP USING CFL CONDITION
# maximum shear wave velocity (m/s)
vsMax = 250.0
# duration of ground motion (s)
duration = motionDT*motionSteps
# minimum element size
minSize = sElemY[0]
for i in range(2,numLayers+1):
if sElemY[i-1] <= minSize:
minSize = sElemY[i-1]
# trial analysis time step
kTrial = minSize/(vsMax**0.5)
# define time step and number of steps for analysis
if motionDT <= kTrial:
nSteps = motionSteps
dT = motionDT
else:
nSteps = np.int(mm.floor(duration/kTrial)+1)
dT = duration/nSteps
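# (editorial check) every layer here works out to 0.5 m elements, so minSize = 0.5 m and
# kTrial = 0.5/sqrt(250) ~ 0.032 s; since motionDT = 0.005 s <= kTrial, the analysis
# keeps dT = 0.005 s and nSteps = 7990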
print("Number of steps in analysis: $nSteps")
print("Analysis time step: $dT")
#---ANALYSIS PARAMETERS
# Newmark parameters
gamma = 0.5
beta = 0.25
#-----------------------------------------------------------------------------------------
# 9. GRAVITY ANALYSIS
#-----------------------------------------------------------------------------------------
# update materials to ensure elastic behavior
op.updateMaterialStage('-material', 1, '-stage', 0)
op.updateMaterialStage('-material', 2, '-stage', 0)
op.updateMaterialStage('-material', 3, '-stage', 0)
op.constraints('Penalty', 1.0E14, 1.0E14)
op.test('NormDispIncr', 1e-4, 35, 1)
op.algorithm('KrylovNewton')
op.numberer('RCM')
op.system('ProfileSPD')
op.integrator('Newmark', gamma, beta)
op.analysis('Transient')
startT = tt.time()
op.analyze(10, 5.0E2)
print('Finished with elastic gravity analysis...')
# update material to consider elastoplastic behavior
op.updateMaterialStage('-material', 1, '-stage', 1)
op.updateMaterialStage('-material', 2, '-stage', 1)
op.updateMaterialStage('-material', 3, '-stage', 1)
# plastic gravity loading
op.analyze(40, 5.0e2)
print('Finished with plastic gravity analysis...')
#-----------------------------------------------------------------------------------------
# 10. UPDATE ELEMENT PERMEABILITY VALUES FOR POST-GRAVITY ANALYSIS
#-----------------------------------------------------------------------------------------
# choose base number for parameter IDs which is higher than other tags used in analysis
ctr = 10000.0
# loop over elements to define parameter IDs
for i in range(1,nElemT+1):
op.parameter(np.int(ctr+1.0), 'element', i, 'vPerm')
op.parameter(np.int(ctr+2.0), 'element', i, 'hPerm')
ctr = ctr+2.0
# update permeability parameters for each element using parameter IDs
ctr = 10000.0
for j in range(1,nElemT+1):
lowerBound = 0.0
for i in range(1,numLayers+1):
if j * sElemY[i-1] <= layerBound[i-1] and j*sElemY[i-1] > lowerBound:
op.updateParameter(np.int(ctr+1.0), vPerm[i-1])
op.updateParameter(np.int(ctr+2.0), hPerm[i-1])
lowerBound = layerBound[i-1]
ctr = ctr+2.0
print("Finished updating permeabilities for dynamic analysis...")
#-----------------------------------------------------------------------------------------
# 11. CREATE POST-GRAVITY RECORDERS
#-----------------------------------------------------------------------------------------
# reset time and analysis
op.setTime(0.0)
op.wipeAnalysis()
op.remove('recorders')
# recorder time step
recDT = 10*motionDT
# record nodal displacment, acceleration, and porepressure
op.recorder('Node','-file','displacement.txt','-time', '-dT',recDT,'-node',*nodeList3,'-dof', 1, 2, 'disp')
op.recorder('Node','-file','acceleration.txt','-time', '-dT',recDT,'-node',*nodeList3,'-dof', 1, 2, 'accel')
op.recorder('Node','-file','porePressure.txt','-time', '-dT',recDT,'-node',*nodeList3,'-dof', 3, 'vel')
# record elemental stress and strain (files are named to reflect GiD gp numbering)
op.recorder('Element','-file','stress1.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','1','stress')
op.recorder('Element','-file','stress2.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','2','stress')
op.recorder('Element','-file','stress3.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','3','stress')
op.recorder('Element','-file','stress4.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','4','stress')
op.recorder('Element','-file','stress9.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','9','stress')
op.recorder('Element','-file','strain1.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','1','strain')
op.recorder('Element','-file','strain2.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','2','strain')
op.recorder('Element','-file','strain3.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','3','strain')
op.recorder('Element','-file','strain4.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','4','strain')
op.recorder('Element','-file','strain9.txt','-time', '-dT',recDT,'-eleRange', 1,nElemT,'material','9','strain')
print("Finished creating all recorders...")
#-----------------------------------------------------------------------------------------
# 12. DYNAMIC ANALYSIS
#-----------------------------------------------------------------------------------------
op.model('basic', '-ndm', 2, '-ndf', 3)
# define constant scaling factor for applied velocity
cFactor = colArea * dashpotCoeff
# define velocity time history file
velocityFile='velocityHistory';
data_gm=np.loadtxt('velocityHistory.txt')
#motionSteps=len(data_gm)
#print('Number of point for GM:',motionSteps)
# timeseries object for force history
op.timeSeries('Path', 2, '-dt', motionDT, '-filePath', velocityFile+'.txt', '-factor', cFactor)
op.pattern('Plain', 10, 2)
op.load(1, 1.0, 0.0, 0.0)
print( "Dynamic loading created...")
op.constraints('Penalty', 1.0E16, 1.0E16)
op.test('NormDispIncr', 1e-3, 35, 1)
op.algorithm('KrylovNewton')
op.numberer('RCM')
op.system('ProfileSPD')
op.integrator('Newmark', gamma, beta)
op.rayleigh(a0, a1, 0.0, 0.0)
op.analysis('Transient')
# perform analysis with timestep reduction loop
ok = op.analyze(nSteps,dT)
# if analysis fails, reduce timestep and continue with analysis
if ok !=0:
print("did not converge, reducing time step")
curTime = op.getTime()
mTime = curTime
print("curTime: ", curTime)
curStep = curTime/dT
print("curStep: ", curStep)
rStep = (nSteps-curStep)*2.0
remStep = np.int((nSteps-curStep)*2.0)
print("remStep: ", remStep)
dT = dT/2.0
print("dT: ", dT)
ok = op.analyze(remStep, dT)
# if analysis fails again, reduce timestep and continue with analysis
if ok !=0:
print("did not converge, reducing time step")
curTime = op.getTime()
print("curTime: ", curTime)
curStep = (curTime-mTime)/dT
print("curStep: ", curStep)
remStep = np.int((rStep-curStep)*2.0)
print("remStep: ", remStep)
dT = dT/2.0
print("dT: ", dT)
ok = op.analyze(remStep, dT)
endT = tt.time()
print("Finished with dynamic analysis...")
print("Analysis execution time: ",(endT-startT))
op.wipe()
|
[
"opensees.updateMaterialStage",
"opensees.wipe",
"math.floor",
"opensees.nDMaterial",
"opensees.pattern",
"math.cos",
"opensees.getTime",
"opensees.numberer",
"opensees.integrator",
"opensees.constraints",
"math.atan",
"opensees.element",
"opensees.wipeAnalysis",
"opensees.timeSeries",
"opensees.analyze",
"opensees.load",
"opensees.model",
"opensees.node",
"numpy.trim_zeros",
"opensees.fix",
"opensees.test",
"opensees.uniaxialMaterial",
"numpy.savetxt",
"time.time",
"numpy.int",
"opensees.system",
"opensees.rayleigh",
"numpy.float",
"numpy.unique",
"opensees.analysis",
"opensees.remove",
"opensees.setTime",
"numpy.zeros",
"opensees.recorder",
"numpy.loadtxt",
"math.sin",
"opensees.equalDOF",
"opensees.algorithm"
] |
[((1778, 1787), 'opensees.wipe', 'op.wipe', ([], {}), '()\n', (1785, 1787), True, 'import opensees as op\n'), ((2056, 2080), 'numpy.zeros', 'np.zeros', (['(numLayers, 1)'], {}), '((numLayers, 1))\n', (2064, 2080), True, 'import numpy as np\n'), ((2540, 2564), 'numpy.zeros', 'np.zeros', (['(numLayers, 1)'], {}), '((numLayers, 1))\n', (2548, 2564), True, 'import numpy as np\n'), ((3065, 3104), 'opensees.model', 'op.model', (['"""basic"""', '"""-ndm"""', '(2)', '"""-ndf"""', '(3)'], {}), "('basic', '-ndm', 2, '-ndf', 3)\n", (3073, 3104), True, 'import opensees as op\n'), ((3149, 3167), 'numpy.zeros', 'np.zeros', (['(500, 1)'], {}), '((500, 1))\n', (3157, 3167), True, 'import numpy as np\n'), ((3178, 3196), 'numpy.zeros', 'np.zeros', (['(500, 1)'], {}), '((500, 1))\n', (3186, 3196), True, 'import numpy as np\n'), ((4130, 4153), 'numpy.trim_zeros', 'np.trim_zeros', (['dry_Node'], {}), '(dry_Node)\n', (4143, 4153), True, 'import numpy as np\n'), ((4162, 4182), 'numpy.unique', 'np.unique', (['node_save'], {}), '(node_save)\n', (4171, 4182), True, 'import numpy as np\n'), ((4199, 4220), 'numpy.trim_zeros', 'np.trim_zeros', (['Node_d'], {}), '(Node_d)\n', (4212, 4220), True, 'import numpy as np\n'), ((4222, 4259), 'numpy.savetxt', 'np.savetxt', (['"""Node_record.txt"""', 'Node_d'], {}), "('Node_record.txt', Node_d)\n", (4232, 4259), True, 'import numpy as np\n'), ((4518, 4536), 'opensees.fix', 'op.fix', (['(1)', '(0)', '(1)', '(0)'], {}), '(1, 0, 1, 0)\n', (4524, 4536), True, 'import opensees as op\n'), ((4538, 4556), 'opensees.fix', 'op.fix', (['(3)', '(0)', '(1)', '(0)'], {}), '(3, 0, 1, 0)\n', (4544, 4556), True, 'import opensees as op\n'), ((5051, 5090), 'opensees.model', 'op.model', (['"""basic"""', '"""-ndm"""', '(2)', '"""-ndf"""', '(2)'], {}), "('basic', '-ndm', 2, '-ndf', 2)\n", (5059, 5090), True, 'import opensees as op\n'), ((5104, 5124), 'numpy.float', 'np.float', (['(sElemX / 2)'], {}), '(sElemX / 2)\n', (5112, 5124), True, 'import numpy as np\n'), ((6413, 6428), 'opensees.fix', 'op.fix', (['(2)', '(0)', '(1)'], {}), '(2, 0, 1)\n', (6419, 6428), True, 'import opensees as op\n'), ((6699, 6740), 'opensees.equalDOF', 'op.equalDOF', (['(nNodeT - 2)', '(nNodeT - 1)', '(1)', '(2)'], {}), '(nNodeT - 2, nNodeT - 1, 1, 2)\n', (6710, 6740), True, 'import opensees as op\n'), ((7057, 7079), 'math.atan', 'mm.atan', (['(grade / 100.0)'], {}), '(grade / 100.0)\n', (7064, 7079), True, 'import math as mm\n'), ((7793, 7979), 'opensees.nDMaterial', 'op.nDMaterial', (['"""PressureDependMultiYield02"""', '(3)', '(2)', '(1.8)', '(90000.0)', '(220000.0)', '(32)', '(0.1)', '(101.0)', '(0.5)', '(26)', '(0.067)', '(0.23)', '(0.06)', '(0.27)', '(20)', '(5.0)', '(3.0)', '(1.0)', '(0.0)', '(0.77)', '(0.9)', '(0.02)', '(0.7)', '(101.0)'], {}), "('PressureDependMultiYield02', 3, 2, 1.8, 90000.0, 220000.0, \n 32, 0.1, 101.0, 0.5, 26, 0.067, 0.23, 0.06, 0.27, 20, 5.0, 3.0, 1.0, \n 0.0, 0.77, 0.9, 0.02, 0.7, 101.0)\n", (7806, 7979), True, 'import opensees as op\n'), ((8090, 8277), 'opensees.nDMaterial', 'op.nDMaterial', (['"""PressureDependMultiYield02"""', '(2)', '(2)', '(2.24)', '(90000.0)', '(220000.0)', '(32)', '(0.1)', '(101.0)', '(0.5)', '(26)', '(0.067)', '(0.23)', '(0.06)', '(0.27)', '(20)', '(5.0)', '(3.0)', '(1.0)', '(0.0)', '(0.77)', '(0.9)', '(0.02)', '(0.7)', '(101.0)'], {}), "('PressureDependMultiYield02', 2, 2, 2.24, 90000.0, 220000.0, \n 32, 0.1, 101.0, 0.5, 26, 0.067, 0.23, 0.06, 0.27, 20, 5.0, 3.0, 1.0, \n 0.0, 0.77, 0.9, 0.02, 0.7, 101.0)\n", (8103, 8277), True, 'import opensees as op\n'), ((8393, 
8577), 'opensees.nDMaterial', 'op.nDMaterial', (['"""PressureDependMultiYield02"""', '(1)', '(2)', '(2.45)', '(130000.0)', '(260000.0)', '(39)', '(0.1)', '(101.0)', '(0.5)', '(26)', '(0.01)', '(0.0)', '(0.35)', '(0.0)', '(20)', '(5.0)', '(3.0)', '(1.0)', '(0.0)', '(0.47)', '(0.9)', '(0.02)', '(0.7)', '(101.0)'], {}), "('PressureDependMultiYield02', 1, 2, 2.45, 130000.0, 260000.0,\n 39, 0.1, 101.0, 0.5, 26, 0.01, 0.0, 0.35, 0.0, 20, 5.0, 3.0, 1.0, 0.0, \n 0.47, 0.9, 0.02, 0.7, 101.0)\n", (8406, 8577), True, 'import opensees as op\n'), ((9933, 9957), 'opensees.node', 'op.node', (['dashF', '(0.0)', '(0.0)'], {}), '(dashF, 0.0, 0.0)\n', (9940, 9957), True, 'import opensees as op\n'), ((9960, 9984), 'opensees.node', 'op.node', (['dashS', '(0.0)', '(0.0)'], {}), '(dashS, 0.0, 0.0)\n', (9967, 9984), True, 'import opensees as op\n'), ((10026, 10045), 'opensees.fix', 'op.fix', (['dashF', '(1)', '(1)'], {}), '(dashF, 1, 1)\n', (10032, 10045), True, 'import opensees as op\n'), ((10047, 10066), 'opensees.fix', 'op.fix', (['dashS', '(0)', '(1)'], {}), '(dashS, 0, 1)\n', (10053, 10066), True, 'import opensees as op\n'), ((10121, 10145), 'opensees.equalDOF', 'op.equalDOF', (['(1)', 'dashS', '(1)'], {}), '(1, dashS, 1)\n', (10132, 10145), True, 'import opensees as op\n'), ((10405, 10477), 'opensees.uniaxialMaterial', 'op.uniaxialMaterial', (['"""Viscous"""', '(numLayers + 1)', '(dashpotCoeff * colArea)', '(1)'], {}), "('Viscous', numLayers + 1, dashpotCoeff * colArea, 1)\n", (10424, 10477), True, 'import opensees as op\n'), ((10505, 10593), 'opensees.element', 'op.element', (['"""zeroLength"""', '(nElemT + 1)', 'dashF', 'dashS', '"""-mat"""', '(numLayers + 1)', '"""-dir"""', '(1)'], {}), "('zeroLength', nElemT + 1, dashF, dashS, '-mat', numLayers + 1,\n '-dir', 1)\n", (10515, 10593), True, 'import opensees as op\n'), ((10923, 10952), 'numpy.loadtxt', 'np.loadtxt', (['"""Node_record.txt"""'], {}), "('Node_record.txt')\n", (10933, 10952), True, 'import numpy as np\n'), ((11117, 11223), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""Gdisplacement.txt"""', '"""-time"""', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""disp"""'], {}), "('Node', '-file', 'Gdisplacement.txt', '-time', '-node', *\n nodeList3, '-dof', 1, 2, 'disp')\n", (11128, 11223), True, 'import opensees as op\n'), ((11214, 11321), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""Gacceleration.txt"""', '"""-time"""', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""accel"""'], {}), "('Node', '-file', 'Gacceleration.txt', '-time', '-node', *\n nodeList3, '-dof', 1, 2, 'accel')\n", (11225, 11321), True, 'import opensees as op\n'), ((11312, 11414), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""GporePressure.txt"""', '"""-time"""', '"""-node"""', '*nodeList3', '"""-dof"""', '(3)', '"""vel"""'], {}), "('Node', '-file', 'GporePressure.txt', '-time', '-node', *\n nodeList3, '-dof', 3, 'vel')\n", (11323, 11414), True, 'import opensees as op\n'), ((11491, 11602), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress1.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress1.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '1', 'stress')\n", (11502, 11602), True, 'import opensees as op\n'), ((11592, 11703), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress2.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 
'nElemT', '"""material"""', '"""2"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress2.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '2', 'stress')\n", (11603, 11703), True, 'import opensees as op\n'), ((11693, 11804), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress3.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress3.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '3', 'stress')\n", (11704, 11804), True, 'import opensees as op\n'), ((11794, 11905), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress4.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress4.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '4', 'stress')\n", (11805, 11905), True, 'import opensees as op\n'), ((11895, 12006), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstress9.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""stress"""'], {}), "('Element', '-file', 'Gstress9.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '9', 'stress')\n", (11906, 12006), True, 'import opensees as op\n'), ((11996, 12107), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain1.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain1.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '1', 'strain')\n", (12007, 12107), True, 'import opensees as op\n'), ((12097, 12208), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain2.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain2.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '2', 'strain')\n", (12108, 12208), True, 'import opensees as op\n'), ((12198, 12309), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain3.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain3.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '3', 'strain')\n", (12209, 12309), True, 'import opensees as op\n'), ((12299, 12410), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain4.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain4.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '4', 'strain')\n", (12310, 12410), True, 'import opensees as op\n'), ((12400, 12511), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""Gstrain9.txt"""', '"""-time"""', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""strain"""'], {}), "('Element', '-file', 'Gstrain9.txt', '-time', '-eleRange', 1,\n nElemT, 'material', '9', 'strain')\n", (12411, 12511), True, 'import opensees as op\n'), ((14249, 14300), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(1)', '"""-stage"""', '(0)'], {}), "('-material', 1, '-stage', 0)\n", (14271, 14300), True, 'import opensees as op\n'), ((14302, 14353), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(2)', '"""-stage"""', '(0)'], {}), "('-material', 2, '-stage', 0)\n", (14324, 14353), True, 'import 
opensees as op\n'), ((14355, 14406), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(3)', '"""-stage"""', '(0)'], {}), "('-material', 3, '-stage', 0)\n", (14377, 14406), True, 'import opensees as op\n'), ((14410, 14473), 'opensees.constraints', 'op.constraints', (['"""Penalty"""', '(100000000000000.0)', '(100000000000000.0)'], {}), "('Penalty', 100000000000000.0, 100000000000000.0)\n", (14424, 14473), True, 'import opensees as op\n'), ((14453, 14491), 'opensees.test', 'op.test', (['"""NormDispIncr"""', '(0.0001)', '(35)', '(1)'], {}), "('NormDispIncr', 0.0001, 35, 1)\n", (14460, 14491), True, 'import opensees as op\n'), ((14491, 14519), 'opensees.algorithm', 'op.algorithm', (['"""KrylovNewton"""'], {}), "('KrylovNewton')\n", (14503, 14519), True, 'import opensees as op\n'), ((14521, 14539), 'opensees.numberer', 'op.numberer', (['"""RCM"""'], {}), "('RCM')\n", (14532, 14539), True, 'import opensees as op\n'), ((14541, 14564), 'opensees.system', 'op.system', (['"""ProfileSPD"""'], {}), "('ProfileSPD')\n", (14550, 14564), True, 'import opensees as op\n'), ((14566, 14603), 'opensees.integrator', 'op.integrator', (['"""Newmark"""', 'gamma', 'beta'], {}), "('Newmark', gamma, beta)\n", (14579, 14603), True, 'import opensees as op\n'), ((14605, 14629), 'opensees.analysis', 'op.analysis', (['"""Transient"""'], {}), "('Transient')\n", (14616, 14629), True, 'import opensees as op\n'), ((14642, 14651), 'time.time', 'tt.time', ([], {}), '()\n', (14649, 14651), True, 'import time as tt\n'), ((14653, 14674), 'opensees.analyze', 'op.analyze', (['(10)', '(500.0)'], {}), '(10, 500.0)\n', (14663, 14674), True, 'import opensees as op\n'), ((14784, 14835), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(1)', '"""-stage"""', '(1)'], {}), "('-material', 1, '-stage', 1)\n", (14806, 14835), True, 'import opensees as op\n'), ((14837, 14888), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(2)', '"""-stage"""', '(1)'], {}), "('-material', 2, '-stage', 1)\n", (14859, 14888), True, 'import opensees as op\n'), ((14890, 14941), 'opensees.updateMaterialStage', 'op.updateMaterialStage', (['"""-material"""', '(3)', '"""-stage"""', '(1)'], {}), "('-material', 3, '-stage', 1)\n", (14912, 14941), True, 'import opensees as op\n'), ((14972, 14993), 'opensees.analyze', 'op.analyze', (['(40)', '(500.0)'], {}), '(40, 500.0)\n', (14982, 14993), True, 'import opensees as op\n'), ((16377, 16392), 'opensees.setTime', 'op.setTime', (['(0.0)'], {}), '(0.0)\n', (16387, 16392), True, 'import opensees as op\n'), ((16394, 16411), 'opensees.wipeAnalysis', 'op.wipeAnalysis', ([], {}), '()\n', (16409, 16411), True, 'import opensees as op\n'), ((16413, 16435), 'opensees.remove', 'op.remove', (['"""recorders"""'], {}), "('recorders')\n", (16422, 16435), True, 'import opensees as op\n'), ((16544, 16662), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""displacement.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""disp"""'], {}), "('Node', '-file', 'displacement.txt', '-time', '-dT', recDT,\n '-node', *nodeList3, '-dof', 1, 2, 'disp')\n", (16555, 16662), True, 'import opensees as op\n'), ((16653, 16772), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""acceleration.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-node"""', '*nodeList3', '"""-dof"""', '(1)', '(2)', '"""accel"""'], {}), "('Node', '-file', 'acceleration.txt', '-time', '-dT', recDT,\n 
'-node', *nodeList3, '-dof', 1, 2, 'accel')\n", (16664, 16772), True, 'import opensees as op\n'), ((16763, 16877), 'opensees.recorder', 'op.recorder', (['"""Node"""', '"""-file"""', '"""porePressure.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-node"""', '*nodeList3', '"""-dof"""', '(3)', '"""vel"""'], {}), "('Node', '-file', 'porePressure.txt', '-time', '-dT', recDT,\n '-node', *nodeList3, '-dof', 3, 'vel')\n", (16774, 16877), True, 'import opensees as op\n'), ((16954, 17078), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress1.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""stress"""'], {}), "('Element', '-file', 'stress1.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '1', 'stress')\n", (16965, 17078), True, 'import opensees as op\n'), ((17067, 17191), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress2.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""stress"""'], {}), "('Element', '-file', 'stress2.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '2', 'stress')\n", (17078, 17191), True, 'import opensees as op\n'), ((17180, 17304), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress3.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""stress"""'], {}), "('Element', '-file', 'stress3.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '3', 'stress')\n", (17191, 17304), True, 'import opensees as op\n'), ((17293, 17417), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress4.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""stress"""'], {}), "('Element', '-file', 'stress4.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '4', 'stress')\n", (17304, 17417), True, 'import opensees as op\n'), ((17406, 17530), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""stress9.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""stress"""'], {}), "('Element', '-file', 'stress9.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '9', 'stress')\n", (17417, 17530), True, 'import opensees as op\n'), ((17519, 17643), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain1.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""1"""', '"""strain"""'], {}), "('Element', '-file', 'strain1.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '1', 'strain')\n", (17530, 17643), True, 'import opensees as op\n'), ((17632, 17756), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain2.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""2"""', '"""strain"""'], {}), "('Element', '-file', 'strain2.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '2', 'strain')\n", (17643, 17756), True, 'import opensees as op\n'), ((17745, 17869), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain3.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""3"""', '"""strain"""'], {}), "('Element', '-file', 'strain3.txt', '-time', '-dT', 
recDT,\n '-eleRange', 1, nElemT, 'material', '3', 'strain')\n", (17756, 17869), True, 'import opensees as op\n'), ((17858, 17982), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain4.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""4"""', '"""strain"""'], {}), "('Element', '-file', 'strain4.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '4', 'strain')\n", (17869, 17982), True, 'import opensees as op\n'), ((17971, 18095), 'opensees.recorder', 'op.recorder', (['"""Element"""', '"""-file"""', '"""strain9.txt"""', '"""-time"""', '"""-dT"""', 'recDT', '"""-eleRange"""', '(1)', 'nElemT', '"""material"""', '"""9"""', '"""strain"""'], {}), "('Element', '-file', 'strain9.txt', '-time', '-dT', recDT,\n '-eleRange', 1, nElemT, 'material', '9', 'strain')\n", (17982, 18095), True, 'import opensees as op\n'), ((18342, 18381), 'opensees.model', 'op.model', (['"""basic"""', '"""-ndm"""', '(2)', '"""-ndf"""', '(3)'], {}), "('basic', '-ndm', 2, '-ndf', 3)\n", (18350, 18381), True, 'import opensees as op\n'), ((18554, 18587), 'numpy.loadtxt', 'np.loadtxt', (['"""velocityHistory.txt"""'], {}), "('velocityHistory.txt')\n", (18564, 18587), True, 'import numpy as np\n'), ((18704, 18805), 'opensees.timeSeries', 'op.timeSeries', (['"""Path"""', '(2)', '"""-dt"""', 'motionDT', '"""-filePath"""', "(velocityFile + '.txt')", '"""-factor"""', 'cFactor'], {}), "('Path', 2, '-dt', motionDT, '-filePath', velocityFile +\n '.txt', '-factor', cFactor)\n", (18717, 18805), True, 'import opensees as op\n'), ((18801, 18827), 'opensees.pattern', 'op.pattern', (['"""Plain"""', '(10)', '(2)'], {}), "('Plain', 10, 2)\n", (18811, 18827), True, 'import opensees as op\n'), ((18829, 18854), 'opensees.load', 'op.load', (['(1)', '(1.0)', '(0.0)', '(0.0)'], {}), '(1, 1.0, 0.0, 0.0)\n', (18836, 18854), True, 'import opensees as op\n'), ((18898, 18937), 'opensees.constraints', 'op.constraints', (['"""Penalty"""', '(1e+16)', '(1e+16)'], {}), "('Penalty', 1e+16, 1e+16)\n", (18912, 18937), True, 'import opensees as op\n'), ((18941, 18978), 'opensees.test', 'op.test', (['"""NormDispIncr"""', '(0.001)', '(35)', '(1)'], {}), "('NormDispIncr', 0.001, 35, 1)\n", (18948, 18978), True, 'import opensees as op\n'), ((18979, 19007), 'opensees.algorithm', 'op.algorithm', (['"""KrylovNewton"""'], {}), "('KrylovNewton')\n", (18991, 19007), True, 'import opensees as op\n'), ((19009, 19027), 'opensees.numberer', 'op.numberer', (['"""RCM"""'], {}), "('RCM')\n", (19020, 19027), True, 'import opensees as op\n'), ((19029, 19052), 'opensees.system', 'op.system', (['"""ProfileSPD"""'], {}), "('ProfileSPD')\n", (19038, 19052), True, 'import opensees as op\n'), ((19054, 19091), 'opensees.integrator', 'op.integrator', (['"""Newmark"""', 'gamma', 'beta'], {}), "('Newmark', gamma, beta)\n", (19067, 19091), True, 'import opensees as op\n'), ((19093, 19122), 'opensees.rayleigh', 'op.rayleigh', (['a0', 'a1', '(0.0)', '(0.0)'], {}), '(a0, a1, 0.0, 0.0)\n', (19104, 19122), True, 'import opensees as op\n'), ((19124, 19148), 'opensees.analysis', 'op.analysis', (['"""Transient"""'], {}), "('Transient')\n", (19135, 19148), True, 'import opensees as op\n'), ((19206, 19228), 'opensees.analyze', 'op.analyze', (['nSteps', 'dT'], {}), '(nSteps, dT)\n', (19216, 19228), True, 'import opensees as op\n'), ((20172, 20181), 'time.time', 'tt.time', ([], {}), '()\n', (20179, 20181), True, 'import time as tt\n'), ((20277, 20286), 'opensees.wipe', 'op.wipe', ([], {}), '()\n', (20284, 
20286), True, 'import opensees as op\n'), ((4460, 4478), 'numpy.int', 'np.int', (['dryNode[i]'], {}), '(dryNode[i])\n', (4466, 4478), True, 'import numpy as np\n'), ((4484, 4510), 'opensees.fix', 'op.fix', (['n_dryNode', '(0)', '(0)', '(1)'], {}), '(n_dryNode, 0, 0, 1)\n', (4490, 4510), True, 'import opensees as op\n'), ((4726, 4753), 'opensees.equalDOF', 'op.equalDOF', (['i', '(i + 2)', '(1)', '(2)'], {}), '(i, i + 2, 1, 2)\n', (4737, 4753), True, 'import opensees as op\n'), ((6602, 6629), 'opensees.equalDOF', 'op.equalDOF', (['i', '(i + 1)', '(1)', '(2)'], {}), '(i, i + 1, 1, 2)\n', (6613, 6629), True, 'import opensees as op\n'), ((6635, 6666), 'opensees.equalDOF', 'op.equalDOF', (['(i + 3)', '(i + 4)', '(1)', '(2)'], {}), '(i + 3, i + 4, 1, 2)\n', (6646, 6666), True, 'import opensees as op\n'), ((6668, 6699), 'opensees.equalDOF', 'op.equalDOF', (['(i + 3)', '(i + 5)', '(1)', '(2)'], {}), '(i + 3, i + 5, 1, 2)\n', (6679, 6699), True, 'import opensees as op\n'), ((7112, 7125), 'math.sin', 'mm.sin', (['slope'], {}), '(slope)\n', (7118, 7125), True, 'import math as mm\n'), ((7144, 7157), 'math.cos', 'mm.cos', (['slope'], {}), '(slope)\n', (7150, 7157), True, 'import math as mm\n'), ((19373, 19385), 'opensees.getTime', 'op.getTime', ([], {}), '()\n', (19383, 19385), True, 'import opensees as op\n'), ((19550, 19582), 'numpy.int', 'np.int', (['((nSteps - curStep) * 2.0)'], {}), '((nSteps - curStep) * 2.0)\n', (19556, 19582), True, 'import numpy as np\n'), ((19664, 19687), 'opensees.analyze', 'op.analyze', (['remStep', 'dT'], {}), '(remStep, dT)\n', (19674, 19687), True, 'import opensees as op\n'), ((5470, 5502), 'opensees.node', 'op.node', (['nodeNum', 'xCoord', 'yCoord'], {}), '(nodeNum, xCoord, yCoord)\n', (5477, 5502), True, 'import opensees as op\n'), ((6014, 6044), 'opensees.node', 'op.node', (['nodeNumL', '(0.0)', 'yCoord'], {}), '(nodeNumL, 0.0, yCoord)\n', (6021, 6044), True, 'import opensees as op\n'), ((6054, 6087), 'opensees.node', 'op.node', (['nodeNumR', 'sElemX', 'yCoord'], {}), '(nodeNumR, sElemX, yCoord)\n', (6061, 6087), True, 'import opensees as op\n'), ((11029, 11054), 'numpy.int', 'np.int', (['load_nodeList3[i]'], {}), '(load_nodeList3[i])\n', (11035, 11054), True, 'import numpy as np\n'), ((15502, 15519), 'numpy.int', 'np.int', (['(ctr + 1.0)'], {}), '(ctr + 1.0)\n', (15508, 15519), True, 'import numpy as np\n'), ((15560, 15577), 'numpy.int', 'np.int', (['(ctr + 2.0)'], {}), '(ctr + 2.0)\n', (15566, 15577), True, 'import numpy as np\n'), ((19857, 19869), 'opensees.getTime', 'op.getTime', ([], {}), '()\n', (19867, 19869), True, 'import opensees as op\n'), ((20001, 20032), 'numpy.int', 'np.int', (['((rStep - curStep) * 2.0)'], {}), '((rStep - curStep) * 2.0)\n', (20007, 20032), True, 'import numpy as np\n'), ((20134, 20157), 'opensees.analyze', 'op.analyze', (['remStep', 'dT'], {}), '(remStep, dT)\n', (20144, 20157), True, 'import opensees as op\n'), ((3679, 3711), 'opensees.node', 'op.node', (['nodeNum', 'xCoord', 'yCoord'], {}), '(nodeNum, xCoord, yCoord)\n', (3686, 3711), True, 'import opensees as op\n'), ((3860, 3875), 'numpy.int', 'np.int', (['nodeNum'], {}), '(nodeNum)\n', (3866, 3875), True, 'import numpy as np\n'), ((5896, 5919), 'numpy.float', 'np.float', (['sElemY[k - 1]'], {}), '(sElemY[k - 1])\n', (5904, 5919), True, 'import numpy as np\n'), ((9383, 9523), 'opensees.element', 'op.element', (['"""9_4_QuadUP"""', 'j', 'nI', 'nJ', 'nK', 'nL', 'nM', 'nN', 'nP', 'nQ', 'nR', 'thick[i - 1]', 'i', 'uBulk[i - 1]', '(1.0)', '(1.0)', '(1.0)', 'xWgt[i - 1]', 'yWgt[i - 1]'], 
{}), "('9_4_QuadUP', j, nI, nJ, nK, nL, nM, nN, nP, nQ, nR, thick[i - 1\n ], i, uBulk[i - 1], 1.0, 1.0, 1.0, xWgt[i - 1], yWgt[i - 1])\n", (9393, 9523), True, 'import opensees as op\n'), ((13767, 13794), 'math.floor', 'mm.floor', (['(duration / kTrial)'], {}), '(duration / kTrial)\n', (13775, 13794), True, 'import math as mm\n'), ((5403, 5426), 'numpy.float', 'np.float', (['sElemY[k - 1]'], {}), '(sElemY[k - 1])\n', (5411, 5426), True, 'import numpy as np\n'), ((15907, 15924), 'numpy.int', 'np.int', (['(ctr + 1.0)'], {}), '(ctr + 1.0)\n', (15913, 15924), True, 'import numpy as np\n'), ((15968, 15985), 'numpy.int', 'np.int', (['(ctr + 2.0)'], {}), '(ctr + 2.0)\n', (15974, 15985), True, 'import numpy as np\n'), ((3594, 3617), 'numpy.float', 'np.float', (['sElemY[k - 1]'], {}), '(sElemY[k - 1])\n', (3602, 3617), True, 'import numpy as np\n')]
|
import logging
from typing import Union, Tuple
import threading
import numpy as np
from pyobs.comm import RemoteException
from pyobs.interfaces import IFocuser, ICamera, IAutoFocus, IFilters, ICameraExposureTime, IImageType
from pyobs.events import FocusFoundEvent
from pyobs.object import get_object
from pyobs.mixins import CameraSettingsMixin
from pyobs.modules import timeout, Module
from pyobs.utils.enums import ImageType
from pyobs.utils.focusseries import FocusSeries
log = logging.getLogger(__name__)
class AutoFocusSeries(Module, CameraSettingsMixin, IAutoFocus):
"""Module for auto-focusing a telescope."""
__module__ = 'pyobs.modules.focus'
def __init__(self, focuser: Union[str, IFocuser], camera: Union[str, ICamera], filters: Union[str, IFilters],
series: FocusSeries, offset: bool = False, *args, **kwargs):
"""Initialize a new auto focus system.
Args:
focuser: Name of IFocuser.
camera: Name of ICamera.
            filters: Name of IFilters, if any.
            series: FocusSeries class to use for analysing the focus series.
            offset: If True, offsets are used instead of absolute focus values.
"""
Module.__init__(self, *args, **kwargs)
# store focuser and camera
self._focuser = focuser
self._camera = camera
self._filters = filters
self._offset = offset
self._abort = threading.Event()
# create focus series
self._series: FocusSeries = get_object(series, FocusSeries)
# init camera settings mixin
CameraSettingsMixin.__init__(self, *args, filters=filters, **kwargs)
def open(self):
"""Open module"""
Module.open(self)
# register event
self.comm.register_event(FocusFoundEvent)
# check focuser and camera
try:
self.proxy(self._focuser, IFocuser)
self.proxy(self._camera, ICamera)
except ValueError:
log.warning('Either camera or focuser do not exist or are not of correct type at the moment.')
def close(self):
"""Close module."""
@timeout(600)
def auto_focus(self, count: int, step: float, exposure_time: int, *args, **kwargs) -> Tuple[float, float]:
"""Perform an auto-focus series.
This method performs an auto-focus series with "count" images on each side of the initial guess and the given
step size. With count=3, step=1 and guess=10, this takes images at the following focus values:
7, 8, 9, 10, 11, 12, 13
Args:
count: Number of images to take on each side of the initial guess. Should be an odd number.
step: Step size.
exposure_time: Exposure time for images.
Returns:
            Tuple of obtained best focus value and its uncertainty.
        Raises:
            ValueError: If the focus series failed or a focus value could not be set.
            InterruptedError: If the series was aborted.
            FileNotFoundException: If image could not be downloaded.
"""
log.info('Performing auto-focus...')
# get focuser
log.info('Getting proxy for focuser...')
focuser: IFocuser = self.proxy(self._focuser, IFocuser)
# get camera
log.info('Getting proxy for camera...')
camera: ICamera = self.proxy(self._camera, ICamera)
# do camera settings
self._do_camera_settings(camera)
# get filter wheel and current filter
filter_name = 'unknown'
try:
filter_wheel: IFilters = self.proxy(self._filters, IFilters)
filter_name = filter_wheel.get_filter().wait()
except ValueError:
log.warning('Filter module is not of type IFilters. Could not get filter.')
# get focus as first guess
try:
if self._offset:
guess = 0
log.info('Using focus offset of 0mm as initial guess.')
else:
guess = focuser.get_focus().wait()
log.info('Using current focus of %.2fmm as initial guess.', guess)
except RemoteException:
raise ValueError('Could not fetch current focus value.')
# define array of focus values to iterate
focus_values = np.linspace(guess - count * step, guess + count * step, 2 * count + 1)
# define set_focus method
set_focus = focuser.set_focus_offset if self._offset else focuser.set_focus
# reset
self._series.reset()
self._abort = threading.Event()
# loop focus values
log.info('Starting focus series...')
for foc in focus_values:
# set focus
log.info('Changing focus to %.2fmm...', foc)
if self._abort.is_set():
raise InterruptedError()
try:
set_focus(float(foc)).wait()
except RemoteException:
raise ValueError('Could not set new focus value.')
# do exposure
log.info('Taking picture...')
if self._abort.is_set():
raise InterruptedError()
try:
if isinstance(camera, ICameraExposureTime):
camera.set_exposure_time(exposure_time)
if isinstance(camera, IImageType):
camera.set_image_type(ImageType.FOCUS)
filename = camera.expose().wait()
except RemoteException:
log.error('Could not take image.')
continue
# download image
log.info('Downloading image...')
image = self.vfs.read_image(filename)
# analyse
log.info('Analysing picture...')
try:
self._series.analyse_image(image)
except:
# do nothing..
log.error('Could not analyse image.')
continue
# fit focus
if self._abort.is_set():
raise InterruptedError()
focus = self._series.fit_focus()
# did focus series fail?
if focus is None or focus[0] is None or np.isnan(focus[0]):
log.warning('Focus series failed.')
# reset to initial values
if self._offset:
log.info('Resetting focus offset to initial guess of %.3f mm.', guess)
                focuser.set_focus_offset(guess).wait()
else:
log.info('Resetting focus to initial guess of %.3f mm.', guess)
                focuser.set_focus(guess).wait()
# raise error
raise ValueError('Could not find best focus.')
# "absolute" will be the absolute focus value, i.e. focus+offset
absolute = None
# log and set focus
if self._offset:
log.info('Setting new focus offset of (%.3f+-%.3f) mm.', focus[0], focus[1])
absolute = focus[0] + focuser.get_focus().wait()
focuser.set_focus_offset(focus[0]).wait()
else:
log.info('Setting new focus value of (%.3f+-%.3f) mm.', focus[0], focus[1])
absolute = focus[0] + focuser.get_focus_offset().wait()
focuser.set_focus(focus[0]).wait()
# send event
self.comm.send_event(FocusFoundEvent(absolute, focus[1], filter_name))
# return result
return focus[0], focus[1]
def auto_focus_status(self, *args, **kwargs) -> dict:
"""Returns current status of auto focus.
Returned dictionary contains a list of focus/fwhm pairs in X and Y direction.
Returns:
Dictionary with current status.
"""
return {}
@timeout(20)
def abort(self, *args, **kwargs):
"""Abort current actions."""
self._abort.set()
__all__ = ['AutoFocusSeries']
|
[
"logging.getLogger",
"pyobs.mixins.CameraSettingsMixin.__init__",
"pyobs.object.get_object",
"threading.Event",
"pyobs.modules.Module.open",
"pyobs.modules.timeout",
"numpy.linspace",
"numpy.isnan",
"pyobs.modules.Module.__init__",
"pyobs.events.FocusFoundEvent"
] |
[((484, 511), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (501, 511), False, 'import logging\n'), ((2078, 2090), 'pyobs.modules.timeout', 'timeout', (['(600)'], {}), '(600)\n', (2085, 2090), False, 'from pyobs.modules import timeout, Module\n'), ((7549, 7560), 'pyobs.modules.timeout', 'timeout', (['(20)'], {}), '(20)\n', (7556, 7560), False, 'from pyobs.modules import timeout, Module\n'), ((1143, 1181), 'pyobs.modules.Module.__init__', 'Module.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1158, 1181), False, 'from pyobs.modules import timeout, Module\n'), ((1364, 1381), 'threading.Event', 'threading.Event', ([], {}), '()\n', (1379, 1381), False, 'import threading\n'), ((1449, 1480), 'pyobs.object.get_object', 'get_object', (['series', 'FocusSeries'], {}), '(series, FocusSeries)\n', (1459, 1480), False, 'from pyobs.object import get_object\n'), ((1527, 1595), 'pyobs.mixins.CameraSettingsMixin.__init__', 'CameraSettingsMixin.__init__', (['self', '*args'], {'filters': 'filters'}), '(self, *args, filters=filters, **kwargs)\n', (1555, 1595), False, 'from pyobs.mixins import CameraSettingsMixin\n'), ((1651, 1668), 'pyobs.modules.Module.open', 'Module.open', (['self'], {}), '(self)\n', (1662, 1668), False, 'from pyobs.modules import timeout, Module\n'), ((4140, 4210), 'numpy.linspace', 'np.linspace', (['(guess - count * step)', '(guess + count * step)', '(2 * count + 1)'], {}), '(guess - count * step, guess + count * step, 2 * count + 1)\n', (4151, 4210), True, 'import numpy as np\n'), ((4398, 4415), 'threading.Event', 'threading.Event', ([], {}), '()\n', (4413, 4415), False, 'import threading\n'), ((6007, 6025), 'numpy.isnan', 'np.isnan', (['focus[0]'], {}), '(focus[0])\n', (6015, 6025), True, 'import numpy as np\n'), ((7147, 7195), 'pyobs.events.FocusFoundEvent', 'FocusFoundEvent', (['absolute', 'focus[1]', 'filter_name'], {}), '(absolute, focus[1], filter_name)\n', (7162, 7195), False, 'from pyobs.events import FocusFoundEvent\n')]
|
import matplotlib.pylab as plt
import numpy as np
def plotFlow(env,policy,x2d):
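    # Visualize the policy as a 2-D flow field: for every discrete state, apply the
    # policy for one step and draw an arrow from x2d(s) toward x2d(next state) with quiver.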
flow = []
for s in range(env.nx):
env.reset(s)
x = x2d(s)
a = policy(s)
snext,r = env.step(a)
xnext = x2d(snext)
flow.append( [x,xnext-x] )
flow=np.array( [ np.concatenate(a) for a in flow ])
h = plt.quiver(flow[:,0],flow[:,1],flow[:,2],flow[:,3])
return h
|
[
"matplotlib.pylab.quiver",
"numpy.concatenate"
] |
[((342, 400), 'matplotlib.pylab.quiver', 'plt.quiver', (['flow[:, 0]', 'flow[:, 1]', 'flow[:, 2]', 'flow[:, 3]'], {}), '(flow[:, 0], flow[:, 1], flow[:, 2], flow[:, 3])\n', (352, 400), True, 'import matplotlib.pylab as plt\n'), ((299, 316), 'numpy.concatenate', 'np.concatenate', (['a'], {}), '(a)\n', (313, 316), True, 'import numpy as np\n')]
|
import pytest
from metagraph.tests.util import default_plugin_resolver
from . import RoundTripper
from metagraph.plugins.python.types import PythonNodeSetType
from metagraph.plugins.numpy.types import NumpyNodeSet, NumpyNodeMap
import numpy as np
def test_nodeset_roundtrip(default_plugin_resolver):
rt = RoundTripper(default_plugin_resolver)
ns = {2, 3, 55}
rt.verify_round_trip(ns)
def test_np_nodemap_2_np_nodeset(default_plugin_resolver):
dpr = default_plugin_resolver
x = NumpyNodeMap(np.array([00, 10, 20]))
assert len(x) == 3
intermediate = NumpyNodeSet(np.array([0, 1, 2]))
y = dpr.translate(x, NumpyNodeSet)
dpr.assert_equal(y, intermediate)
def test_np_nodeset_2_py_nodeset(default_plugin_resolver):
dpr = default_plugin_resolver
x = NumpyNodeSet(np.array([9, 5, 1]))
assert len(x) == 3
intermediate = {5, 1, 9}
y = dpr.translate(x, PythonNodeSetType)
dpr.assert_equal(y, intermediate)
def test_py_nodeset_2_np_nodeset(default_plugin_resolver):
dpr = default_plugin_resolver
x = {2, 1, 5}
assert len(x) == 3
intermediate = NumpyNodeSet.from_mask(
np.array([False, True, True, False, False, True])
)
y = dpr.translate(x, NumpyNodeSet)
dpr.assert_equal(y, intermediate)
|
[
"numpy.array"
] |
[((514, 535), 'numpy.array', 'np.array', (['[0, 10, 20]'], {}), '([0, 10, 20])\n', (522, 535), True, 'import numpy as np\n'), ((593, 612), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (601, 612), True, 'import numpy as np\n'), ((807, 826), 'numpy.array', 'np.array', (['[9, 5, 1]'], {}), '([9, 5, 1])\n', (815, 826), True, 'import numpy as np\n'), ((1149, 1198), 'numpy.array', 'np.array', (['[False, True, True, False, False, True]'], {}), '([False, True, True, False, False, True])\n', (1157, 1198), True, 'import numpy as np\n')]
|
"""
Definition of direct collocation problem.
Authors: <NAME>, <NAME>
Date: 05/01/2021
"""
# third party imports
try:
import ipyopt
_ipyopt_imported = True
except:
_ipyopt_imported = False
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize, NonlinearConstraint
from scipy.integrate import solve_ivp
from sympy import Matrix, Symbol, lambdify
from sympy.core.function import BadArgumentsError
# pydcol imports
from .Objective import Objective
from .EqualityConstraints import EqualityConstraints
from .CollocMethods import *
from .Solution import Solution
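# CollocationProblem assembles a symbolic objective and the collocation equality constraints
# for the chosen scheme (trapezoid, Euler forward/backward, Hermite-Simpson, or Radau) and
# solves the resulting nonlinear program with scipy's trust-constr or with Ipopt.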
class CollocationProblem:
def __init__(self,
state_vars,
control_vars,
ode,
tspan,
X_start,
X_goal=None,
colloc_method=HERM,
custom_objective=None):
self.ode = ode
self.state_vars = state_vars
self.control_vars = control_vars
self.ode_fun = lambdify(self.state_vars+self.control_vars, Matrix(self.ode), 'numpy')
self.colloc_method = colloc_method
self.tspan = tspan
self.objective = custom_objective
self.X_start = X_start
self.X_goal = X_goal
# Get variable dimensions
self.N = self.tspan.size
self.Ntilde=self.tspan.size
self.X_dim = len(state_vars)
self.U_dim = len(control_vars)
self.all_vars = state_vars + control_vars
self.h = Symbol("h") # symbolic time step
self._h = self.tspan[1:] - self.tspan[:-1] # time steps
# Create a set of "prev" and "mid" variables for accessing values at previous time step
self.prev_all_vars = [Symbol(str(var)+"_prev") for var in self.all_vars]
self.prev_dict = {}
for i in range(len(self.all_vars)):
self.prev_dict[self.all_vars[i]] = self.prev_all_vars[i]
if self.colloc_method in MIDPOINT_METHODS:
self.mid_all_vars = [Symbol(str(var)+"_mid") for var in self.all_vars]
self.mid_dict = {}
for i in range(len(self.all_vars)):
self.mid_dict[self.all_vars[i]] = self.mid_all_vars[i]
else:
self.mid_all_vars = []
X = Matrix(state_vars)
U = Matrix(control_vars)
# Scalar Objective
if self.objective is None:
if self.colloc_method in [HERM]:
Obj = 0
for i in range(self.U_dim):
effort = self.control_vars[i]**2
Obj += (self.h/6.0) * (effort + 4.0 * effort.subs(self.mid_dict) + effort.subs(self.prev_dict))
elif self.colloc_method in [RADAU]:
Obj = 0
for i in range(self.U_dim):
effort = self.control_vars[i]**2
Obj += (self.h/4.0) * (3.0 * effort.subs(self.mid_dict) + effort)
else:
effort = self.h * U.multiply_elementwise(U)
Obj = np.sum(effort[:])
# Equality Constraints
C_eq = []
if colloc_method == TRAP:
# Trapezoid method
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - 0.5 * self.h * (ode[i] + ode[i].subs(self.prev_dict))]
elif colloc_method == EB:
# Euler Backward method
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - self.h * ode[i]]
elif colloc_method == EF:
# Euler Forward method
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - self.h * ode[i].subs(self.prev_dict)]
elif colloc_method == HERM:
# Hermite Simpson method
self.Ntilde=self.Ntilde*2-1 # actual number of node points due to addition of "mid" points
for i in range(self.X_dim):
C_eq+=[state_vars[i].subs(self.mid_dict) - 0.5 * (state_vars[i] + state_vars[i].subs(self.prev_dict)) - (self.h/8.0) * (ode[i].subs(self.prev_dict) - ode[i])]
for i in range(self.X_dim):
C_eq += [state_vars[i] - state_vars[i].subs(self.prev_dict) - (self.h/6.0) * (ode[i] + 4.0 * ode[i].subs(self.mid_dict) + ode[i].subs(self.prev_dict))]
elif colloc_method == RADAU:
# Radau 3rd order
self.Ntilde=self.Ntilde*2-1 # actual number of node points due to addition of "mid" points
for i in range(self.X_dim):
C_eq+=[state_vars[i].subs(self.mid_dict) - state_vars[i].subs(self.prev_dict)-5.0/12.0*self.h*ode[i].subs(self.mid_dict)+1.0/12.0*self.h*ode[i]] # intermediate point residue
for i in range(self.X_dim):
C_eq+=[state_vars[i] - state_vars[i].subs(self.prev_dict)-3.0/4.0*self.h*ode[i].subs(self.mid_dict)-1.0/4.0*self.h*ode[i]] # end point residue
# Compile objective and equality constraints
self.equality_constr = EqualityConstraints(self, Matrix(C_eq))
if self.objective is None:
self.objective = Objective(self, Obj)
def solve(self, x0: np.array = None, bounds: list = None, solver: str='scipy')->Solution:
"""
Solve the direct collocation problem as a nonlinear program.
Parameters
----------
x0 -- initial guess for solution, if not provided, an educated guess is based on initial/final state.
bounds -- list of [upper, lower] bound lists, one for each variable (order should match x0)
solver -- which optimizer to use (options: scipy, ipopt)
Returns
-------
pydcol.Solution containing solution and problem metadata
"""
self.is_solved = False
if x0 is None:
# Initialize optimization variables
if bounds is not None:
u_bounds = bounds[self.X_dim:]
u_mid = []
for ubnd in u_bounds:
if ubnd[0] is not None and ubnd[1] is not None:
u_mid += [(ubnd[0]+ubnd[1])/2.0]
elif ubnd[1] is not None:
u_mid += [ubnd[1]]
elif ubnd[0] is not None:
u_mid += [ubnd[0]]
else:
u_mid += [0.0]
else:
u_mid = [0.1] * self.U_dim
x0 = [self.X_start.tolist() + u_mid]
x0_mid = []
for i in range(self.N - 1):
if self.X_goal is not None:
xnew = self.X_start + (self.X_goal - self.X_start) * i / self.Ntilde
else:
xnew = self.X_start + i / self.Ntilde
x0.append(xnew.tolist() + u_mid)
if self.N != self.Ntilde:
x0_mid.append(0.5*(np.array(x0[-1]) + np.array(x0[-2])))
x0 = np.array(x0 + x0_mid).ravel()
if solver=='scipy':
_bounds = bounds * self.Ntilde
# Problem constraints
constr_eq = NonlinearConstraint(self.equality_constr.eval,
lb=0,
ub=0,
jac=self.equality_constr.jac,
hess=self.equality_constr.hess)
# Solve Problem
sol_opt = minimize(self.objective.eval,
x0,
method="trust-constr",
jac=self.objective.jac,
hess=self.objective.hess,
constraints=(constr_eq),
bounds=_bounds,
options={'sparse_jacobian': True})
# convert scipy solution to our format
self.sol_c = Solution(sol_opt, self.colloc_method, (self.N, self.Ntilde, self.X_dim, self.U_dim), self.tspan, solver)
self.is_solved = sol_opt.success
elif solver == "ipopt":
if not _ipyopt_imported:
raise(ImportError("Ipyopt could not be imported! Please use scipy solver."))
# setup variable bounds
nvar = self.Ntilde * len(bounds)
x_L = np.zeros(nvar)
x_U = np.zeros(nvar)
v_idx = 0
for i in range(self.Ntilde):
for b_pair in bounds:
if b_pair[0] is None:
x_L[v_idx] = -1e9
else:
x_L[v_idx] = b_pair[0]
if b_pair[1] is None:
x_U[v_idx] = 1e9
else:
x_U[v_idx] = b_pair[1]
v_idx += 1
# setup equality constraints
ncon = self.equality_constr.eval(x0).size
g_L = np.zeros((ncon,))
g_U = np.zeros((ncon,))
# finding out which entries of the constraint jacobian and problem hessian are allways
# nonzero.
jac_g_idx = self.equality_constr.jac(x0, return_sparse_indices=True)
lagrange = np.ones(ncon)
h_obj_idx = self.objective.hess(x0, return_sparse_indices=True)
h_con_idx = self.equality_constr.hess(x0, lagrange, return_sparse_indices=True)
# merge objective and constraint hessian indices
coords = set()
for i in range(len(h_obj_idx[0])):
coords.add((h_obj_idx[0][i], h_obj_idx[1][i]))
for i in range(len(h_con_idx[0])):
coords.add((h_con_idx[0][i], h_con_idx[1][i]))
coords = np.array(list(coords))
h_idx = (coords[:,0], coords[:,1])
def eval_grad_f(x, out):
out[()] = self.objective.jac(x).ravel()
return out
def eval_g(x, out):
out[()] = self.equality_constr.eval(x).ravel()
return out
def eval_jac_g(x, out):
out[()] = self.equality_constr.jac(x).data
return out
def eval_h(x, lagrange, obj_factor, out):
"""
Combined hessian for the problem.
"""
H = self.objective.hess(x) * (obj_factor) + self.equality_constr.hess(x, lagrange)
out[()] = H.data
return out
nlp = ipyopt.Problem(nvar, x_L, x_U,
ncon, g_L, g_U,
jac_g_idx, h_idx,
self.objective.eval, eval_grad_f,
eval_g, eval_jac_g, eval_h)
# nlp.set(print_level=0)
sol_x, obj, status = nlp.solve(x0)
# convert scipy solution to our format
self.sol_c = Solution(sol_x, self.colloc_method, (self.N, self.Ntilde, self.X_dim, self.U_dim), self.tspan, solver)
self.is_solved = (status == 0) or (status == 1) # solver either succeeded or converged to acceptable accuracy
else:
raise(BadArgumentsError("Error unsupported solver!"))
self.sol_c.obj = self.objective.eval(self.sol_c.opt_x)
print("Done")
if self.is_solved:
print("Success :-)")
else:
print("Failure :-(")
return self.sol_c
def evaluate(self, ivp_method: str='RK45'):
"""
Creates a plot comparing the direct collocation solution to an implicit IVP solver solution
generated by applying the U from the solution from the initial condition from t0 to tf.
Parameters
----------
ivp_method -- string representing ivp solution method to use
Returns
-------
None
"""
tspan = self.sol_c.t
X = self.sol_c.x.copy()
U = self.sol_c.u
def system_eqs(t, x_t):
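            # Right-hand side of the ODE: interpolate the optimized control at time t
            # and evaluate the state derivatives with the lambdified dynamics.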
U_t = self.sol_c.u_t(t)
return self.ode_fun(*x_t, *U_t).ravel()
eval_tspan = np.linspace(tspan[0],tspan[-1],100)
sol_ivp = solve_ivp(system_eqs, [tspan[0],tspan[-1]], self.X_start, method=ivp_method, t_eval=eval_tspan)
colors = ['k', 'g', 'b', 'r', 'c', 'm', 'y']
_, axs = plt.subplots(2, 1)
axs[0].set_title("Collocation Points vs. Integration Results")
for i in range(self.X_dim):
axs[0].plot(tspan, X[:,i],'o',color=colors[i],markersize=3)
axs[0].plot(sol_ivp.t, sol_ivp.y[i,:],color=colors[i])
axs[0].set_ylabel("State Variables")
axs[0].plot([], [],'o',color='k',label='Colloc solution')
axs[0].plot([], [],color='k',label='IVP solution')
axs[0].legend()
U_t = np.array(self.sol_c.u_t(sol_ivp.t)).T.reshape(-1, self.U_dim)
for j in range(self.U_dim):
axs[1].plot(tspan, U[:,j],'o',color=colors[j],markersize=3)
axs[1].plot(sol_ivp.t, U_t[:,j],color=colors[j])
axs[1].set_ylabel("Control Variables")
axs[1].set_xlabel("Time [s]")
plt.show()
|
[
"sympy.Symbol",
"numpy.ones",
"sympy.core.function.BadArgumentsError",
"scipy.optimize.minimize",
"scipy.integrate.solve_ivp",
"sympy.Matrix",
"scipy.optimize.NonlinearConstraint",
"numpy.sum",
"numpy.linspace",
"numpy.zeros",
"numpy.array",
"ipyopt.Problem",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((1312, 1323), 'sympy.Symbol', 'Symbol', (['"""h"""'], {}), "('h')\n", (1318, 1323), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((1973, 1991), 'sympy.Matrix', 'Matrix', (['state_vars'], {}), '(state_vars)\n', (1979, 1991), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((1998, 2018), 'sympy.Matrix', 'Matrix', (['control_vars'], {}), '(control_vars)\n', (2004, 2018), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((9672, 9709), 'numpy.linspace', 'np.linspace', (['tspan[0]', 'tspan[-1]', '(100)'], {}), '(tspan[0], tspan[-1], 100)\n', (9683, 9709), True, 'import numpy as np\n'), ((9720, 9821), 'scipy.integrate.solve_ivp', 'solve_ivp', (['system_eqs', '[tspan[0], tspan[-1]]', 'self.X_start'], {'method': 'ivp_method', 't_eval': 'eval_tspan'}), '(system_eqs, [tspan[0], tspan[-1]], self.X_start, method=\n ivp_method, t_eval=eval_tspan)\n', (9729, 9821), False, 'from scipy.integrate import solve_ivp\n'), ((9876, 9894), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (9888, 9894), True, 'import matplotlib.pyplot as plt\n'), ((10573, 10583), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10581, 10583), True, 'import matplotlib.pyplot as plt\n'), ((935, 951), 'sympy.Matrix', 'Matrix', (['self.ode'], {}), '(self.ode)\n', (941, 951), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((4329, 4341), 'sympy.Matrix', 'Matrix', (['C_eq'], {}), '(C_eq)\n', (4335, 4341), False, 'from sympy import Matrix, Symbol, lambdify\n'), ((5916, 6041), 'scipy.optimize.NonlinearConstraint', 'NonlinearConstraint', (['self.equality_constr.eval'], {'lb': '(0)', 'ub': '(0)', 'jac': 'self.equality_constr.jac', 'hess': 'self.equality_constr.hess'}), '(self.equality_constr.eval, lb=0, ub=0, jac=self.\n equality_constr.jac, hess=self.equality_constr.hess)\n', (5935, 6041), False, 'from scipy.optimize import minimize, NonlinearConstraint\n'), ((6114, 6303), 'scipy.optimize.minimize', 'minimize', (['self.objective.eval', 'x0'], {'method': '"""trust-constr"""', 'jac': 'self.objective.jac', 'hess': 'self.objective.hess', 'constraints': 'constr_eq', 'bounds': '_bounds', 'options': "{'sparse_jacobian': True}"}), "(self.objective.eval, x0, method='trust-constr', jac=self.objective\n .jac, hess=self.objective.hess, constraints=constr_eq, bounds=_bounds,\n options={'sparse_jacobian': True})\n", (6122, 6303), False, 'from scipy.optimize import minimize, NonlinearConstraint\n'), ((6756, 6770), 'numpy.zeros', 'np.zeros', (['nvar'], {}), '(nvar)\n', (6764, 6770), True, 'import numpy as np\n'), ((6780, 6794), 'numpy.zeros', 'np.zeros', (['nvar'], {}), '(nvar)\n', (6788, 6794), True, 'import numpy as np\n'), ((7156, 7173), 'numpy.zeros', 'np.zeros', (['(ncon,)'], {}), '((ncon,))\n', (7164, 7173), True, 'import numpy as np\n'), ((7183, 7200), 'numpy.zeros', 'np.zeros', (['(ncon,)'], {}), '((ncon,))\n', (7191, 7200), True, 'import numpy as np\n'), ((7393, 7406), 'numpy.ones', 'np.ones', (['ncon'], {}), '(ncon)\n', (7400, 7406), True, 'import numpy as np\n'), ((8383, 8514), 'ipyopt.Problem', 'ipyopt.Problem', (['nvar', 'x_L', 'x_U', 'ncon', 'g_L', 'g_U', 'jac_g_idx', 'h_idx', 'self.objective.eval', 'eval_grad_f', 'eval_g', 'eval_jac_g', 'eval_h'], {}), '(nvar, x_L, x_U, ncon, g_L, g_U, jac_g_idx, h_idx, self.\n objective.eval, eval_grad_f, eval_g, eval_jac_g, eval_h)\n', (8397, 8514), False, 'import ipyopt\n'), ((8908, 8954), 'sympy.core.function.BadArgumentsError', 'BadArgumentsError', (['"""Error unsupported solver!"""'], {}), "('Error unsupported solver!')\n", 
(8925, 8954), False, 'from sympy.core.function import BadArgumentsError\n'), ((2548, 2565), 'numpy.sum', 'np.sum', (['effort[:]'], {}), '(effort[:])\n', (2554, 2565), True, 'import numpy as np\n'), ((5788, 5809), 'numpy.array', 'np.array', (['(x0 + x0_mid)'], {}), '(x0 + x0_mid)\n', (5796, 5809), True, 'import numpy as np\n'), ((5742, 5758), 'numpy.array', 'np.array', (['x0[-1]'], {}), '(x0[-1])\n', (5750, 5758), True, 'import numpy as np\n'), ((5761, 5777), 'numpy.array', 'np.array', (['x0[-2]'], {}), '(x0[-2])\n', (5769, 5777), True, 'import numpy as np\n')]
|
import sys
from bluesky.plan_patterns import spiral_square_pattern
import time as ttime
import numpy as np
import bluesky.plans as bp
import bluesky.plan_stubs as bps
from bluesky.plans import rel_spiral_square
from ophyd.sim import NullStatus
from ophyd.status import SubscriptionStatus
# Note: beamline devices referenced below (giantxy, apb_ave, apb_stream, pb4, db) are
# expected to be defined elsewhere in the beamline profile/startup files.
# def sample_spiral_scan():
# detectors = [apb_ave]
#
# return general_spiral_scan(detectors, giantxy.x, giantxy.y, 15, 15, 15, 15, time_step=0.1)
# channels = [apb_ave.ch1, apb_ave.ch2, apb_ave.ch3, apb_ave.ch4]
# offsets = [apb.ch1_offset, apb.ch2_offset, apb.ch3_offset, apb.ch4_offset, ]
# plan = rel_spiral_square(detectors, giantxy.x, giantxy.y, 15, 15, 15, 15)
# time_step = 0.1
# samples = 250 * (np.ceil(time_step * 10443 / 250)) # hn I forget what that does... let's look into the new PB OPI
# yield from bps.abs_set(apb_ave.sample_len, time_step*1e3, wait=True)
# yield from bps.abs_set(apb_ave.wf_len, time_step*1e3, wait=True)
# yield from bps.abs_set(apb_ave.divide, 374, wait=True)
# if hasattr(detector, 'kickoff'):
# plan_with_flyers = bpp.fly_during_wrapper(plan, [detectors])
# uid = (yield from plan)
# table = db[uid].table()
# row_num = table[detector.volt.name].idxmin()
# x_pos = table['giantxy_x'][row_num]
# y_pos = table['giantxy_y'][row_num]
def general_spiral_scan(detectors_list, *, motor1=giantxy.x, motor2=giantxy.y, motor1_range=15, motor2_range=15, motor1_nsteps=15, motor2_nsteps=15, time_step=0.1, **kwargs):
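    # Run a relative square spiral scan of motor1/motor2 with the given detectors.
    # If the averaging pizza-box (apb_ave) is among the detectors, its divide, sample and
    # waveform lengths are temporarily reconfigured to match time_step and restored afterwards.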
sys.stdout = kwargs.pop('stdout', sys.stdout)
print(f'Dets {detectors_list}')
print(f'Motors {motor1}, {motor2}')
plan = rel_spiral_square(detectors_list, motor1, motor2,
motor1_range, motor2_range, motor1_nsteps, motor2_nsteps,
md={"plan_name": "spiral scan"})
if apb_ave in detectors_list:
print('Preparing pizzabox')
cur_divide_value = apb_ave.divide.value
cur_sample_len = apb_ave.sample_len.value
cur_wf_len = apb_ave.wf_len.value
print('[General Spiral Scan] Starting scan...')
yield from bps.abs_set(apb_ave.divide, 374, wait=True)
yield from bps.abs_set(apb_ave.sample_len, int(time_step * 1e3), wait=True)
yield from bps.abs_set(apb_ave.wf_len, int(time_step * 1e3), wait=True)
uid = (yield from plan)
if apb_ave in detectors_list:
print('Returning the pizzabox to its original state')
yield from bps.abs_set(apb_ave.divide, cur_divide_value, wait=True)
yield from bps.abs_set(apb_ave.sample_len, cur_sample_len, wait=True)
yield from bps.abs_set(apb_ave.wf_len, cur_wf_len, wait=True)
return uid
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_mus():
data = db[-1].table()
x = data['giantxy_x']
y = data['giantxy_y']
mut = np.log(data['apb_ave_ch1_mean']/data['apb_ave_ch2_mean'])
muf = data['apb_ave_ch4_mean']/data['apb_ave_ch1_mean']
return x,y, mut, muf
def analyze_surface():
x, y, mut, muf = get_mus()
plot_xyz(x, y, mut)
plot_xyz(x, y, muf)
def com(a_orig, w_orig, mask=None):
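    # Weighted center of mass of array `a` with weights `w`, optionally restricted to `mask`.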
a = a_orig.copy()
w = w_orig.copy()
if mask is not None:
a = a[mask]
w = w[mask]
return np.sum(a * w)/np.sum(w)
def plot_xyz(x, y, z, r1=5, r2=(13.4/2-1)):
fig = plt.figure()
# ax = fig.gca(projection='3d')
# ax.plot_trisurf(x, y, z, linewidth=0.2, antialiased=True, cmap=plt.cm.Spectral)
ax = fig.gca()
x_im_center = x.iloc[0]
y_im_center = y.iloc[0]
# R = r1 #13.4/2-1
xy_mask = (np.sqrt(np.abs(x - x_im_center)**2 +
np.abs(y - y_im_center)**2) < r1)
x_ho_com = com(x, z.max() - z, ~xy_mask)
y_ho_com = com(y, z.max() - z, ~xy_mask)
xy_mask_recen = (np.sqrt(np.abs(x - x_ho_com) ** 2 +
np.abs(y - y_ho_com) ** 2) < r2)
# x_max = x[xy_mask_recen][np.argmax(z[xy_mask_recen])]
# y_max = y[xy_mask_recen][np.argmax(z[xy_mask_recen])]
x_max = com(x, (z - z.min())**2, xy_mask_recen)
y_max = com(y, (z - z.min())**2, xy_mask_recen)
ax.tricontourf(x, y, z, 50)
ax.plot(x_im_center, y_im_center, 'ro', ms=25)
ax.plot(x_ho_com, y_ho_com, 'bx', ms=25, markeredgewidth=5)
ax.plot(x_max, y_max, 'm+', ms=25, markeredgewidth=5)
# plt.plot(x[xy_mask], y[xy_mask], 'g.', alpha=0.5)
# plt.plot(x[~xy_mask], y[~xy_mask], 'r.', alpha=0.5)
# plt.show()
class SnakeFlyer():
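    # Bluesky flyer that snakes the XY stage through a raster trajectory while the analog
    # detector streams and the encoder pizza-boxes record, then emits the detector, encoder,
    # and motor-position data as separate event streams.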
def __init__(self, det, pbs, motor_stage):
self.name = 'snake_flyer'
self.parent = None
self.det = det
self.pbs = pbs # a list of passed pizza-boxes
self.motor_stage = motor_stage
self._motor_status = None
self.traj = None
def _motor_snaker(self, motor_x=None, range_x=None, motor_y=None, range_y=None):
"""Snake tragectory for flyer.
:param motor_x: ophyd object for motor
:param range_x: range in motor units
:param motor_y: ophyd object for motor
:param range_y: range in motor units
:return: None
"""
# Read start positions.
start_pos_x = motor_x.user_readback.get()
start_pos_y = motor_y.user_readback.get()
step = 1
# We need the grid scan here to get the tragectory.
plan = bp.rel_grid_scan([], motor_y, -range_y / 2, range_y / 2, (range_y / step + 1),
motor_x, -range_x / 2, range_x / 2, 2,
True # snake=True
)
# This is adapted from plot_raster_scan in bluesky.
cur_x = cur_y = None
self.traj = []
for msg in plan:
cmd = msg.command
if cmd == 'set':
if msg.obj.name == motor_x.name:
cur_x = msg.args[0]
if msg.obj.name == motor_y.name:
cur_y = msg.args[0]
elif cmd == 'save':
self.traj.append((cur_x, cur_y))
# Move motors along the trajectory.
for (x, y) in self.traj:
print(x, y)
if abs(motor_x.user_readback.get() - x) > 5e-3:
print(f"Moving {motor_x.name}")
# .move blocks the operation, and waits until the motor arrives to the target position.
motor_x.move(x)
if abs(motor_y.user_readback.get() - y) > 5e-3:
print(f"Moving {motor_y.name}")
# .move blocks the operation, and waits until the motor arrives to the target position.
motor_y.move(y)
# Move back to the original position both motors simultaneously.
self._motor_status = motor_x.set(start_pos_x)
self._motor_status &= motor_y.set(start_pos_y)
def kickoff(self, *args, **kwargs):
for pb in self.pbs:
pb.stage()
pb.kickoff()
self.det.stage()
# Start apb after encoder pizza-boxes, which will trigger the motor.
self.det.stream.set(1)
self._motor_snaker(motor_x=self.motor_stage.x, range_x=10, motor_y=self.motor_stage.y, range_y=4)
print(f"Motor status in kickoff: {self._motor_status}")
return NullStatus()
def complete(self):
print(f"Motor status in complete: {self._motor_status}")
def callback_det(value, old_value, **kwargs):
if int(round(old_value)) == 1 and int(round(value)) == 0:
print(f'callback_det {ttime.ctime()}')
return True
else:
return False
streaming_st = SubscriptionStatus(self.det.streaming, callback_det)
def callback_motor():
print(f'callback_motor {ttime.ctime()}')
for pb in self.pbs:
pb.complete()
# TODO: see if this set is still needed (also called in self.det.unstage())
self.det.stream.put(0)
self.det.complete()
self._motor_status.add_callback(callback_motor)
# Jdun!
return streaming_st & self._motor_status
def describe_collect(self):
return_dict = {self.det.name:
{f'{self.det.name}': {'source': 'APB',
'dtype': 'array',
'shape': [-1, -1],
'filename_bin': self.det.filename_bin,
'filename_txt': self.det.filename_txt,
'external': 'FILESTORE:'}}}
# Also do it for all pizza-boxes
for pb in self.pbs:
return_dict[pb.name] = pb.describe_collect()[pb.name]
# Add a stream for the motor positions.
return_dict[self.motor_stage.name] = {f'{self.motor_stage.x.name}': {'source': 'SNAKE',
'dtype': 'number',
'shape': []},
f'{self.motor_stage.y.name}': {'source': 'SNAKE',
'dtype': 'number',
'shape': []}
}
return return_dict
def collect_asset_docs(self):
yield from self.det.collect_asset_docs()
for pb in self.pbs:
yield from pb.collect_asset_docs()
def collect(self):
print(f"Motor status in collect: {self._motor_status}")
self.det.unstage()
for pb in self.pbs:
pb.unstage()
def collect_all():
for pb in self.pbs:
yield from pb.collect()
yield from self.det.collect()
# Collect docs for motor positions.
now = ttime.time()
for (x, y) in self.traj:
data = {f"{self.motor_stage.x.name}": x,
f"{self.motor_stage.y.name}": y}
yield {'data': data,
'timestamps': {key: now for key in data}, 'time': now,
'filled': {key: False for key in data}}
return collect_all()
snake_flyer = SnakeFlyer(det=apb_stream, pbs=[pb4.enc3, pb4.enc4], motor_stage=giantxy)
|
[
"numpy.abs",
"time.ctime",
"bluesky.plans.rel_spiral_square",
"numpy.log",
"ophyd.sim.NullStatus",
"bluesky.plans.rel_grid_scan",
"numpy.sum",
"matplotlib.pyplot.figure",
"time.time"
] |
[((1561, 1710), 'bluesky.plans.rel_spiral_square', 'rel_spiral_square', (['detectors_list', 'motor1', 'motor2', 'motor1_range', 'motor2_range', 'motor1_nsteps', 'motor2_nsteps'], {'md': "{'plan_name': 'spiral scan'}"}), "(detectors_list, motor1, motor2, motor1_range,\n motor2_range, motor1_nsteps, motor2_nsteps, md={'plan_name': 'spiral scan'}\n )\n", (1578, 1710), False, 'from bluesky.plans import rel_spiral_square\n'), ((2814, 2873), 'numpy.log', 'np.log', (["(data['apb_ave_ch1_mean'] / data['apb_ave_ch2_mean'])"], {}), "(data['apb_ave_ch1_mean'] / data['apb_ave_ch2_mean'])\n", (2820, 2873), True, 'import numpy as np\n'), ((3301, 3313), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3311, 3313), True, 'from matplotlib import pyplot as plt\n'), ((3219, 3232), 'numpy.sum', 'np.sum', (['(a * w)'], {}), '(a * w)\n', (3225, 3232), True, 'import numpy as np\n'), ((3233, 3242), 'numpy.sum', 'np.sum', (['w'], {}), '(w)\n', (3239, 3242), True, 'import numpy as np\n'), ((5296, 5421), 'bluesky.plans.rel_grid_scan', 'bp.rel_grid_scan', (['[]', 'motor_y', '(-range_y / 2)', '(range_y / 2)', '(range_y / step + 1)', 'motor_x', '(-range_x / 2)', '(range_x / 2)', '(2)', '(True)'], {}), '([], motor_y, -range_y / 2, range_y / 2, range_y / step + 1,\n motor_x, -range_x / 2, range_x / 2, 2, True)\n', (5312, 5421), True, 'import bluesky.plans as bp\n'), ((7199, 7211), 'ophyd.sim.NullStatus', 'NullStatus', ([], {}), '()\n', (7209, 7211), False, 'from ophyd.sim import NullStatus\n'), ((9927, 9939), 'time.time', 'ttime.time', ([], {}), '()\n', (9937, 9939), True, 'import time as ttime\n'), ((3558, 3581), 'numpy.abs', 'np.abs', (['(x - x_im_center)'], {}), '(x - x_im_center)\n', (3564, 3581), True, 'import numpy as np\n'), ((3610, 3633), 'numpy.abs', 'np.abs', (['(y - y_im_center)'], {}), '(y - y_im_center)\n', (3616, 3633), True, 'import numpy as np\n'), ((3765, 3785), 'numpy.abs', 'np.abs', (['(x - x_ho_com)'], {}), '(x - x_ho_com)\n', (3771, 3785), True, 'import numpy as np\n'), ((3822, 3842), 'numpy.abs', 'np.abs', (['(y - y_ho_com)'], {}), '(y - y_ho_com)\n', (3828, 3842), True, 'import numpy as np\n'), ((7700, 7713), 'time.ctime', 'ttime.ctime', ([], {}), '()\n', (7711, 7713), True, 'import time as ttime\n'), ((7465, 7478), 'time.ctime', 'ttime.ctime', ([], {}), '()\n', (7476, 7478), True, 'import time as ttime\n')]
|
import cv2
import time
import os
import matplotlib.pyplot as plt
import torch
from torch import nn
import torchvision.models as models
import torchvision.transforms as transforms
import numpy as np
savepath='./color_heatmap'
if not os.path.exists(savepath):
os.mkdir(savepath)
def draw_features(width, height, x, savename):
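    # Render the first width*height channels of the feature map x (shape 1 x C x H x W) as
    # JET heat-map tiles in a width-by-height grid and save the figure to savename.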
tic = time.time()
fig = plt.figure(figsize=(16, 16))
fig.subplots_adjust(left=0.05, right=0.95, bottom=0.05, top=0.95, wspace=0.05, hspace=0.05)
for i in range(width * height):
plt.subplot(height, width, i + 1)
plt.axis('off')
img = x[0, i, :, :]
pmin = np.min(img)
pmax = np.max(img)
        img = ((img - pmin) / (pmax - pmin + 0.000001)) * 255  # rescale the float image to the 0-255 range
        img = img.astype(np.uint8)  # convert to uint8
        img = cv2.applyColorMap(img, cv2.COLORMAP_JET)  # generate the heat map
        img = img[:, :, ::-1]  # cv2 uses BGR while matplotlib expects RGB, so reverse the channels
plt.imshow(img)
print("{}/{}".format(i, width * height))
fig.savefig(savename, dpi=100)
fig.clf()
plt.close()
print("time:{}".format(time.time() - tic))
class ft_net(nn.Module):
def __init__(self):
super(ft_net, self).__init__()
model_ft = models.resnet101(pretrained=True)
self.model = model_ft
def forward(self, x):
if True: # draw features or not
x = self.model.conv1(x)
draw_features(8, 8, x.cpu().numpy(), "{}/f1_conv1.png".format(savepath))
x = self.model.bn1(x)
draw_features(8, 8, x.cpu().numpy(), "{}/f2_bn1.png".format(savepath))
x = self.model.relu(x)
draw_features(8, 8, x.cpu().numpy(), "{}/f3_relu.png".format(savepath))
x = self.model.maxpool(x)
draw_features(8, 8, x.cpu().numpy(), "{}/f4_maxpool.png".format(savepath))
x = self.model.layer1(x)
draw_features(16, 16, x.cpu().numpy(), "{}/f5_layer1.png".format(savepath))
x = self.model.layer2(x)
draw_features(16, 32, x.cpu().numpy(), "{}/f6_layer2.png".format(savepath))
x = self.model.layer3(x)
draw_features(32, 32, x.cpu().numpy(), "{}/f7_layer3.png".format(savepath))
x = self.model.layer4(x)
draw_features(32, 32, x.cpu().numpy()[:, 0:1024, :, :], "{}/f8_layer4_1.png".format(savepath))
draw_features(32, 32, x.cpu().numpy()[:, 1024:2048, :, :], "{}/f8_layer4_2.png".format(savepath))
x = self.model.avgpool(x)
plt.plot(np.linspace(1, 2048, 2048), x.cpu().numpy()[0, :, 0, 0])
plt.savefig("{}/f9_avgpool.png".format(savepath))
plt.clf()
plt.close()
x = x.view(x.size(0), -1)
x = self.model.fc(x)
plt.plot(np.linspace(1, 1000, 1000), x.cpu().numpy()[0, :])
plt.savefig("{}/f10_fc.png".format(savepath))
plt.clf()
plt.close()
else:
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = x.view(x.size(0), -1)
x = self.model.fc(x)
return x
model = ft_net().cuda()
# pretrained_dict = resnet50.state_dict()
# pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# model_dict.update(pretrained_dict)
# net.load_state_dict(model_dict)
model.eval()
img = cv2.imread('example.jpg')
img = cv2.resize(img, (224, 224))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
img = transform(img).cuda()
img = img.unsqueeze(0)
with torch.no_grad():
start = time.time()
out = model(img)
print("total time:{}".format(time.time() - start))
result = out.cpu().numpy()
# ind=np.argmax(out.cpu().numpy())
ind = np.argsort(result, axis=1)
for i in range(5):
print("predict:top {} = cls {} : score {}".format(i + 1, ind[0, 1000 - i - 1], result[0, 1000 - i - 1]))
print("done")
|
[
"numpy.argsort",
"matplotlib.pyplot.imshow",
"os.path.exists",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"os.mkdir",
"numpy.min",
"matplotlib.pyplot.axis",
"torchvision.transforms.ToTensor",
"torchvision.models.resnet101",
"cv2.cvtColor",
"torchvision.transforms.Normalize",
"cv2.resize",
"time.time",
"cv2.imread",
"cv2.applyColorMap",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.figure",
"torch.no_grad",
"matplotlib.pyplot.subplot"
] |
[((3632, 3657), 'cv2.imread', 'cv2.imread', (['"""example.jpg"""'], {}), "('example.jpg')\n", (3642, 3657), False, 'import cv2\n'), ((3664, 3691), 'cv2.resize', 'cv2.resize', (['img', '(224, 224)'], {}), '(img, (224, 224))\n', (3674, 3691), False, 'import cv2\n'), ((3698, 3734), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2RGB'], {}), '(img, cv2.COLOR_BGR2RGB)\n', (3710, 3734), False, 'import cv2\n'), ((233, 257), 'os.path.exists', 'os.path.exists', (['savepath'], {}), '(savepath)\n', (247, 257), False, 'import os\n'), ((263, 281), 'os.mkdir', 'os.mkdir', (['savepath'], {}), '(savepath)\n', (271, 281), False, 'import os\n'), ((341, 352), 'time.time', 'time.time', ([], {}), '()\n', (350, 352), False, 'import time\n'), ((363, 391), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 16)'}), '(figsize=(16, 16))\n', (373, 391), True, 'import matplotlib.pyplot as plt\n'), ((1067, 1078), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1076, 1078), True, 'import matplotlib.pyplot as plt\n'), ((3914, 3929), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3927, 3929), False, 'import torch\n'), ((3943, 3954), 'time.time', 'time.time', ([], {}), '()\n', (3952, 3954), False, 'import time\n'), ((4111, 4137), 'numpy.argsort', 'np.argsort', (['result'], {'axis': '(1)'}), '(result, axis=1)\n', (4121, 4137), True, 'import numpy as np\n'), ((532, 565), 'matplotlib.pyplot.subplot', 'plt.subplot', (['height', 'width', '(i + 1)'], {}), '(height, width, i + 1)\n', (543, 565), True, 'import matplotlib.pyplot as plt\n'), ((574, 589), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (582, 589), True, 'import matplotlib.pyplot as plt\n'), ((633, 644), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (639, 644), True, 'import numpy as np\n'), ((660, 671), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (666, 671), True, 'import numpy as np\n'), ((820, 860), 'cv2.applyColorMap', 'cv2.applyColorMap', (['img', 'cv2.COLORMAP_JET'], {}), '(img, cv2.COLORMAP_JET)\n', (837, 860), False, 'import cv2\n'), ((949, 964), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (959, 964), True, 'import matplotlib.pyplot as plt\n'), ((1236, 1269), 'torchvision.models.resnet101', 'models.resnet101', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (1252, 1269), True, 'import torchvision.models as models\n'), ((3772, 3793), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (3791, 3793), True, 'import torchvision.transforms as transforms\n'), ((3800, 3854), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (3820, 3854), True, 'import torchvision.transforms as transforms\n'), ((2677, 2686), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2684, 2686), True, 'import matplotlib.pyplot as plt\n'), ((2699, 2710), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2708, 2710), True, 'import matplotlib.pyplot as plt\n'), ((2925, 2934), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2932, 2934), True, 'import matplotlib.pyplot as plt\n'), ((2947, 2958), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2956, 2958), True, 'import matplotlib.pyplot as plt\n'), ((1106, 1117), 'time.time', 'time.time', ([], {}), '()\n', (1115, 1117), False, 'import time\n'), ((2546, 2572), 'numpy.linspace', 'np.linspace', (['(1)', '(2048)', '(2048)'], {}), '(1, 2048, 2048)\n', (2557, 2572), True, 'import numpy as np\n'), ((2804, 
2830), 'numpy.linspace', 'np.linspace', (['(1)', '(1000)', '(1000)'], {}), '(1, 1000, 1000)\n', (2815, 2830), True, 'import numpy as np\n'), ((4009, 4020), 'time.time', 'time.time', ([], {}), '()\n', (4018, 4020), False, 'import time\n')]
|
import random
import torch
import time
import os
import numpy as np
from torch.utils.data import Dataset
from functools import partial
from .utils import dataset_to_dataloader, max_io_workers
from pytorch_transformers.tokenization_bert import BertTokenizer
# The following helpers are meant to be shared with other datasets as well; if not, they should become part of the ListeningDataset.
# Maybe make a SegmentedScanDataset with only static functions and then inherit from it.
from .utils import check_segmented_object_order, sample_scan_object, pad_samples, objects_bboxes
from .utils import instance_labels_of_context, mean_rgb_unit_norm_transform
from ...data_generation.nr3d import decode_stimulus_string
class ListeningDataset(Dataset):
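    # Each item pairs a referring utterance with its target object and sampled distractors from
    # the same scan: point-cloud samples, class labels, BERT token ids and, when cached on disk,
    # per-object 2D view features (ROI features / class vectors, bounding boxes and camera poses).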
def __init__(self, references, scans, vocab, max_seq_len, points_per_object, max_distractors,
class_to_idx=None, object_transformation=None,
visualization=False, feat2dtype=None,
num_class_dim=525, evalmode=False):
self.references = references
self.scans = scans
self.vocab = vocab
self.max_seq_len = max_seq_len
self.points_per_object = points_per_object
self.max_distractors = max_distractors
self.max_context_size = self.max_distractors + 1 # to account for the target.
self.class_to_idx = class_to_idx
self.visualization = visualization
self.object_transformation = object_transformation
self.feat2dtype = feat2dtype
self.max_2d_view = 5
self.num_class_dim = num_class_dim
self.evalmode = evalmode
self.bert_tokenizer = BertTokenizer.from_pretrained(
'bert-base-uncased')
assert self.bert_tokenizer.encode(self.bert_tokenizer.pad_token) == [0]
if not check_segmented_object_order(scans):
raise ValueError
def __len__(self):
return len(self.references)
def get_reference_data(self, index):
ref = self.references.loc[index]
scan = self.scans[ref['scan_id']]
target = scan.three_d_objects[ref['target_id']]
tokens = np.array(self.vocab.encode(ref['tokens'], self.max_seq_len), dtype=np.long)
is_nr3d = ref['dataset'] == 'nr3d'
return scan, target, tokens, ref['tokens'], is_nr3d
def prepare_distractors(self, scan, target):
target_label = target.instance_label
# First add all objects with the same instance-label as the target
distractors = [o for o in scan.three_d_objects if
(o.instance_label == target_label and (o != target))]
# Then all more objects up to max-number of distractors
already_included = {target_label}
clutter = [o for o in scan.three_d_objects if o.instance_label not in already_included]
np.random.shuffle(clutter)
distractors.extend(clutter)
distractors = distractors[:self.max_distractors]
np.random.shuffle(distractors)
return distractors
def __getitem__(self, index):
res = dict()
scan, target, tokens, text_tokens, is_nr3d = self.get_reference_data(index)
## BERT tokenize
token_inds = torch.zeros(self.max_seq_len, dtype=torch.long)
indices = self.bert_tokenizer.encode(
' '.join(text_tokens), add_special_tokens=True)
indices = indices[:self.max_seq_len]
token_inds[:len(indices)] = torch.tensor(indices)
token_num = torch.tensor(len(indices), dtype=torch.long)
# Make a context of distractors
context = self.prepare_distractors(scan, target)
# Add target object in 'context' list
target_pos = np.random.randint(len(context) + 1)
context.insert(target_pos, target)
# sample point/color for them
samples = np.array([sample_scan_object(o, self.points_per_object) for o in context])
# mark their classes
res['class_labels'] = instance_labels_of_context(context, self.max_context_size, self.class_to_idx)
if self.object_transformation is not None:
samples, offset = self.object_transformation(samples)
res['obj_offset'] = np.zeros((self.max_context_size, offset.shape[1])).astype(np.float32)
res['obj_offset'][:len(offset),:] = offset.astype(np.float32)
res['context_size'] = len(samples)
# take care of padding, so that a batch has same number of N-objects across scans.
res['objects'] = pad_samples(samples, self.max_context_size)
# Get a mask indicating which objects have the same instance-class as the target.
target_class_mask = np.zeros(self.max_context_size, dtype=np.bool)
target_class_mask[:len(context)] = [target.instance_label == o.instance_label for o in context]
res['target_class'] = self.class_to_idx[target.instance_label]
res['target_pos'] = target_pos
res['target_class_mask'] = target_class_mask
res['tokens'] = tokens
res['token_inds'] = token_inds.numpy().astype(np.int64)
res['token_num'] = token_num.numpy().astype(np.int64)
res['is_nr3d'] = is_nr3d
if self.visualization:
distrators_pos = np.zeros((6)) # 6 is the maximum context size we used in dataset collection
object_ids = np.zeros((self.max_context_size))
j = 0
for k, o in enumerate(context):
if o.instance_label == target.instance_label and o.object_id != target.object_id:
distrators_pos[j] = k
j += 1
for k, o in enumerate(context):
object_ids[k] = o.object_id
res['utterance'] = self.references.loc[index]['utterance']
res['stimulus_id'] = self.references.loc[index]['stimulus_id']
res['distrators_pos'] = distrators_pos
res['object_ids'] = object_ids
res['target_object_id'] = target.object_id
if self.evalmode:
return res
# load cached 2D context information
if os.path.isfile('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy'%scan.scan_id):
context_2d = np.load('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy'%scan.scan_id,allow_pickle=True,encoding='latin1')
objfeat_2d = context_2d.item()['obj_feat']
bbox_2d = context_2d.item()['obj_coord']
bboxsize_2d = context_2d.item()['obj_size']
obj_depth = context_2d.item()['obj_depth']
campose_2d = context_2d.item()['camera_pose']
ins_id_2d = context_2d.item()['instance_id']
if (self.feat2dtype.replace('3D',''))=='ROI': featdim = 2048
elif (self.feat2dtype.replace('3D',''))=='clsvec': featdim = self.num_class_dim
elif (self.feat2dtype.replace('3D',''))=='clsvecROI': featdim = 2048+self.num_class_dim
feat_2d = np.zeros((self.max_context_size, featdim)).astype(np.float32)
coords_2d = np.zeros((self.max_context_size, 4+12)).astype(np.float32)
selected_2d_idx = 0
selected_context_id = [o.object_id+1 for o in context] ## backbround included in cache, so +1
## only for creating tensor of the correct size
selected_objfeat_2d = objfeat_2d[selected_context_id,selected_2d_idx,:]
selected_bbox_2d = bbox_2d[selected_context_id,selected_2d_idx,:]
selected_bboxsize_2d = bboxsize_2d[selected_context_id,selected_2d_idx]
selected_obj_depth = obj_depth[selected_context_id,selected_2d_idx]
selected_campose_2d = campose_2d[selected_context_id,selected_2d_idx,:]
selected_ins_id_2d = ins_id_2d[selected_context_id,selected_2d_idx]
## Fill in randomly selected view of 2D features
for ii in range(len(selected_context_id)):
cxt_id = selected_context_id[ii]
view_id = random.randint(0, max(0,int((ins_id_2d[cxt_id,:]!=0).astype(np.float32).sum())-1))
selected_objfeat_2d[ii,:] = objfeat_2d[cxt_id,view_id,:]
selected_bbox_2d[ii,:] = bbox_2d[cxt_id,view_id,:]
selected_bboxsize_2d[ii] = bboxsize_2d[cxt_id,view_id]
selected_obj_depth[ii] = obj_depth[cxt_id,view_id]
selected_campose_2d[ii,:] = campose_2d[cxt_id,view_id,:]
if self.feat2dtype!='clsvec':
feat_2d[:len(selected_context_id),:2048] = selected_objfeat_2d
for ii in range(len(res['class_labels'])):
if self.feat2dtype=='clsvec':
feat_2d[ii,res['class_labels'][ii]] = 1.
if self.feat2dtype=='clsvecROI':
feat_2d[ii,2048+res['class_labels'][ii]] = 1.
coords_2d[:len(selected_context_id),:] = np.concatenate([selected_bbox_2d, selected_campose_2d[:,:12]],axis=-1)
coords_2d[:,0], coords_2d[:,2] = coords_2d[:,0]/1296., coords_2d[:,2]/1296. ## norm by image size
coords_2d[:,1], coords_2d[:,3] = coords_2d[:,1]/968., coords_2d[:,3]/968.
else:
print('please prepare the cached 2d feature')
exit(0)
res['feat_2d'] = feat_2d
res['coords_2d'] = coords_2d
return res
def make_data_loaders(args, referit_data, vocab, class_to_idx, scans, mean_rgb, seed=None):
n_workers = args.n_workers
if n_workers == -1:
n_workers = max_io_workers()
data_loaders = dict()
is_train = referit_data['is_train']
splits = ['train', 'test']
object_transformation = partial(mean_rgb_unit_norm_transform, mean_rgb=mean_rgb,
unit_norm=args.unit_sphere_norm)
for split in splits:
mask = is_train if split == 'train' else ~is_train
d_set = referit_data[mask]
d_set.reset_index(drop=True, inplace=True)
max_distractors = args.max_distractors if split == 'train' else args.max_test_objects - 1
## this is a silly small bug -> not the minus-1.
# if split == test remove the utterances of unique targets
if split == 'test':
def multiple_targets_utterance(x):
_, _, _, _, distractors_ids = decode_stimulus_string(x.stimulus_id)
return len(distractors_ids) > 0
multiple_targets_mask = d_set.apply(multiple_targets_utterance, axis=1)
d_set = d_set[multiple_targets_mask]
d_set.reset_index(drop=True, inplace=True)
print("length of dataset before removing non multiple test utterances {}".format(len(d_set)))
print("removed {} utterances from the test set that don't have multiple distractors".format(
np.sum(~multiple_targets_mask)))
print("length of dataset after removing non multiple test utterances {}".format(len(d_set)))
assert np.sum(~d_set.apply(multiple_targets_utterance, axis=1)) == 0
dataset = ListeningDataset(references=d_set,
scans=scans,
vocab=vocab,
max_seq_len=args.max_seq_len,
points_per_object=args.points_per_object,
max_distractors=max_distractors,
class_to_idx=class_to_idx,
object_transformation=object_transformation,
visualization=args.mode == 'evaluate',
feat2dtype=args.feat2d,
num_class_dim = 525 if '00' in args.scannet_file else 608,
evalmode=(args.mode=='evaluate'))
seed = seed
if split == 'test':
seed = args.random_seed
data_loaders[split] = dataset_to_dataloader(dataset, split, args.batch_size, n_workers, pin_memory=True, seed=seed)
return data_loaders
|
[
"pytorch_transformers.tokenization_bert.BertTokenizer.from_pretrained",
"os.path.isfile",
"torch.tensor",
"numpy.zeros",
"numpy.sum",
"functools.partial",
"numpy.concatenate",
"numpy.load",
"torch.zeros",
"numpy.random.shuffle"
] |
[((9564, 9658), 'functools.partial', 'partial', (['mean_rgb_unit_norm_transform'], {'mean_rgb': 'mean_rgb', 'unit_norm': 'args.unit_sphere_norm'}), '(mean_rgb_unit_norm_transform, mean_rgb=mean_rgb, unit_norm=args.\n unit_sphere_norm)\n', (9571, 9658), False, 'from functools import partial\n'), ((1616, 1666), 'pytorch_transformers.tokenization_bert.BertTokenizer.from_pretrained', 'BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (1645, 1666), False, 'from pytorch_transformers.tokenization_bert import BertTokenizer\n'), ((2797, 2823), 'numpy.random.shuffle', 'np.random.shuffle', (['clutter'], {}), '(clutter)\n', (2814, 2823), True, 'import numpy as np\n'), ((2926, 2956), 'numpy.random.shuffle', 'np.random.shuffle', (['distractors'], {}), '(distractors)\n', (2943, 2956), True, 'import numpy as np\n'), ((3171, 3218), 'torch.zeros', 'torch.zeros', (['self.max_seq_len'], {'dtype': 'torch.long'}), '(self.max_seq_len, dtype=torch.long)\n', (3182, 3218), False, 'import torch\n'), ((3406, 3427), 'torch.tensor', 'torch.tensor', (['indices'], {}), '(indices)\n', (3418, 3427), False, 'import torch\n'), ((4626, 4672), 'numpy.zeros', 'np.zeros', (['self.max_context_size'], {'dtype': 'np.bool'}), '(self.max_context_size, dtype=np.bool)\n', (4634, 4672), True, 'import numpy as np\n'), ((6046, 6136), 'os.path.isfile', 'os.path.isfile', (["('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy' % scan.scan_id)"], {}), "('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy' %\n scan.scan_id)\n", (6060, 6136), False, 'import os\n'), ((5192, 5203), 'numpy.zeros', 'np.zeros', (['(6)'], {}), '(6)\n', (5200, 5203), True, 'import numpy as np\n'), ((5294, 5325), 'numpy.zeros', 'np.zeros', (['self.max_context_size'], {}), '(self.max_context_size)\n', (5302, 5325), True, 'import numpy as np\n'), ((6157, 6279), 'numpy.load', 'np.load', (["('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy' % scan.scan_id)"], {'allow_pickle': '(True)', 'encoding': '"""latin1"""'}), "('../data/scannet_frames_25k_gtobjfeat_aggregate/%s.npy' % scan.\n scan_id, allow_pickle=True, encoding='latin1')\n", (6164, 6279), True, 'import numpy as np\n'), ((8803, 8875), 'numpy.concatenate', 'np.concatenate', (['[selected_bbox_2d, selected_campose_2d[:, :12]]'], {'axis': '(-1)'}), '([selected_bbox_2d, selected_campose_2d[:, :12]], axis=-1)\n', (8817, 8875), True, 'import numpy as np\n'), ((4158, 4208), 'numpy.zeros', 'np.zeros', (['(self.max_context_size, offset.shape[1])'], {}), '((self.max_context_size, offset.shape[1]))\n', (4166, 4208), True, 'import numpy as np\n'), ((6892, 6934), 'numpy.zeros', 'np.zeros', (['(self.max_context_size, featdim)'], {}), '((self.max_context_size, featdim))\n', (6900, 6934), True, 'import numpy as np\n'), ((6978, 7019), 'numpy.zeros', 'np.zeros', (['(self.max_context_size, 4 + 12)'], {}), '((self.max_context_size, 4 + 12))\n', (6986, 7019), True, 'import numpy as np\n'), ((10707, 10737), 'numpy.sum', 'np.sum', (['(~multiple_targets_mask)'], {}), '(~multiple_targets_mask)\n', (10713, 10737), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: parser_funs
Description :
Author : <NAME>
date:
-------------------------------------------------
Change Activity:
2019/7/28:
-------------------------------------------------
"""
import torch
import numpy as np
def sdp_decoder(semgraph_probs, sentlens):
    '''
    Decode semantic dependency graphs from arc-label probabilities.
    semgraph_probs: ndarray of shape (n, m, m, c); summing over the label axis
    yields semhead_probs of shape (n, m, m).
    '''
semhead_probs = semgraph_probs.sum(axis=-1)
semhead_preds = np.where(semhead_probs >= 0.5, 1, 0)
masked_semhead_preds = np.zeros(semhead_preds.shape, dtype=np.int32)
for i, (sem_preds, length) in enumerate(zip(semhead_preds, sentlens)):
masked_semhead_preds[i, :length, :length] = sem_preds[:length, :length]
n_counts = {'no_root': 0, 'multi_root': 0, 'no_head': 0, 'self_circle': 0}
for i, length in enumerate(sentlens):
for j in range(length):
if masked_semhead_preds[i, j, j] == 1:
n_counts['self_circle'] += 1
masked_semhead_preds[i, j, j] = 0
n_root = np.sum(masked_semhead_preds[i, :, 0])
if n_root == 0:
n_counts['no_root'] += 1
new_root = np.argmax(semhead_probs[i, 1:, 0]) + 1
masked_semhead_preds[i, new_root, 0] = 1
elif n_root > 1:
n_counts['multi_root'] += 1
kept_root = np.argmax(semhead_probs[i, 1:, 0]) + 1
masked_semhead_preds[i, :, 0] = 0
masked_semhead_preds[i, kept_root, 0] = 1
n_heads = masked_semhead_preds[i, :length, :length].sum(axis=-1)
n_heads[0] = 1
for j, n_head in enumerate(n_heads):
if n_head == 0:
n_counts['no_head'] += 1
semhead_probs[i, j, j] = 0
new_head = np.argmax(semhead_probs[i, j, 1:length]) + 1
masked_semhead_preds[i, j, new_head] = 1
# (n x m x m x c) -> (n x m x m)
semrel_preds = np.argmax(semgraph_probs, axis=-1)
# (n x m x m) (*) (n x m x m) -> (n x m x m)
semgraph_preds = masked_semhead_preds * semrel_preds
result = masked_semhead_preds + semgraph_preds
return result
def parse_semgraph(semgraph, sentlens):
semgraph = semgraph.tolist()
sents = []
for s, l in zip(semgraph, sentlens):
words = []
for w in s[1:l]:
arc = []
for head_idx, deprel in enumerate(w[:l]):
if deprel == 0:
continue
arc.append([head_idx, deprel - 1])
words.append(arc)
sents.append(words)
return sents
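# Minimal usage sketch (hypothetical shapes, not part of the original module):
#   probs: (batch, seq_len, seq_len, n_labels) arc/label probabilities from a scorer
#   sentlens: true sentence lengths (including the ROOT position)
#
#   graph = sdp_decoder(probs, sentlens)     # (batch, seq_len, seq_len); a value k > 0 encodes label index k-1
#   sents = parse_semgraph(graph, sentlens)  # per-word lists of [head_idx, label_idx] arcs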
|
[
"numpy.where",
"numpy.sum",
"numpy.zeros",
"numpy.argmax"
] |
[((520, 556), 'numpy.where', 'np.where', (['(semhead_probs >= 0.5)', '(1)', '(0)'], {}), '(semhead_probs >= 0.5, 1, 0)\n', (528, 556), True, 'import numpy as np\n'), ((584, 629), 'numpy.zeros', 'np.zeros', (['semhead_preds.shape'], {'dtype': 'np.int32'}), '(semhead_preds.shape, dtype=np.int32)\n', (592, 629), True, 'import numpy as np\n'), ((1981, 2015), 'numpy.argmax', 'np.argmax', (['semgraph_probs'], {'axis': '(-1)'}), '(semgraph_probs, axis=-1)\n', (1990, 2015), True, 'import numpy as np\n'), ((1101, 1138), 'numpy.sum', 'np.sum', (['masked_semhead_preds[i, :, 0]'], {}), '(masked_semhead_preds[i, :, 0])\n', (1107, 1138), True, 'import numpy as np\n'), ((1223, 1257), 'numpy.argmax', 'np.argmax', (['semhead_probs[i, 1:, 0]'], {}), '(semhead_probs[i, 1:, 0])\n', (1232, 1257), True, 'import numpy as np\n'), ((1404, 1438), 'numpy.argmax', 'np.argmax', (['semhead_probs[i, 1:, 0]'], {}), '(semhead_probs[i, 1:, 0])\n', (1413, 1438), True, 'import numpy as np\n'), ((1823, 1863), 'numpy.argmax', 'np.argmax', (['semhead_probs[i, j, 1:length]'], {}), '(semhead_probs[i, j, 1:length])\n', (1832, 1863), True, 'import numpy as np\n')]
|
import functools
import json
import re
from collections import Counter
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import seaborn as sns
from statics import STRUCTURE_TYPES
sns.set_style("whitegrid")
plt.rcParams["figure.figsize"] = (18, 12)
plt.rcParams["font.size"] = 12
np.random.seed(1234)
def get_jaccard(structure1, structure2, structure_type):
if structure_type == "clique":
nodes1 = set(structure1["nodes"])
nodes2 = set(structure2["nodes"])
overlap = nodes1.intersection(nodes2)
union = nodes1.union(nodes2)
return len(overlap) / len(union)
if structure_type in ["biclique", "starclique"]:
left1, left2 = set(structure1["left_nodes"]), set(structure2["left_nodes"])
right1, right2 = set(structure1["right_nodes"]), set(structure2["right_nodes"])
left_overlap = left1.intersection(left2)
left_union = left1.union(left2)
right_overlap = right1.intersection(right2)
right_union = right1.union(right2)
return (
len(left_overlap) / len(left_union) + len(right_overlap) / len(right_union)
) / 2
if structure_type == "star":
hub1, hub2 = {structure1["hub"]}, {structure2["hub"]}
spokes1, spokes2 = set(structure1["spokes"]), set(structure2["spokes"])
hub_overlap = hub1.intersection(hub2)
hub_union = hub1.union(hub2)
spoke_overlap = spokes1.intersection(spokes2)
spoke_union = spokes1.union(spokes2)
return (
len(hub_overlap) / len(hub_union) + len(spoke_overlap) / len(spoke_union)
) / 2
raise Exception(f"Unknown structure type: {structure_type}!")
def get_dataset_color(dataset):
if dataset.startswith("ors") or dataset.startswith("asb"):
return "dodgerblue"
elif dataset.startswith("orp") or dataset.startswith("asp"):
return "lightskyblue"
elif dataset.startswith("usl") or dataset.startswith("lus"):
return "r"
elif dataset.startswith("del") or dataset.startswith("lde"):
return "darkorange"
elif dataset.startswith("clg"):
return "purple"
elif dataset.startswith("csi"):
return "magenta"
elif "bio$_{\mathcal{A}}" in dataset:
return "green"
elif dataset.startswith("bio\n") or dataset.startswith("bio"):
return "g"
elif dataset.startswith("bag") or dataset.startswith("rba"):
return "gray"
elif dataset.startswith("erg") or dataset.startswith("rer"):
return "darkgray"
else:
raise Exception(dataset)
def load_json(file):
"""
load a json file as a dictionary
"""
with open(file) as f:
model_json = json.load(f)
return model_json
def load_log(file):
"""
load a log file as a list of log file lines
"""
with open(file) as f:
model_log = f.read().split("\n")
return model_log
def create_df(model_json):
"""
convert the model json computed by julia into a pd.DataFrame
"""
tuples = list(
zip(
model_json["macro_structures"],
model_json["macro_structure_description_lengths"],
model_json["description_lengths_over_time"],
)
)
df = pd.DataFrame(
tuples, columns=["structure", "structure_cost", "description_length"]
)
df["n_edges_total"] = [
x.get("n_edges_total", model_json["m"]) for x in df.structure
]
df["n_nodes_total"] = [
x.get("n_nodes_total", model_json["n"]) for x in df.structure
]
df["structure_type"] = [x.get("structure_type") for x in df.structure]
df["structure_shape"] = [
get_node_marker(x) if x in STRUCTURE_TYPES else "X" for x in df.structure_type
]
df["structure_color"] = [
get_node_color(x) if x in STRUCTURE_TYPES else "k" for x in df.structure_type
]
return df
def create_progression_plot(df, save_path=None):
"""
    Plot each structure's position in the selection sequence on x and the total description
    length after adding it on y, with color signaling the structure type and marker size
    signaling the number of edges.
"""
scattertuples = list(
zip(
df.index - 1,
df.description_length / df.description_length.max(),
df.n_edges_total,
df.structure_color,
df.structure_shape,
)
)
for t in reversed(scattertuples[1:]):
plt.scatter(t[0], t[1], s=t[2] if t[3] != "k" else 10, c=t[3], marker="o")
plt.xticks(range(0, len(scattertuples[1:]) + 1, 2))
plt.xlim(-1, len(scattertuples[1:]) + 1)
plt.xlabel("Selected structure")
plt.ylabel("Total description length after structure selected")
plt.title(save_path)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
plt.close()
def create_size_plot(model_json, x_granularity, y_granularity, save_path=None):
"""
number of nodes on x, number of edges on y, color signaling structure type
"""
structure_types, n_nodes, n_edges = list(
zip(
*(
[
(s["structure_type"], s.get("n_nodes_total", 0), s["n_edges_total"])
for s in model_json["macro_structures"]
]
)
)
)
plt.scatter(
n_nodes[2:],
n_edges[2:],
c=list(map(get_node_color, structure_types[2:])),
)
plt.xlabel("Number of Nodes")
plt.xticks(range(0, max(n_nodes[2:]) + x_granularity, x_granularity))
plt.yticks(range(0, max(n_edges[2:]) + y_granularity, y_granularity))
plt.ylim(0, max(n_edges[2:]) + y_granularity)
plt.ylabel("Number of Edges")
plt.title(save_path)
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
plt.close()
def get_structures_added(model_json):
"""
return list of dicts, with each dict a structure added in the model building process (i.e., generic structures are excluded)
"""
return model_json["macro_structures"][2:]
def get_node_sets(structures_added):
"""
return a list of lists, with each inner list holding the nodes of a structure
"""
return [_get_nodes(structure) for structure in structures_added]
def _get_nodes(structure):
"""
helper for get_node_sets
"""
if structure["structure_type"] in ["biclique", "starclique"]:
return structure["left_nodes"] + structure["right_nodes"]
elif structure["structure_type"] == "clique":
return structure["nodes"]
elif structure["structure_type"] == "star":
return [structure["hub"]] + structure["spokes"]
else:
raise Exception(f"Unknown structure type {structure['structure_type']}!")
def get_structure_dfs(structures_added, node_sets):
"""
return two pd.DataFrame objects encoding the node overlap between structures: abs_df (# nodes in the overlap), rel_df (jaccard similarity)
"""
abs_df = pd.DataFrame(
index=range(len(structures_added)),
columns=range(len(structures_added)),
data=np.nan,
)
rel_df = pd.DataFrame(
index=range(len(structures_added)),
columns=range(len(structures_added)),
data=np.nan,
)
for idx in range(0, len(node_sets) - 1):
for idx2 in range(idx + 1, len(node_sets)):
abs_df.at[idx, idx2] = len(
set(node_sets[idx]).intersection(set(node_sets[idx2]))
)
abs_df.at[idx2, idx] = abs_df.at[idx, idx2]
rel_df.at[idx, idx2] = len(
set(node_sets[idx]).intersection(set(node_sets[idx2]))
) / len(set(node_sets[idx]).union(set(node_sets[idx2])))
rel_df.at[idx2, idx] = rel_df.at[idx, idx2]
return abs_df, rel_df
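# Illustrative example (values are hypothetical): if structure 0 covers nodes {1, 2, 3}
# and structure 1 covers {3, 4}, then abs_df.at[0, 1] == 1 (one shared node) and
# rel_df.at[0, 1] == 0.25 (Jaccard similarity of the two node sets).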
def _get_n_nodes_covered(node_sets):
"""
helper for get_fraction_nodes_covered
"""
return len(set(functools.reduce(lambda x, y: x + y, node_sets, [])))
def get_fraction_nodes_covered(node_sets, model_json):
return _get_n_nodes_covered(node_sets) / model_json["n"]
def plot_overlap_heatmap(df, save_path=None):
"""
structures added to model on x and y, similarity as per df as color, default colormap, robust=False
"""
sns.heatmap(df, square=True)
if save_path is not None:
plt.savefig(save_path)
plt.close()
def create_rooted_bfs_tree(df, layout=False):
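    # Build a maximum spanning tree over the structure-overlap graph, attach each
    # connected component to an artificial root through its highest-degree node, and
    # return the tree oriented away from that root (optionally together with a
    # Kamada-Kawai layout computed on the spanning tree).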
G = nx.Graph(df.fillna(0))
maxst = nx.tree.maximum_spanning_tree(G)
artificial_root = G.number_of_nodes()
ccs = list(nx.connected_components(G))
for c in ccs:
component_subgraph = maxst.subgraph(c)
component_root = max(nx.degree(component_subgraph), key=lambda tup: tup[-1])[
0
] # node with max unweighted degree
maxst.add_edge(artificial_root, component_root, weight=np.finfo(float).eps)
tree = nx.traversal.bfs_tree(maxst, artificial_root)
for e in tree.edges():
tree.edges[e]["weight"] = maxst.edges[e]["weight"]
if layout:
pos = nx.layout.kamada_kawai_layout(maxst, weight=None)
return tree, pos
else:
return tree
def add_tree_layout(G, root, node_sep, level_sep):
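    # Layered tree layout: each node's y is its depth below the root scaled by
    # level_sep, and a post-order sweep packs leaves left to right (spaced by
    # node_sep) while internal nodes are centered over their children.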
for node in G.nodes():
G.nodes[node]["y"] = -level_sep * nx.dijkstra_path_length(
G, root, node, weight=None
)
base = 0
for node in nx.dfs_postorder_nodes(G, root):
succ = sorted(list(G.successors(node)), reverse=True)
if len(succ) < 1:
G.nodes[node]["x"] = base + node_sep
base += node_sep
else:
xmin = min([G.nodes[node]["x"] for node in succ])
xmax = max([G.nodes[node]["x"] for node in succ])
G.nodes[node]["x"] = xmin + (xmax - xmin) / 2
for node in G.nodes:
G.nodes[node]["x"] = -G.nodes[node]["x"]
return G
def add_color(G, df):
for node in G.nodes():
G.nodes[node]["color"] = (
df.at[node + 2, "structure_color"] if node != len(df) - 2 else "k"
)
return G
def plot_tree(G, df, save_path=None):
G = add_color(G, df)
_, ax = plt.subplots(1, 1, figsize=(12, 12))
for node in G.nodes():
x = G.nodes[node]["x"]
y = G.nodes[node]["y"]
color = G.nodes[node]["color"]
for succ in G.successors(node):
ax.plot(
[x, G.nodes[succ]["x"]],
[y, G.nodes[succ]["y"]],
"-k",
linewidth=max(G.edges[node, succ]["weight"] * 10, 1),
zorder=1,
alpha=1,
)
ax.scatter(
x,
y,
color=color,
s=df.at[node + 2, "n_nodes_total"] * 6 if node != len(df) - 2 else 300,
marker=df.at[node + 2, "structure_shape"] if node != len(df) - 2 else "X",
zorder=2,
alpha=1,
)
# if node != len(df) - 2:
# ax.annotate(node + 1, (x, y), fontsize=10, ha="center", va="center")
plt.tick_params(left=False, labelleft=False, bottom=False, labelbottom=False)
plt.axis("off")
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path, transparent=True, bbox_inches="tight")
plt.close()
def plot_structure_tree(tree, layout, df, save_path=None):
"""
plot structure tree in basic kamada kawai layout;
structure identifiers in order of structure addition and color corresponding to structure type (artificial root node black)
"""
nx.draw_networkx_edges(tree, pos=layout)
for node, (x, y) in layout.items():
plt.scatter(
x,
y,
color=df.at[node + 2, "structure_color"] if node != len(df) - 2 else "k",
s=df.at[node + 2, "n_nodes_total"] * 6 if node != len(df) - 2 else 100,
marker=df.at[node + 2, "structure_shape"] if node != len(df) - 2 else "X",
zorder=2,
alpha=0.8,
)
labels = {idx: idx + 1 for idx in tree.nodes()}
nx.draw_networkx_labels(tree, pos=layout, labels=labels)
plt.axis("off")
if save_path is not None:
plt.savefig(save_path)
plt.close()
def write_plots_for_model_json(
json_path,
save_base,
x_granularity_size,
y_granularity_size,
):
"""
end-to-end plot generation for json file at given json_path
"""
print(f"Starting {json_path}...")
model_json = load_json(json_path)
save_base = save_base.split("_size")[0]
df = create_df(model_json)
df.to_csv(re.sub("figure", "structures", save_base) + ".csv", index=False)
structures_added = get_structures_added(model_json)
node_sets = get_node_sets(structures_added)
try:
abs_df, rel_df = get_structure_dfs(structures_added, node_sets)
rel_df.to_csv(re.sub("figure", "structure_overlap_matrix", save_base) + ".csv")
tree, layout = create_rooted_bfs_tree(rel_df, layout=True)
plot_tree(
add_tree_layout(tree, tree.number_of_nodes() - 1, 10, 10),
df,
re.sub("figure", "tree-hierarchical", save_base) + ".pdf",
)
plot_structure_tree(
tree, layout, df, re.sub("figure", "tree-kamada", save_base) + ".pdf"
)
G = create_overlap_quotient_graph(structures_added, abs_df, model_json["n"])
plot_overlap_quotient_graph(
G,
df,
model_json["n"],
re.sub("figure", "overlap-quotient", save_base) + ".pdf",
)
G = create_structure_quotient_graph(node_sets, save_base)
plot_structure_quotient_graph(
G,
node_sets,
structures_added,
save_path=re.sub("figure", "structure-quotient", save_base) + ".pdf",
)
    except Exception:
print(
f"Error for overlap dataframes or graph plots: {json_path} - moving on..."
)
try:
create_progression_plot(
df,
re.sub("figure", "progress", save_base) + ".pdf",
)
    except Exception:
print(f"Error for progression plot: {json_path} - moving on...")
try:
create_size_plot(
model_json,
x_granularity_size,
y_granularity_size,
re.sub("figure", "sizes", save_base) + ".pdf",
)
    except Exception:
print(f"Error for size plot: {json_path} - moving on...")
def get_edgelist_separator(edgelist_path):
with open(edgelist_path) as f:
for line in f:
if not line.startswith("#"):
if "\t" in line:
return "\t"
elif "," in line:
return ","
elif " " in line:
return " "
                else:
                    raise ValueError(f"Could not detect the separator in {edgelist_path}")
def create_structure_quotient_graph(nodes, save_base):
nodemap_path = (
re.sub("figure-", "", re.sub("graphics/", "results/", save_base))
+ "-nodemap.csv"
)
nodemap = pd.read_csv(nodemap_path)
edgelist_path = (
re.sub("figure-", "", re.sub("graphics/", "data/", save_base)) + ".txt"
)
edges = pd.read_csv(
edgelist_path,
sep=get_edgelist_separator(edgelist_path),
comment="#",
header=None,
usecols=[0, 1],
).rename({0: "u", 1: "v"}, axis=1)
new_edges = edges.merge(nodemap, left_on="u", right_on="original_id").merge(
nodemap, left_on="v", right_on="original_id", suffixes=("_u", "_v")
)[["julia_id_u", "julia_id_v"]]
assert len(edges) == len(new_edges)
nodes_to_structures = get_nodes_to_structures(nodes)
G = nx.MultiGraph()
G.add_nodes_from(range(1, len(nodes) + 1))
for u, v in zip(new_edges.julia_id_u, new_edges.julia_id_v):
u_structures = nodes_to_structures.get(u, [])
v_structures = nodes_to_structures.get(v, [])
if (
u_structures
and v_structures
and not set(u_structures).intersection(v_structures)
):
for us in u_structures:
for vs in v_structures:
G.add_edge(us, vs)
wG = nx.Graph()
wG.add_nodes_from(G.nodes())
wG.add_weighted_edges_from([(*k, v) for k, v in dict(Counter(G.edges())).items()])
return wG
def get_nodes_to_structures(nodes):
nodes_to_structures = {}
for idx, nodeset in enumerate(nodes, start=1):
for node in nodeset:
nodes_to_structures[node] = nodes_to_structures.get(node, []) + [idx]
return nodes_to_structures
def get_node_color(node_type):
if node_type == "star":
return "orange"
elif node_type == "clique":
return "dodgerblue"
elif node_type == "biclique":
return "#BE271A" # red3
elif node_type == "starclique":
return "orchid"
    else:
        raise ValueError(f"Unknown structure type: {node_type}!")
def get_node_marker(node_type):
if node_type == "star":
return "^"
elif node_type == "clique":
return "o"
elif node_type == "biclique":
return "s"
elif node_type == "starclique":
return "d"
    else:
        raise ValueError(f"Unknown structure type: {node_type}!")
def plot_structure_quotient_graph(wG, nodes, structures, save_path=None):
pos = nx.layout.fruchterman_reingold_layout(wG, k=2.5, seed=0)
_ = plt.figure(figsize=(12, 12))
nx.draw_networkx_edges(
wG,
pos=pos,
edgelist=wG.edges(),
width=[w / 100 for u, v, w in wG.edges(data="weight")],
)
for node in wG.nodes():
plt.scatter(
*pos[node],
s=len(nodes[node - 1]) * 5,
c=get_node_color(structures[node - 1]["structure_type"]),
marker=get_node_marker(structures[node - 1]["structure_type"]),
)
nx.draw_networkx_labels(wG, pos, zorder=100)
plt.axis("off")
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path)
plt.close()
def create_overlap_quotient_graph(structures_added, abs_df, n_total):
G = nx.Graph()
for idx, structure in enumerate(structures_added, start=1):
G.add_node(
idx, **{**structure, "n_relative": structure["n_nodes_total"] / n_total}
)
for i in range(len(abs_df)):
for j in range(i + 1, len(abs_df)):
edge_weight = abs_df.at[i, j] / n_total
if edge_weight > 0:
G.add_edge(i + 1, j + 1, weight=edge_weight)
return G
def plot_overlap_quotient_graph(G, df, n_total, save_path=None):
np.random.seed(1234)
pos = nx.layout.fruchterman_reingold_layout(G)
_, ax = plt.subplots(1, 1, figsize=(12, 12))
for x, y, w in G.edges(data="weight"):
if w * n_total > 1:
ax.plot(
[pos[x][0], pos[y][0]],
[pos[x][1], pos[y][1]],
"-k",
linewidth=w * n_total / 100,
zorder=-10,
alpha=0.5,
)
for node in G.nodes(data=True):
ax.scatter(
*pos[node[0]],
s=5.0 * node[1]["n_nodes_total"],
c=df.at[node[0] + 1, "structure_color"],
marker=df.at[node[0] + 1, "structure_shape"],
zorder=1,
)
nx.draw_networkx_labels(G, pos, zorder=100)
plt.axis("off")
plt.tight_layout()
if save_path is not None:
plt.savefig(save_path, transparent=True)
plt.close()
|
[
"networkx.layout.fruchterman_reingold_layout",
"pandas.read_csv",
"matplotlib.pyplot.ylabel",
"networkx.traversal.bfs_tree",
"seaborn.set_style",
"networkx.draw_networkx_labels",
"matplotlib.pyplot.xlabel",
"networkx.MultiGraph",
"matplotlib.pyplot.close",
"numpy.random.seed",
"networkx.dfs_postorder_nodes",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.axis",
"networkx.layout.kamada_kawai_layout",
"matplotlib.pyplot.savefig",
"functools.reduce",
"matplotlib.pyplot.tick_params",
"seaborn.heatmap",
"networkx.connected_components",
"networkx.dijkstra_path_length",
"numpy.finfo",
"matplotlib.pyplot.title",
"re.sub",
"networkx.degree",
"networkx.tree.maximum_spanning_tree",
"networkx.Graph",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.tight_layout",
"json.load",
"networkx.draw_networkx_edges",
"matplotlib.pyplot.subplots"
] |
[((225, 251), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (238, 251), True, 'import seaborn as sns\n'), ((325, 345), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (339, 345), True, 'import numpy as np\n'), ((3269, 3356), 'pandas.DataFrame', 'pd.DataFrame', (['tuples'], {'columns': "['structure', 'structure_cost', 'description_length']"}), "(tuples, columns=['structure', 'structure_cost',\n 'description_length'])\n", (3281, 3356), True, 'import pandas as pd\n'), ((4606, 4638), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Selected structure"""'], {}), "('Selected structure')\n", (4616, 4638), True, 'import matplotlib.pyplot as plt\n'), ((4643, 4706), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total description length after structure selected"""'], {}), "('Total description length after structure selected')\n", (4653, 4706), True, 'import matplotlib.pyplot as plt\n'), ((4711, 4731), 'matplotlib.pyplot.title', 'plt.title', (['save_path'], {}), '(save_path)\n', (4720, 4731), True, 'import matplotlib.pyplot as plt\n'), ((4736, 4754), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (4752, 4754), True, 'import matplotlib.pyplot as plt\n'), ((5429, 5458), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of Nodes"""'], {}), "('Number of Nodes')\n", (5439, 5458), True, 'import matplotlib.pyplot as plt\n'), ((5661, 5690), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Edges"""'], {}), "('Number of Edges')\n", (5671, 5690), True, 'import matplotlib.pyplot as plt\n'), ((5695, 5715), 'matplotlib.pyplot.title', 'plt.title', (['save_path'], {}), '(save_path)\n', (5704, 5715), True, 'import matplotlib.pyplot as plt\n'), ((5720, 5738), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5736, 5738), True, 'import matplotlib.pyplot as plt\n'), ((8244, 8272), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'square': '(True)'}), '(df, square=True)\n', (8255, 8272), True, 'import seaborn as sns\n'), ((8445, 8477), 'networkx.tree.maximum_spanning_tree', 'nx.tree.maximum_spanning_tree', (['G'], {}), '(G)\n', (8474, 8477), True, 'import networkx as nx\n'), ((8868, 8913), 'networkx.traversal.bfs_tree', 'nx.traversal.bfs_tree', (['maxst', 'artificial_root'], {}), '(maxst, artificial_root)\n', (8889, 8913), True, 'import networkx as nx\n'), ((9359, 9390), 'networkx.dfs_postorder_nodes', 'nx.dfs_postorder_nodes', (['G', 'root'], {}), '(G, root)\n', (9381, 9390), True, 'import networkx as nx\n'), ((10106, 10142), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 12)'}), '(1, 1, figsize=(12, 12))\n', (10118, 10142), True, 'import matplotlib.pyplot as plt\n'), ((10991, 11068), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'left': '(False)', 'labelleft': '(False)', 'bottom': '(False)', 'labelbottom': '(False)'}), '(left=False, labelleft=False, bottom=False, labelbottom=False)\n', (11006, 11068), True, 'import matplotlib.pyplot as plt\n'), ((11073, 11088), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (11081, 11088), True, 'import matplotlib.pyplot as plt\n'), ((11093, 11111), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11109, 11111), True, 'import matplotlib.pyplot as plt\n'), ((11495, 11535), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['tree'], {'pos': 'layout'}), '(tree, pos=layout)\n', (11517, 11535), True, 'import networkx as nx\n'), ((11995, 12051), 
'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['tree'], {'pos': 'layout', 'labels': 'labels'}), '(tree, pos=layout, labels=labels)\n', (12018, 12051), True, 'import networkx as nx\n'), ((12056, 12071), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (12064, 12071), True, 'import matplotlib.pyplot as plt\n'), ((14946, 14971), 'pandas.read_csv', 'pd.read_csv', (['nodemap_path'], {}), '(nodemap_path)\n', (14957, 14971), True, 'import pandas as pd\n'), ((15582, 15597), 'networkx.MultiGraph', 'nx.MultiGraph', ([], {}), '()\n', (15595, 15597), True, 'import networkx as nx\n'), ((16085, 16095), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (16093, 16095), True, 'import networkx as nx\n'), ((17136, 17192), 'networkx.layout.fruchterman_reingold_layout', 'nx.layout.fruchterman_reingold_layout', (['wG'], {'k': '(2.5)', 'seed': '(0)'}), '(wG, k=2.5, seed=0)\n', (17173, 17192), True, 'import networkx as nx\n'), ((17201, 17229), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 12)'}), '(figsize=(12, 12))\n', (17211, 17229), True, 'import matplotlib.pyplot as plt\n'), ((17659, 17703), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['wG', 'pos'], {'zorder': '(100)'}), '(wG, pos, zorder=100)\n', (17682, 17703), True, 'import networkx as nx\n'), ((17708, 17723), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (17716, 17723), True, 'import matplotlib.pyplot as plt\n'), ((17728, 17746), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (17744, 17746), True, 'import matplotlib.pyplot as plt\n'), ((17908, 17918), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (17916, 17918), True, 'import networkx as nx\n'), ((18404, 18424), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (18418, 18424), True, 'import numpy as np\n'), ((18435, 18475), 'networkx.layout.fruchterman_reingold_layout', 'nx.layout.fruchterman_reingold_layout', (['G'], {}), '(G)\n', (18472, 18475), True, 'import networkx as nx\n'), ((18488, 18524), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(12, 12)'}), '(1, 1, figsize=(12, 12))\n', (18500, 18524), True, 'import matplotlib.pyplot as plt\n'), ((19109, 19152), 'networkx.draw_networkx_labels', 'nx.draw_networkx_labels', (['G', 'pos'], {'zorder': '(100)'}), '(G, pos, zorder=100)\n', (19132, 19152), True, 'import networkx as nx\n'), ((19157, 19172), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (19165, 19172), True, 'import matplotlib.pyplot as plt\n'), ((19177, 19195), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19193, 19195), True, 'import matplotlib.pyplot as plt\n'), ((2729, 2741), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2738, 2741), False, 'import json\n'), ((4426, 4500), 'matplotlib.pyplot.scatter', 'plt.scatter', (['t[0]', 't[1]'], {'s': "(t[2] if t[3] != 'k' else 10)", 'c': 't[3]', 'marker': '"""o"""'}), "(t[0], t[1], s=t[2] if t[3] != 'k' else 10, c=t[3], marker='o')\n", (4437, 4500), True, 'import matplotlib.pyplot as plt\n'), ((4793, 4815), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (4804, 4815), True, 'import matplotlib.pyplot as plt\n'), ((4824, 4835), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (4833, 4835), True, 'import matplotlib.pyplot as plt\n'), ((5777, 5799), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (5788, 5799), True, 'import matplotlib.pyplot as plt\n'), 
((5808, 5819), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5817, 5819), True, 'import matplotlib.pyplot as plt\n'), ((8311, 8333), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (8322, 8333), True, 'import matplotlib.pyplot as plt\n'), ((8342, 8353), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (8351, 8353), True, 'import matplotlib.pyplot as plt\n'), ((8535, 8561), 'networkx.connected_components', 'nx.connected_components', (['G'], {}), '(G)\n', (8558, 8561), True, 'import networkx as nx\n'), ((9029, 9078), 'networkx.layout.kamada_kawai_layout', 'nx.layout.kamada_kawai_layout', (['maxst'], {'weight': 'None'}), '(maxst, weight=None)\n', (9058, 9078), True, 'import networkx as nx\n'), ((11150, 11211), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'transparent': '(True)', 'bbox_inches': '"""tight"""'}), "(save_path, transparent=True, bbox_inches='tight')\n", (11161, 11211), True, 'import matplotlib.pyplot as plt\n'), ((11220, 11231), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11229, 11231), True, 'import matplotlib.pyplot as plt\n'), ((12110, 12132), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (12121, 12132), True, 'import matplotlib.pyplot as plt\n'), ((12141, 12152), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (12150, 12152), True, 'import matplotlib.pyplot as plt\n'), ((17785, 17807), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (17796, 17807), True, 'import matplotlib.pyplot as plt\n'), ((17816, 17827), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (17825, 17827), True, 'import matplotlib.pyplot as plt\n'), ((19234, 19274), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {'transparent': '(True)'}), '(save_path, transparent=True)\n', (19245, 19274), True, 'import matplotlib.pyplot as plt\n'), ((19283, 19294), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19292, 19294), True, 'import matplotlib.pyplot as plt\n'), ((7900, 7951), 'functools.reduce', 'functools.reduce', (['(lambda x, y: x + y)', 'node_sets', '[]'], {}), '(lambda x, y: x + y, node_sets, [])\n', (7916, 7951), False, 'import functools\n'), ((9256, 9307), 'networkx.dijkstra_path_length', 'nx.dijkstra_path_length', (['G', 'root', 'node'], {'weight': 'None'}), '(G, root, node, weight=None)\n', (9279, 9307), True, 'import networkx as nx\n'), ((12513, 12554), 're.sub', 're.sub', (['"""figure"""', '"""structures"""', 'save_base'], {}), "('figure', 'structures', save_base)\n", (12519, 12554), False, 'import re\n'), ((14857, 14899), 're.sub', 're.sub', (['"""graphics/"""', '"""results/"""', 'save_base'], {}), "('graphics/', 'results/', save_base)\n", (14863, 14899), False, 'import re\n'), ((15024, 15063), 're.sub', 're.sub', (['"""graphics/"""', '"""data/"""', 'save_base'], {}), "('graphics/', 'data/', save_base)\n", (15030, 15063), False, 'import re\n'), ((8657, 8686), 'networkx.degree', 'nx.degree', (['component_subgraph'], {}), '(component_subgraph)\n', (8666, 8686), True, 'import networkx as nx\n'), ((12785, 12840), 're.sub', 're.sub', (['"""figure"""', '"""structure_overlap_matrix"""', 'save_base'], {}), "('figure', 'structure_overlap_matrix', save_base)\n", (12791, 12840), False, 'import re\n'), ((13036, 13084), 're.sub', 're.sub', (['"""figure"""', '"""tree-hierarchical"""', 'save_base'], {}), "('figure', 'tree-hierarchical', save_base)\n", (13042, 13084), False, 'import re\n'), ((13164, 13206), 're.sub', 
're.sub', (['"""figure"""', '"""tree-kamada"""', 'save_base'], {}), "('figure', 'tree-kamada', save_base)\n", (13170, 13206), False, 'import re\n'), ((13420, 13467), 're.sub', 're.sub', (['"""figure"""', '"""overlap-quotient"""', 'save_base'], {}), "('figure', 'overlap-quotient', save_base)\n", (13426, 13467), False, 'import re\n'), ((13947, 13986), 're.sub', 're.sub', (['"""figure"""', '"""progress"""', 'save_base'], {}), "('figure', 'progress', save_base)\n", (13953, 13986), False, 'import re\n'), ((14227, 14263), 're.sub', 're.sub', (['"""figure"""', '"""sizes"""', 'save_base'], {}), "('figure', 'sizes', save_base)\n", (14233, 14263), False, 'import re\n'), ((8836, 8851), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (8844, 8851), True, 'import numpy as np\n'), ((13683, 13732), 're.sub', 're.sub', (['"""figure"""', '"""structure-quotient"""', 'save_base'], {}), "('figure', 'structure-quotient', save_base)\n", (13689, 13732), False, 'import re\n')]
|
#
# Copyright (c) 2018-2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import falcon
import numpy as np
from grpc import StatusCode
from tensorflow.core.framework import tensor_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow_serving.apis import predict_pb2
from tensorflow.python.framework import dtypes as dtypes
from tensorflow.python.framework import tensor_util as tensor_util
import tensorflow.contrib.util as tf_contrib_util
# import tensorflow.contrib.util as tf_contrib_util
from ie_serving.models.shape_management.utils import BatchingMode, ShapeMode
from ie_serving.server.constants import \
INVALID_INPUT_KEY, INVALID_SHAPE, INVALID_BATCHSIZE, GRPC, REST
from ie_serving.logger import get_logger
logger = get_logger(__name__)
statusCodes = {
'invalid_arg': {GRPC: StatusCode.INVALID_ARGUMENT,
REST: falcon.HTTP_BAD_REQUEST},
}
def prepare_input_data(target_engine, data, service_type):
# returns:
# inference_input, None on success
# None, error_message on error
model_inputs_in_input_request = list(dict(data).keys())
input_keys = target_engine.input_key_names
inference_input = {}
for requested_input_blob in model_inputs_in_input_request:
if requested_input_blob not in input_keys:
message = INVALID_INPUT_KEY % (model_inputs_in_input_request,
input_keys)
logger.debug("PREDICT error: {}".format(message))
return None, message
tensor_name = target_engine.model_keys['inputs'][requested_input_blob]
if service_type == GRPC:
try:
tensor_input = tf_contrib_util. \
make_ndarray(data[requested_input_blob])
except Exception as e:
message = str(e)
logger.debug("PREDICT prepare_input_data make_ndarray error: "
"{}".format(message))
return None, message
else:
tensor_input = np.asarray(data[requested_input_blob])
# Validate shape if shape not in auto mode
if target_engine.shape_info.mode != ShapeMode.AUTO:
shape_required_in_model = target_engine.net.inputs[
tensor_name].shape
# For reshapable models check all dimensions,
# for non-reshapable, check all starting from the second (omit
# batch size)
if target_engine.shape_info.mode == ShapeMode.DISABLED:
starting_dim = 1
else:
starting_dim = 0
# check requested shape and model shape
if shape_required_in_model[starting_dim:] != list(
tensor_input.shape)[starting_dim:]:
message = INVALID_SHAPE.format(list(tensor_input.shape),
shape_required_in_model)
logger.debug("PREDICT error: {}".format(message))
return None, message
# check if input batch size match the model only if not auto mode
if target_engine.batching_info.mode != \
BatchingMode.AUTO and shape_required_in_model[0] != \
tensor_input.shape[0]:
message = INVALID_BATCHSIZE.format(
tensor_input.shape[0],
target_engine.batching_info.batch_size)
logger.debug("PREDICT error,Invalid batchsize:{}".format(
message))
return None, message
inference_input[tensor_name] = tensor_input
return inference_input, None
def prepare_output_as_list(inference_output, model_available_outputs):
response = predict_pb2.PredictResponse()
for key, value in model_available_outputs.items():
if value in inference_output:
dtype = dtypes.as_dtype(inference_output[value].dtype)
output_tensor = tensor_pb2.TensorProto(
dtype=dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(
inference_output[value].shape).as_proto())
result = inference_output[value].flatten()
tensor_util._NP_TO_APPEND_FN[dtype.as_numpy_dtype](output_tensor,
result)
response.outputs[key].CopyFrom(output_tensor)
return response
'''
This function is not used.
Preparing the output this way would probably be faster,
but it would require changing the gRPC clients.
def prepare_output_with_tf(inference_output, model_available_outputs):
response = predict_pb2.PredictResponse()
for output in model_available_outputs:
response.outputs[output].CopyFrom(
tf_contrib_util.make_tensor_proto(inference_output[output],
shape=inference_output[output].
shape,
dtype=dtypes.as_dtype(
inference_output
[output].dtype).
as_datatype_enum))
return response
'''
|
[
"tensorflow_serving.apis.predict_pb2.PredictResponse",
"tensorflow.python.framework.dtypes.as_dtype",
"numpy.asarray",
"tensorflow.python.framework.tensor_shape.as_shape",
"ie_serving.logger.get_logger",
"tensorflow.contrib.util.make_ndarray",
"ie_serving.server.constants.INVALID_BATCHSIZE.format"
] |
[((1302, 1322), 'ie_serving.logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1312, 1322), False, 'from ie_serving.logger import get_logger\n'), ((4355, 4384), 'tensorflow_serving.apis.predict_pb2.PredictResponse', 'predict_pb2.PredictResponse', ([], {}), '()\n', (4382, 4384), False, 'from tensorflow_serving.apis import predict_pb2\n'), ((2621, 2659), 'numpy.asarray', 'np.asarray', (['data[requested_input_blob]'], {}), '(data[requested_input_blob])\n', (2631, 2659), True, 'import numpy as np\n'), ((4501, 4547), 'tensorflow.python.framework.dtypes.as_dtype', 'dtypes.as_dtype', (['inference_output[value].dtype'], {}), '(inference_output[value].dtype)\n', (4516, 4547), True, 'from tensorflow.python.framework import dtypes as dtypes\n'), ((2257, 2313), 'tensorflow.contrib.util.make_ndarray', 'tf_contrib_util.make_ndarray', (['data[requested_input_blob]'], {}), '(data[requested_input_blob])\n', (2285, 2313), True, 'import tensorflow.contrib.util as tf_contrib_util\n'), ((3899, 3991), 'ie_serving.server.constants.INVALID_BATCHSIZE.format', 'INVALID_BATCHSIZE.format', (['tensor_input.shape[0]', 'target_engine.batching_info.batch_size'], {}), '(tensor_input.shape[0], target_engine.batching_info\n .batch_size)\n', (3923, 3991), False, 'from ie_serving.server.constants import INVALID_INPUT_KEY, INVALID_SHAPE, INVALID_BATCHSIZE, GRPC, REST\n'), ((4678, 4730), 'tensorflow.python.framework.tensor_shape.as_shape', 'tensor_shape.as_shape', (['inference_output[value].shape'], {}), '(inference_output[value].shape)\n', (4699, 4730), False, 'from tensorflow.python.framework import tensor_shape\n')]
|
import numpy as np
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
''' Shot Accuracy Plot
ticks = [5,6,7,8,9,10,11,12,13,14,15]#[1,2,3,4,5]
data_lists = [
[92.35,92.52,93.2,93.71,93.85,94.15,94.22,94.37,94.68,94.73,94.82],
[89.15,89.74,90.41,90.88,91.31,91.47,91.84,92.03,92.2,92.3,92.48],
[86.13,86.98,87.8,88.15,88.71,89.22,89.43,89.6,89.87,90.05,90.16],
[80.04,81.38,82.39,83.09,83.61,84.21,84.6,85.16,85.35,85.79,85.99]
]#[[0.4,1.2,2.3,4,5.5]]
label_lists = [
'VirusShare_00177 5-way',
'VirusShare_00177 10-way',
'APIMDS 5-way',
'APIMDS 10-way'
]#['test1']
color_lists = ['red', 'red', 'royalblue', 'royalblue'] #['red']
marker_lists = ['o', '^', 'o', "^"]#['.']
'''
acc_data_lists = [
[91.04,91.71,92.11,92.35,91.8,91.55,90.71,91.05,90.22,90.12, 91.13, 90.32, 90.48, 90.84, 90.42, 91.14, 90.49, 90.49, 90.87, 90.77],
[87.44, 88.64, 88.7, 89.15, 88.07, 87.88, 87.77, 87.64, 87.46, 87.02, 86.93, 87.05, 86.87, 87.43, 87.56, 87.72, 87.38, 86.98, 87.31, 87.28]
]
time_data_lists = [
[14.2, 19.6, 25.1, 29.4, 36.9, 42.4, 48.8, 53.6, 58.6, 64.5, 70.1, 75.1, 80.5, 83.2, 90.5, 93.4, 100.6, 106.1, 111.5, 115.6],
[22.4, 32.0, 41.1, 50.2, 61.5, 71.4, 79.9, 89.8, 98.8, 108.5, 116.3, 122.4, 131.8, 142.6, 154.5, 164.3, 170.7, 187.9, 195.2, 201.9]
]
acc_label_lists = [
"VirusShare_00177 5-shot 5-way accuracy",
"VirusShare_00177 5-shot 10-way accuracy",
# "APIMDS 5-shot 5-way",
# "APIMDS 5-shot 10-way"
]
time_label_list = [
"VirusShare_00177 5-shot 5-way test time per episode",
"VirusShare_00177 5-shot 10-way test time per episode"
]
color_lists = ['orange', 'green']
marker_lists = ['s', 's']
bar_width = 10
ticks = np.arange(50, 1050, 50)
num_list = len(time_data_lists)
bar_ticks = [
np.arange(50, 1050, 50) - (num_list/2 - i - 0.5) * bar_width
for i in range(num_list)
]
marker_size = 6
title = ''
x_title = 'Sequence Length'
acc_y_title = 'Accuracy(%)'
time_y_title = 'ms / Episode'
fig_size = (15,6)
dpi = 300
fig = plt.figure(figsize=fig_size, dpi=dpi)
plt.xticks(ticks)
plt.title(title)
# plt.xlabel(x_title)
# plt.ylabel(y_title)
plt.grid(True, axis='y')
acc_axis = fig.add_subplot(111)
time_axis = acc_axis.twinx()
acc_axis.set_xlabel('Maximum Sequence Length')
acc_axis.set_ylabel(acc_y_title)
time_axis.set_ylabel(time_y_title)
acc_axis.set_ylim(75, 95)
time_axis.set_ylim(0, 350)
for acc_data, time_data, bar_tick, acc_label, time_label, color, marker in zip(acc_data_lists, time_data_lists, bar_ticks, acc_label_lists, time_label_list, color_lists, marker_lists):
acc_axis.plot(ticks, acc_data, color=color, marker=marker, label=acc_label, markersize=marker_size)
    time_axis.bar(bar_tick, time_data, color=color, width=bar_width, label=time_label, zorder=2)
acc_axis.legend(loc='upper left')
time_axis.legend(loc='upper right')
# plt.legend()
plt.show()
# plt.savefig('C:/Users/Asichurter/Desktop/截图/virushare.jpg', format='JPEG', dpi=300)
|
[
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((1704, 1727), 'numpy.arange', 'np.arange', (['(50)', '(1050)', '(50)'], {}), '(50, 1050, 50)\n', (1713, 1727), True, 'import numpy as np\n'), ((2020, 2057), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'fig_size', 'dpi': 'dpi'}), '(figsize=fig_size, dpi=dpi)\n', (2030, 2057), True, 'import matplotlib.pyplot as plt\n'), ((2058, 2075), 'matplotlib.pyplot.xticks', 'plt.xticks', (['ticks'], {}), '(ticks)\n', (2068, 2075), True, 'import matplotlib.pyplot as plt\n'), ((2076, 2092), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2085, 2092), True, 'import matplotlib.pyplot as plt\n'), ((2137, 2161), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {'axis': '"""y"""'}), "(True, axis='y')\n", (2145, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2860, 2870), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2868, 2870), True, 'import matplotlib.pyplot as plt\n'), ((1778, 1801), 'numpy.arange', 'np.arange', (['(50)', '(1050)', '(50)'], {}), '(50, 1050, 50)\n', (1787, 1801), True, 'import numpy as np\n')]
|
import argparse
import os
import random
from PIL import Image
import cv2
import gym
import numpy as np

# The following constants are referenced below but were not defined in this snippet.
# SAVE_DIR and IMAGES_TO_GENERATE follow the argparse help text; the remaining values
# are placeholder assumptions so the script can run as-is.
SAVE_DIR = "data/"
IMAGES_TO_GENERATE = 10000
IMAGE_SIZE = (64, 64)                     # assumed downscaling target (width, height)
ENV_NAMES = ["Breakout-v0", "Pong-v0"]    # assumed set of environments
MAX_IMAGES_PER_ENV_INSTANCE = 100         # assumed cap before switching environments
def save_as_image(observation,
save_dir,
img_name,
prefix="img_"):
# donwnscaling the image
im_array = cv2.resize(observation, IMAGE_SIZE)
im = Image.fromarray(im_array, 'RGB')
imname = '{}{}.png'.format(prefix, img_name)
im.save(os.path.join(save_dir, imname))
if __name__ == "__main__":
arg_parser = argparse.ArgumentParser()
# Adding the arguments
arg_parser.add_argument("--save_dir", type=str, default=SAVE_DIR,
help="Relative path to the directory to store "
"the data (default value is 'data/'")
arg_parser.add_argument("--num_images", type=int,
default=IMAGES_TO_GENERATE,
help="Number of images to generate (default "
"value is 10000)")
args = arg_parser.parse_args()
save_dir = args.save_dir
num_images = args.num_images
if not os.path.exists(save_dir):
os.makedirs(save_dir)
envs = [(gym.make(name)) for name in ENV_NAMES]
env = random.choice(envs)
env.reset()
i, current_env_images = 0, 0
while i < num_images:
obs, _, is_done, _ = env.step(env.action_space.sample())
if np.mean(obs) > 0.01:
save_as_image(obs, save_dir, str(i))
current_env_images += 1
i += 1
else:
continue
if is_done or current_env_images % MAX_IMAGES_PER_ENV_INSTANCE == 0:
current_env_images = 0
env = random.choice(envs)
env.reset()
|
[
"os.path.exists",
"PIL.Image.fromarray",
"random.choice",
"numpy.mean",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"cv2.resize",
"gym.make"
] |
[((270, 305), 'cv2.resize', 'cv2.resize', (['observation', 'IMAGE_SIZE'], {}), '(observation, IMAGE_SIZE)\n', (280, 305), False, 'import cv2\n'), ((315, 347), 'PIL.Image.fromarray', 'Image.fromarray', (['im_array', '"""RGB"""'], {}), "(im_array, 'RGB')\n", (330, 347), False, 'from PIL import Image\n'), ((487, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (510, 512), False, 'import argparse\n'), ((1224, 1243), 'random.choice', 'random.choice', (['envs'], {}), '(envs)\n', (1237, 1243), False, 'import random\n'), ((409, 439), 'os.path.join', 'os.path.join', (['save_dir', 'imname'], {}), '(save_dir, imname)\n', (421, 439), False, 'import os\n'), ((1105, 1129), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1119, 1129), False, 'import os\n'), ((1139, 1160), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (1150, 1160), False, 'import os\n'), ((1175, 1189), 'gym.make', 'gym.make', (['name'], {}), '(name)\n', (1183, 1189), False, 'import gym\n'), ((1396, 1408), 'numpy.mean', 'np.mean', (['obs'], {}), '(obs)\n', (1403, 1408), True, 'import numpy as np\n'), ((1686, 1705), 'random.choice', 'random.choice', (['envs'], {}), '(envs)\n', (1699, 1705), False, 'import random\n')]
|
import numpy as np
def hermitegaussian(coeffs,x,sigma):
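    # Evaluates f(x) = sum_i coeffs[i] * H_i(x / sigma) * exp(-(x / sigma)**2), i.e. a
    # Gauss-Hermite series, where H_i are the physicists' Hermite polynomials used by
    # numpy.polynomial.hermite.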
xhat = (x/sigma)
herms = np.polynomial.hermite.Hermite(coeffs)
return herms(xhat) * np.exp(-xhat**2)
def continuous_convolve(kernels,obj):
    # kernels[jj] is the kernel applied at output position jj; assumes kernels has shape
    # (len(obj), n) with each kernel centered on its midpoint (the original loop index
    # and several locals were undefined).
    size = obj.shape[0]
    n = kernels.shape[1]
    centering = int(n/2)
    out = np.empty(obj.shape)
    for jj in range(kernels.shape[0]):
        out[jj] = np.dot(obj[max(0,jj-centering):min(size,jj+n-centering)], kernels[jj,max(0,centering-jj):min(n,size-jj+centering)])
    return out
def convolve_hermites(f_in,coeffs,center_kw,sigma,sigma_range,spacing):
x = np.arange(-sigma_range * max(sigma),sigma_range * max(sigma),step=spacing)
if center_kw == 'centered':
centering = int(x.shape[0]/2)
elif center_kw == 'right':
centering = 0
elif center_kw == 'left':
centering = x.shape[0]-1
else:
print('setting lsf centering to middle')
centering = int(x.shape[0]/2)
f_out = np.empty(f_in.shape)
size = f_in.shape[0]
n = x.shape[0]
for jj in range(f_out.shape[0]):
kernel = hermitegaussian(coeffs[jj,:],x,sigma[jj])
# L1 normalize the kernel so the total flux is conserved
kernel /= np.sum(kernel)
f_out[jj] = np.dot(f_in[max(0,jj-centering):min(size,jj+n-centering)]\
,kernel[max(0,centering-jj):min(n,size-jj+centering)])
return f_out
|
[
"numpy.exp",
"numpy.sum",
"numpy.polynomial.hermite.Hermite",
"numpy.empty"
] |
[((90, 127), 'numpy.polynomial.hermite.Hermite', 'np.polynomial.hermite.Hermite', (['coeffs'], {}), '(coeffs)\n', (119, 127), True, 'import numpy as np\n'), ((219, 238), 'numpy.empty', 'np.empty', (['obj.shape'], {}), '(obj.shape)\n', (227, 238), True, 'import numpy as np\n'), ((878, 898), 'numpy.empty', 'np.empty', (['f_in.shape'], {}), '(f_in.shape)\n', (886, 898), True, 'import numpy as np\n'), ((153, 171), 'numpy.exp', 'np.exp', (['(-xhat ** 2)'], {}), '(-xhat ** 2)\n', (159, 171), True, 'import numpy as np\n'), ((1126, 1140), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (1132, 1140), True, 'import numpy as np\n')]
|
import warnings
from collections import OrderedDict
from pathlib import Path
import numpy as np
import pandas as pd
import torch
from tqdm import tqdm
import librosa
import model_utils
import utils
def long_clip_to_images(y, sample_rate, composer):
len_y = len(y)
start = 0
end = sample_rate * 5
images = []
while len_y > start:
y_batch = y[start:end].astype(np.float32)
if len(y_batch) != (sample_rate * 5):
break
start = end
end = end + sample_rate * 5
image = composer(y_batch)
images.append(image)
return images
def proba_to_label_string(proba, threshold):
events = proba >= threshold
all_events = set(np.argwhere(events)[:, 1])
labels = list(all_events)
if len(labels) == 0:
label_string = "nocall"
else:
labels_str_list = list(map(lambda x: utils.INV_BIRD_CODE[x], labels))
label_string = " ".join(labels_str_list)
# print(label_string)
return label_string
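# Note: proba has shape (n_chunks, n_classes); every class whose probability is at or
# above the threshold in at least one chunk is mapped through utils.INV_BIRD_CODE and
# joined with spaces, and "nocall" is returned when no class fires in any chunk.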
def prediction_for_clip(
test_df: pd.DataFrame,
clip: np.ndarray,
ds_class,
sample_rate,
model,
composer=None,
threshold=0.5,
):
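    # For site_1/site_2 rows the labelled 5-second window is sliced directly from the
    # clip; for other sites the whole clip is split into consecutive 5-second chunks,
    # scored in batches of 16, and the per-chunk probabilities are concatenated into a
    # single (n_chunks, n_classes) array stored under the row_id.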
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
model.eval()
prediction_dict = {}
for idx in tqdm(range(len(test_df))):
record = test_df.loc[idx, :]
# print(record)
row_id = record.row_id
site = record.site
if site in {"site_1", "site_2"}:
end_seconds = int(record.seconds)
start_seconds = int(end_seconds - 5)
start_index = sample_rate * start_seconds
end_index = sample_rate * end_seconds
y = clip[start_index:end_index].astype(np.float32)
image = composer(y)
image = image[np.newaxis, :, :, :]
image = torch.Tensor(image)
image = image.to(device)
with torch.no_grad():
prediction = torch.sigmoid(model(image))
proba = prediction.detach().cpu().numpy()
else:
# to avoid prediction on large batch
y = clip.astype(np.float32)
images = long_clip_to_images(y, sample_rate, composer)
image = np.asarray(images)
image = torch.Tensor(image)
image = image.to(device)
image = image.squeeze(0)
batch_size = 16
whole_size = image.size(0)
if whole_size % batch_size == 0:
n_iter = whole_size // batch_size
else:
n_iter = whole_size // batch_size + 1
# all_events = set()
proba = np.zeros([0, len(utils.BIRD_CODE)])
for batch_i in range(n_iter):
batch = image[batch_i * batch_size : (batch_i + 1) * batch_size]
if batch.ndim == 3:
batch = batch.unsqueeze(0)
batch = batch.to(device)
with torch.no_grad():
prediction = torch.sigmoid(model(batch))
_proba = prediction.detach().cpu().numpy()
# print(proba.shape)
proba = np.concatenate([proba, _proba])
# label_string = proba_to_label_string(proba, threshold)
# prediction_dict[row_id] = label_string
prediction_dict[row_id] = proba
return prediction_dict
def prediction(
test_df: pd.DataFrame,
test_audio: Path,
ds_class,
model_list,
composer=None,
sample_rate=32000,
threshold=0.5,
denoise=False,
):
unique_audio_id = test_df.audio_id.unique()
warnings.filterwarnings("ignore")
# ================================
all_prediction_dict = OrderedDict()
print(model_list)
for audio_id in unique_audio_id:
clip, _ = librosa.load(
test_audio / (audio_id + ".mp3"),
sr=sample_rate,
mono=True,
res_type="kaiser_fast",
)
if denoise:
clip = utils.noise_reduce(
clip, rate=sample_rate, threshold=0.25, verbose=True
)
test_df_for_audio_id = test_df.query(f"audio_id == '{audio_id}'").reset_index(
drop=True
)
agg_dict = OrderedDict()
for model_config in model_list:
# print(model_config)
model = model_utils.load_pytorch_model(**model_config)
prediction_dict = prediction_for_clip(
test_df_for_audio_id,
clip=clip,
ds_class=ds_class,
sample_rate=sample_rate,
model=model,
composer=composer,
threshold=threshold,
)
# aggregate model prediction
for key in prediction_dict.keys():
if key in agg_dict:
agg_dict[key] += prediction_dict[key]
else:
agg_dict[key] = prediction_dict[key]
all_prediction_dict.update(agg_dict)
# print(all_prediction_dict)
# proba to label string
for k, v in all_prediction_dict.items():
v /= len(model_list)
all_prediction_dict[k] = proba_to_label_string(v, threshold)
print(all_prediction_dict)
row_id = list(all_prediction_dict.keys())
birds = list(all_prediction_dict.values())
prediction_df = pd.DataFrame({"row_id": row_id, "birds": birds})
return prediction_df
|
[
"collections.OrderedDict",
"model_utils.load_pytorch_model",
"utils.noise_reduce",
"torch.Tensor",
"numpy.asarray",
"numpy.argwhere",
"torch.cuda.is_available",
"numpy.concatenate",
"pandas.DataFrame",
"torch.no_grad",
"warnings.filterwarnings",
"librosa.load"
] |
[((3641, 3674), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (3664, 3674), False, 'import warnings\n'), ((3740, 3753), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3751, 3753), False, 'from collections import OrderedDict\n'), ((5386, 5434), 'pandas.DataFrame', 'pd.DataFrame', (["{'row_id': row_id, 'birds': birds}"], {}), "({'row_id': row_id, 'birds': birds})\n", (5398, 5434), True, 'import pandas as pd\n'), ((3831, 3932), 'librosa.load', 'librosa.load', (["(test_audio / (audio_id + '.mp3'))"], {'sr': 'sample_rate', 'mono': '(True)', 'res_type': '"""kaiser_fast"""'}), "(test_audio / (audio_id + '.mp3'), sr=sample_rate, mono=True,\n res_type='kaiser_fast')\n", (3843, 3932), False, 'import librosa\n'), ((4269, 4282), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4280, 4282), False, 'from collections import OrderedDict\n'), ((706, 725), 'numpy.argwhere', 'np.argwhere', (['events'], {}), '(events)\n', (717, 725), True, 'import numpy as np\n'), ((1203, 1228), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1226, 1228), False, 'import torch\n'), ((1870, 1889), 'torch.Tensor', 'torch.Tensor', (['image'], {}), '(image)\n', (1882, 1889), False, 'import torch\n'), ((2267, 2285), 'numpy.asarray', 'np.asarray', (['images'], {}), '(images)\n', (2277, 2285), True, 'import numpy as np\n'), ((2306, 2325), 'torch.Tensor', 'torch.Tensor', (['image'], {}), '(image)\n', (2318, 2325), False, 'import torch\n'), ((4027, 4099), 'utils.noise_reduce', 'utils.noise_reduce', (['clip'], {'rate': 'sample_rate', 'threshold': '(0.25)', 'verbose': '(True)'}), '(clip, rate=sample_rate, threshold=0.25, verbose=True)\n', (4045, 4099), False, 'import utils\n'), ((4377, 4423), 'model_utils.load_pytorch_model', 'model_utils.load_pytorch_model', ([], {}), '(**model_config)\n', (4407, 4423), False, 'import model_utils\n'), ((1944, 1959), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1957, 1959), False, 'import torch\n'), ((3195, 3226), 'numpy.concatenate', 'np.concatenate', (['[proba, _proba]'], {}), '([proba, _proba])\n', (3209, 3226), True, 'import numpy as np\n'), ((2993, 3008), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3006, 3008), False, 'import torch\n')]
|
#!/usr/bin/env python
#
#
# Train TuneNet on position-position bouncing ball data, which is very similar to the dataset of Ajay et al 2018.
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.utils
import torch.utils.data
from tune.utils import get_torch_device, create_tensorboard_writer
device = get_torch_device()
writer = create_tensorboard_writer()
TIME_LENGTH = 400
SERIES_COUNT = 2
INPUT_DIM = TIME_LENGTH
OUT_DIM = 1
BATCH_SIZE = 50
ax = None
loss_fn = torch.nn.MSELoss()
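# Note on the constants above: TIME_LENGTH is the number of timesteps per rollout and
# SERIES_COUNT the number of stacked position series (the target trajectory and the
# current simulated one); each network input is those series reshaped into a single
# vector of length TIME_LENGTH * SERIES_COUNT (see the np.reshape calls below).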
def train(epoch,
model,
sim,
data_loader,
optimizer,
train_eval_iterations,
should_eval=False,
display_graphs=False,
incremental=True):
train_loss = 0
batch_idx = 0
for (zeta_batch, s_batch, _) in data_loader:
zeta_batch = zeta_batch.float().to(device)
s_batch = s_batch.float().to(device).permute(0, 2, 1)
input_i = torch.tensor(
np.reshape(s_batch[:, :, :SERIES_COUNT].cpu(), ([-1, TIME_LENGTH * SERIES_COUNT]), order="F")).to(device)
input_i.requires_grad = True
# TODO: the naming on delta_zeta_batch is misleading. If the network is not incremental, this is
# not delta_zeta, but just zeta.
if incremental:
delta_zeta_batch = zeta_batch[:, 1].sub(zeta_batch[:, 0])
else:
delta_zeta_batch = zeta_batch[:, 1]
delta_zeta_hat = model(input_i).squeeze()
delta_zeta = delta_zeta_batch[:, 0].squeeze()
optimizer.zero_grad()
loss = loss_fn(delta_zeta_hat, delta_zeta)
train_loss += loss.item()
loss.backward()
optimizer.step()
batch_idx += 1
err_s = None
err_zeta = None
print('====> Epoch: {} Average loss: {}'.format(
epoch, train_loss / len(data_loader.dataset)))
if should_eval:
err_zeta, err_s, _, _ = test(epoch,
model,
sim,
data_loader,
train_eval_iterations,
display_graphs,
test_type="train",
incremental=incremental)
return err_zeta, err_s
def test(epoch, model, sim, data_loader, tuning_iterations=1, display_graphs=False, test_type="test", incremental=True):
"""
Perform tests over a dataset to calculate the error in parameter and simulated state
    :param epoch: the epoch at evaluation time (used for tensorboard logging)
:param model: the model to use for evaluation
:param tuning_iterations: number of tuning iterations to perform
:param display_graphs: if True, display graphs after tuning showing some examples
:param data_loader: the ground truth data to test over
:param test_type: a string describing why this test is run. The tests can be run during training, which
can make printouts confusing, so this string is used to disambiguate.
:param incremental: if True, then each iteration will add to the starting value (the model estimates the difference)
rather than simply setting the value (the model estimates the value).
:return:
"""
print("Testing over " + test_type + "...")
dataset_size = len(data_loader.dataset)
print('dataset size is ' + str(dataset_size))
s = torch.zeros((dataset_size, TIME_LENGTH, 2)).to(device)
v = torch.zeros((dataset_size, TIME_LENGTH, 2)).to(device)
s_hat = torch.zeros((dataset_size, TIME_LENGTH)).to(device)
v_hat = torch.zeros((dataset_size, TIME_LENGTH)).to(device)
zeta_list = torch.zeros((dataset_size, 1)).to(device)
zeta_hat_history_list = torch.zeros((dataset_size, tuning_iterations + 1)).to(device)
# generate predictions
with torch.no_grad():
# count = 0
for batch_idx, batch_data in enumerate(data_loader):
zeta_batch = batch_data[0]
s_batch = batch_data[1]
if len(batch_data) > 2:
v_batch = batch_data[2]
for idx_in_batch in range(zeta_batch.shape[0]):
# print(idx_in_batch)
idx = batch_idx * BATCH_SIZE + idx_in_batch
# print(idx)
# pull out the first datapoint from the batch.
zeta_i = zeta_batch[idx_in_batch].float()
s[idx] = s_batch.float().to(device).permute(0, 2, 1)[idx_in_batch]
if len(batch_data) > 2:
v[idx] = v_batch.float().to(device).permute(0, 2, 1)[idx_in_batch]
# extract relevant physics information from datapoint.
# zeta_i is a vstack. Row 1 is the source sim, row 2 is the target sim
# each row is a list of all the physics params, such as (restitution, drop_height).
# print(zeta_i)
zeta_list[idx] = zeta_i[1, 0]
zeta_hat_history_list[idx, :], s_hat[idx, :], v_hat[idx, :] = \
tune_iter(model, sim, s, zeta_i, idx, tuning_iterations, display_graphs,
incremental=incremental)
# print("zeta_list evolution: " + str(zeta_hat_history_list[idx, :]))
# count += 1
# print("{} datapoints processed.".format(count))
err_s = torch.abs(s[:, :, 1] - s_hat[:, :]).cpu().numpy()
# err_s_percentage = np.abs(np.divide(err_s, s[:, :, 1].cpu().numpy() + 0.0000001) * 100.)
# err_v = torch.abs(v[:, :, 1] - v_hat[:, :]).cpu().numpy()
# compare the last iteration of zeta_hat_history_list with zeta_list to compute the mean absolute error
err_zeta = torch.abs(zeta_list - zeta_hat_history_list).cpu().numpy()
last_err_zeta = err_zeta[:, -1]
# writer.add_scalar('{}_mae_s'.format(test_type), np.mean(err_s, keepdims=False), epoch)
writer.add_scalar('{}_mae_zeta'.format(test_type), np.mean(last_err_zeta, keepdims=False), epoch)
print("mae of zeta_list: {:6.4f}".format(np.mean(last_err_zeta, keepdims=False)))
print("mse of zeta_list: {:f}".format(np.mean(last_err_zeta * last_err_zeta, keepdims=False)))
return np.mean(err_zeta, keepdims=False), np.mean(err_s, keepdims=False), zeta_hat_history_list, err_zeta
def tune_iter(model, sim, s_start, zeta_start, idx, tuning_iterations,
display_graphs, incremental):
assert tuning_iterations > 0
# zeta = zeta_start.clone()
s = s_start.clone()
position_list = linear_velocity_list = None
zeta_hat_history = torch.tensor(np.zeros([tuning_iterations + 1]))
zeta_hat_history[0] = zeta_start[0, 0]
# print("starting zeta value: " + str(zeta_start[0, 0]))
for iters in range(tuning_iterations):
input_i = torch.tensor(
np.reshape(s[idx, :, :SERIES_COUNT].cpu(), ([-1, TIME_LENGTH * SERIES_COUNT]), order="F")).to(device)
delta_zeta_hat = model(input_i).item()
# calculate new parameters
previous_zeta = zeta_hat_history[iters]
if incremental:
new_zeta = previous_zeta + delta_zeta_hat
else:
new_zeta = delta_zeta_hat
new_zeta = max(0, new_zeta)
if tuning_iterations == 1:
# special case to speed things up: don't run the sim
position_list = torch.zeros(TIME_LENGTH, 3)
linear_velocity_list = torch.zeros(TIME_LENGTH, 3)
zeta_hat_history[iters + 1] = new_zeta
return zeta_hat_history, torch.tensor(position_list[:, 2]), torch.tensor(linear_velocity_list[:, 2])
# get new rollout
obj_pos = [0, 0, zeta_start[1, 1]]
_, _, position_list, _, linear_velocity_list, _, _, _, _ = sim.run(zeta=[new_zeta, obj_pos], render=False)
if display_graphs:
# do_display(input_i, position_list, zeta_start, new_zeta, s[idx])
pass
s[idx, :, 0] = torch.tensor(position_list[:, 2])
zeta_hat_history[iters + 1] = new_zeta
# print("zeta hat history: " + str(zeta_hat_history))
return zeta_hat_history, torch.tensor(position_list[:, 2]), torch.tensor(linear_velocity_list[:, 2])
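# Illustrative sketch (added comment, not part of the original script): the `incremental`
# flag selects between two update rules for the estimated parameter, as applied above:
#   incremental=True  -> new_zeta = max(0, previous_zeta + delta_zeta_hat)  # model predicts a correction
#   incremental=False -> new_zeta = max(0, delta_zeta_hat)                  # model predicts the value directly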
def do_display(input_i, position_list, zeta_target, zeta_hat, s_i):
global ax
if ax is None:
_, ax = plt.subplots(2, 1)
ax[0].cla()
ax[0].set_ylim([0, 5])
ax[1].cla()
ax[1].set_ylim([0, 5])
# note: rho is the symbol for COR, but this should be changed if the semantic meaning
# of the zeta parameter changes.
ax[0].plot(position_list[:, 2], label="approximate run 1 (rho={:.4f})".format(zeta_hat),
color=(0.0, 0.5, 0.0, 1.0), ls="dashed")
ax[1].plot(input_i[0, :].detach().cpu().squeeze().numpy())
ax[0].plot(s_i[:, 0].detach().squeeze().cpu().numpy(),
label="actual run 0 (rho={:.4f})".format(zeta_target[0, 0]), color=(0.0, 0.0, 0.0))
ax[0].plot(s_i[:, 1].detach().squeeze().cpu().numpy(),
label="actual run 1 (rho={:.4f})".format(zeta_target[1, 0]), color=(0.0, 0.5, 0.0))
ax[0].legend()
plt.pause(1e-6)
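# Hypothetical wiring of the pieces above (illustration only; `MyModel`, `MySim` and
# `dataset` are assumed placeholders, not names from this script):
#   model = MyModel().to(device)
#   optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=BATCH_SIZE, shuffle=True)
#   for epoch in range(10):
#       train(epoch, model, MySim(), loader, optimizer, train_eval_iterations=1, should_eval=True)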
|
[
"numpy.mean",
"torch.abs",
"tune.utils.get_torch_device",
"tune.utils.create_tensorboard_writer",
"torch.nn.MSELoss",
"numpy.zeros",
"torch.tensor",
"torch.zeros",
"matplotlib.pyplot.pause",
"torch.no_grad",
"matplotlib.pyplot.subplots"
] |
[((370, 388), 'tune.utils.get_torch_device', 'get_torch_device', ([], {}), '()\n', (386, 388), False, 'from tune.utils import get_torch_device, create_tensorboard_writer\n'), ((398, 425), 'tune.utils.create_tensorboard_writer', 'create_tensorboard_writer', ([], {}), '()\n', (423, 425), False, 'from tune.utils import get_torch_device, create_tensorboard_writer\n'), ((535, 553), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (551, 553), False, 'import torch\n'), ((9169, 9185), 'matplotlib.pyplot.pause', 'plt.pause', (['(1e-06)'], {}), '(1e-06)\n', (9178, 9185), True, 'import matplotlib.pyplot as plt\n'), ((3946, 3961), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3959, 3961), False, 'import torch\n'), ((6042, 6080), 'numpy.mean', 'np.mean', (['last_err_zeta'], {'keepdims': '(False)'}), '(last_err_zeta, keepdims=False)\n', (6049, 6080), True, 'import numpy as np\n'), ((6286, 6319), 'numpy.mean', 'np.mean', (['err_zeta'], {'keepdims': '(False)'}), '(err_zeta, keepdims=False)\n', (6293, 6319), True, 'import numpy as np\n'), ((6321, 6351), 'numpy.mean', 'np.mean', (['err_s'], {'keepdims': '(False)'}), '(err_s, keepdims=False)\n', (6328, 6351), True, 'import numpy as np\n'), ((6675, 6708), 'numpy.zeros', 'np.zeros', (['[tuning_iterations + 1]'], {}), '([tuning_iterations + 1])\n', (6683, 6708), True, 'import numpy as np\n'), ((8019, 8052), 'torch.tensor', 'torch.tensor', (['position_list[:, 2]'], {}), '(position_list[:, 2])\n', (8031, 8052), False, 'import torch\n'), ((8188, 8221), 'torch.tensor', 'torch.tensor', (['position_list[:, 2]'], {}), '(position_list[:, 2])\n', (8200, 8221), False, 'import torch\n'), ((8223, 8263), 'torch.tensor', 'torch.tensor', (['linear_velocity_list[:, 2]'], {}), '(linear_velocity_list[:, 2])\n', (8235, 8263), False, 'import torch\n'), ((8383, 8401), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {}), '(2, 1)\n', (8395, 8401), True, 'import matplotlib.pyplot as plt\n'), ((3514, 3557), 'torch.zeros', 'torch.zeros', (['(dataset_size, TIME_LENGTH, 2)'], {}), '((dataset_size, TIME_LENGTH, 2))\n', (3525, 3557), False, 'import torch\n'), ((3577, 3620), 'torch.zeros', 'torch.zeros', (['(dataset_size, TIME_LENGTH, 2)'], {}), '((dataset_size, TIME_LENGTH, 2))\n', (3588, 3620), False, 'import torch\n'), ((3644, 3684), 'torch.zeros', 'torch.zeros', (['(dataset_size, TIME_LENGTH)'], {}), '((dataset_size, TIME_LENGTH))\n', (3655, 3684), False, 'import torch\n'), ((3708, 3748), 'torch.zeros', 'torch.zeros', (['(dataset_size, TIME_LENGTH)'], {}), '((dataset_size, TIME_LENGTH))\n', (3719, 3748), False, 'import torch\n'), ((3777, 3807), 'torch.zeros', 'torch.zeros', (['(dataset_size, 1)'], {}), '((dataset_size, 1))\n', (3788, 3807), False, 'import torch\n'), ((3847, 3897), 'torch.zeros', 'torch.zeros', (['(dataset_size, tuning_iterations + 1)'], {}), '((dataset_size, tuning_iterations + 1))\n', (3858, 3897), False, 'import torch\n'), ((6134, 6172), 'numpy.mean', 'np.mean', (['last_err_zeta'], {'keepdims': '(False)'}), '(last_err_zeta, keepdims=False)\n', (6141, 6172), True, 'import numpy as np\n'), ((6217, 6271), 'numpy.mean', 'np.mean', (['(last_err_zeta * last_err_zeta)'], {'keepdims': '(False)'}), '(last_err_zeta * last_err_zeta, keepdims=False)\n', (6224, 6271), True, 'import numpy as np\n'), ((7432, 7459), 'torch.zeros', 'torch.zeros', (['TIME_LENGTH', '(3)'], {}), '(TIME_LENGTH, 3)\n', (7443, 7459), False, 'import torch\n'), ((7495, 7522), 'torch.zeros', 'torch.zeros', (['TIME_LENGTH', '(3)'], {}), '(TIME_LENGTH, 3)\n', (7506, 7522), False, 
'import torch\n'), ((7611, 7644), 'torch.tensor', 'torch.tensor', (['position_list[:, 2]'], {}), '(position_list[:, 2])\n', (7623, 7644), False, 'import torch\n'), ((7646, 7686), 'torch.tensor', 'torch.tensor', (['linear_velocity_list[:, 2]'], {}), '(linear_velocity_list[:, 2])\n', (7658, 7686), False, 'import torch\n'), ((5466, 5501), 'torch.abs', 'torch.abs', (['(s[:, :, 1] - s_hat[:, :])'], {}), '(s[:, :, 1] - s_hat[:, :])\n', (5475, 5501), False, 'import torch\n'), ((5799, 5843), 'torch.abs', 'torch.abs', (['(zeta_list - zeta_hat_history_list)'], {}), '(zeta_list - zeta_hat_history_list)\n', (5808, 5843), False, 'import torch\n')]
|
#!/usr/bin/python3
'''
Abstract:
    This is a program to show the data with different true and predicted labels
Usage:
plot_sed.py [main_name] [true label] [pred label]
Example:
plot_sed.py MaxLoss15 1 2
Editor:
Jacob975
##################################
# Python3 #
# This code is made in python3 #
##################################
20180412
####################################
update log
20180412 version alpha 1:
1. The code work
'''
import numpy as np
import time
import load_lib
import collections
from sys import argv
from glob import glob
import matplotlib.pyplot as plt
def get_sed(detected_occurance, n, data, tracer):
# initialize variables
normed_by_band = [dict() for i in range(8)]
for key in detected_occurance:
if detected_occurance[key] >= n:
selected_data = data[np.where(tracer == key)]
ind_of_peak = np.argmax(selected_data)
if ind_of_peak >= 8:
continue
else:
normed_by_band[ind_of_peak][key] = selected_data
return normed_by_band
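# Worked illustration (added comment): a source detected at least n times whose SED
# peaks at index 2 is stored in normed_by_band[2][tracer_key]; sources whose argmax
# falls at index >= 8 (outside the first eight bands) are skipped.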
#--------------------------------------------
# main code
if __name__ == "__main__":
VERBOSE = 0
# measure times
start_time = time.time()
#----------------------------------------
# initialize variables and constants
data = None
tracer = None
cls_pred = None
cls_true = None
collected_tracer_in_confusion_matrix = np.array([])
collected_sed_in_confusion_matrix = np.array([])
normed_by_band = None
n = 5
true_ = pred_ = ["star", "gala", "yso"]
#----------------------------------------
# load argv
if len(argv) != 4:
print ("Error!\nUsage: plot_sed.py [main_name] [true label] [pred label]")
exit()
main_name = argv[1]
true_label = int(argv[2])
pred_label = int(argv[3])
#----------------------------------------
data_list = glob("AI*test_on*")
for directory in data_list:
print ("#################################")
print ("start to loading data saved in {0}".format(directory))
# load tracer
failure, data, tracer = load_lib.load_arrangement(main_name, directory)
if not failure:
print ("load data and tracer success")
# load cls_pred
failure, cls_pred = load_lib.load_cls_pred(main_name, directory)
if not failure:
print ("load cls_pred success")
# load cls_true
failure, cls_true = load_lib.load_cls_true(main_name, directory)
if not failure:
print ("load cls_true success")
# confusion matrix
print ("### confusion matrix ###")
failure, cm = load_lib.confusion_matrix(cls_true, cls_pred)
if not failure:
print ("confusion matrix success")
print (cm)
#-----------------------------------
star_length = len(cls_true[cls_true == 0])
print ("number of stars: {0}".format(len(cls_true[cls_true == 0])))
gala_length = len(cls_true[cls_true == 1])
print ("number of galaxies: {0}".format(len(cls_true[cls_true == 1])))
yso_length = len(cls_true[cls_true == 2])
print ("number of YSOs: {0}".format(len(cls_true[cls_true == 2])))
tracer_in_confusion_matrix = tracer.test[(cls_true == true_label) &(cls_pred == pred_label)]
collected_tracer_in_confusion_matrix = np.append(collected_tracer_in_confusion_matrix, tracer_in_confusion_matrix)
print ("number of gala to yso: {0}".format(len(tracer_in_confusion_matrix)))
# save tracer_in_confusion_matrix
np.savetxt("{0}/{1}_tracer_true_{2}_pred_{3}.txt".format(directory, main_name, true_[true_label], pred_[pred_label]),
tracer_in_confusion_matrix)
# save collected_tracer_in_confusion_matrix
np.savetxt("all_tracer_true_{0}_pred_{1}.txt".format(true_[true_label], pred_[pred_label]), collected_tracer_in_confusion_matrix)
# sort object by band
print("detect the occurance")
detected_occurance = collections.Counter(collected_tracer_in_confusion_matrix)
print("select by band")
normed_by_band = get_sed(detected_occurance, n, data.test.images, tracer.test)
# plot the sed band by band
for ind, peak_at in enumerate(normed_by_band):
if len(peak_at) == 0:
continue
result_plt = plt.figure("sed of true: {0}, pred: {1}, peak at {2} band, {3} data".format(true_[true_label], pred_[pred_label], ind+1, len(peak_at)))
plt.title("sed of true: {0}, pred: {1}, peak at {2} band, {3} data".format(true_[true_label], pred_[pred_label], ind+1, len(peak_at)))
plt.xlabel("signal/error")
plt.ylabel("normalized flux")
for key, value in peak_at.items():
plt.plot(range(1, 17), value[0])
result_plt.savefig("sed_true_{0}_pred_{1}_peak_at_{2}_band_{3}_data.png".format(true_[true_label], pred_[pred_label], ind+1, len(peak_at)))
#----------------------------------------
# measuring time
elapsed_time = time.time() - start_time
print ("Exiting Main Program, spending ", elapsed_time, "seconds.")
|
[
"load_lib.confusion_matrix",
"load_lib.load_cls_true",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"numpy.where",
"numpy.argmax",
"collections.Counter",
"numpy.array",
"numpy.append",
"load_lib.load_cls_pred",
"load_lib.load_arrangement",
"time.time",
"glob.glob"
] |
[((1239, 1250), 'time.time', 'time.time', ([], {}), '()\n', (1248, 1250), False, 'import time\n'), ((1455, 1467), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1463, 1467), True, 'import numpy as np\n'), ((1508, 1520), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1516, 1520), True, 'import numpy as np\n'), ((1930, 1949), 'glob.glob', 'glob', (['"""AI*test_on*"""'], {}), "('AI*test_on*')\n", (1934, 1949), False, 'from glob import glob\n'), ((4056, 4113), 'collections.Counter', 'collections.Counter', (['collected_tracer_in_confusion_matrix'], {}), '(collected_tracer_in_confusion_matrix)\n', (4075, 4113), False, 'import collections\n'), ((2159, 2206), 'load_lib.load_arrangement', 'load_lib.load_arrangement', (['main_name', 'directory'], {}), '(main_name, directory)\n', (2184, 2206), False, 'import load_lib\n'), ((2334, 2378), 'load_lib.load_cls_pred', 'load_lib.load_cls_pred', (['main_name', 'directory'], {}), '(main_name, directory)\n', (2356, 2378), False, 'import load_lib\n'), ((2499, 2543), 'load_lib.load_cls_true', 'load_lib.load_cls_true', (['main_name', 'directory'], {}), '(main_name, directory)\n', (2521, 2543), False, 'import load_lib\n'), ((2704, 2749), 'load_lib.confusion_matrix', 'load_lib.confusion_matrix', (['cls_true', 'cls_pred'], {}), '(cls_true, cls_pred)\n', (2729, 2749), False, 'import load_lib\n'), ((3415, 3490), 'numpy.append', 'np.append', (['collected_tracer_in_confusion_matrix', 'tracer_in_confusion_matrix'], {}), '(collected_tracer_in_confusion_matrix, tracer_in_confusion_matrix)\n', (3424, 3490), True, 'import numpy as np\n'), ((4667, 4693), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""signal/error"""'], {}), "('signal/error')\n", (4677, 4693), True, 'import matplotlib.pyplot as plt\n'), ((4702, 4731), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""normalized flux"""'], {}), "('normalized flux')\n", (4712, 4731), True, 'import matplotlib.pyplot as plt\n'), ((5054, 5065), 'time.time', 'time.time', ([], {}), '()\n', (5063, 5065), False, 'import time\n'), ((908, 932), 'numpy.argmax', 'np.argmax', (['selected_data'], {}), '(selected_data)\n', (917, 932), True, 'import numpy as np\n'), ((856, 879), 'numpy.where', 'np.where', (['(tracer == key)'], {}), '(tracer == key)\n', (864, 879), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
==========
"""
# import standard libraries
import os
# import third-party libraries
import numpy as np
import matplotlib.pyplot as plt
from colour import write_image, read_image
# import my libraries
import test_pattern_generator2 as tpg
import transfer_functions as tf
import plot_utility as pu
# information
__author__ = '<NAME>'
__copyright__ = 'Copyright (C) 2020 - <NAME>'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = '<NAME>'
__email__ = 'toru.ver.11 at-sign gmail.com'
__all__ = []
def create_ramp():
x = np.linspace(0, 1, 1920).reshape((1, 1920, 1))
img = np.ones((1080, 1920, 3))
img = x * img
write_image(img, "test_src.tif", bit_depth='uint16')
def create_exr_ramp(min_exposure=-12, max_exposure=12):
x = np.linspace(0, 1, 1920).reshape((1, 1920, 1))
y = tpg.shaper_func_log2_to_linear(
x, min_exposure=min_exposure, max_exposure=max_exposure)
img = np.ones((1080, 1920, 3)) * y
fname = f"./img/test_src_exp_{min_exposure}_{max_exposure}.exr"
write_image(img, fname, bit_depth='float32')
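# Hedged note (added): shaper_func_log2_to_linear() is assumed to map the 0..1 ramp to
# scene-linear exposures spanning roughly 2**min_exposure .. 2**max_exposure around
# mid grey, i.e. approximately y = 0.18 * 2**(x * (max_exposure - min_exposure) + min_exposure).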
def plot_input_drt():
# file_list = [
# ['./img/old/test_out_sdr100.tif', 'SDR 100'],
# ['./img/old/test_out_hdr500.tif', 'HDR 500'],
# ['./img/old/test_out_hdr1000.tif', 'HDR 1000'],
# ['./img/old/test_out_hdr2000.tif', 'HDR 2000'],
# ['./img/old/test_out_hdr4000.tif', 'HDR 4000'],
# ['./img/old/test_out_off.tif', 'DRT OFF']
# ]
# check_input_drt_test(
# file_list=file_list, graph_name="Input_DRT_Characteristics_w_SDR")
# file_list = [
# ['./img/old/test_out_hdr500.tif', 'HDR 500'],
# ['./img/old/test_out_hdr1000.tif', 'HDR 1000'],
# ['./img/old/test_out_hdr2000.tif', 'HDR 2000'],
# ['./img/old/test_out_hdr4000.tif', 'HDR 4000'],
# ['./img/old/test_out_off.tif', 'DRT OFF']
# ]
# check_input_drt_test(
# file_list=file_list, graph_name="Input_DRT_Characteristics_wo_SDR")
# file_list = [
# ['./img/old/test_out_sdr_er_100-200.tif', 'SDR ER 100/200'],
# ['./img/old/test_out_hdr_er_1000-2000.tif', 'HDR ER 1000/2000'],
# ['./img/old/test_out_hdr_er_1000-4000.tif', 'HDR ER 1000/4000'],
# ['./img/old/test_out_hdr_er_1000-10000.tif', 'HDR ER 1000/10000'],
# ['./img/old/test_out_hdr_er_4000-10000.tif', 'HDR ER 4000/10000'],
# ['./img/old/test_out_off.tif', 'DRT OFF']
# ]
# check_input_drt_test(
# file_list=file_list, graph_name="Input_DRT_Characteristics_ER_w_SDR")
file_list = [
['./img/old/test_out_hdr_er_1000-2000.tif', 'HDR ER 1000/2000', '-.'],
['./img/old/test_out_hdr_er_1000-4000.tif', 'HDR ER 1000/4000', '--'],
['./img/old/test_out_hdr_er_1000-10000.tif', 'HDR ER 1000/10000', '-'],
['./img/old/test_out_hdr_er_4000-10000.tif', 'HDR ER 4000/10000', '-'],
# ['./img/old/test_out_off.tif', 'DRT OFF']
]
check_input_drt_test(
file_list=file_list, graph_name="Input_DRT_Characteristics_ER_wo_SDR")
# check_input_drt_test_sdr_only()
def check_input_drt_test(file_list, graph_name):
create_ramp()
x = np.linspace(0, 1, 1920)
x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title="DaVinci17 Input DRT Characteristics",
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=[0.009, 15000],
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
for idx in range(len(file_list))[::-1]:
img = read_image(file_list[idx][0])[0, :, 0]
label = file_list[idx][1]
ls = file_list[idx][2]
y_luminance = tf.eotf_to_luminance(img, tf.ST2084)
ax1.plot(x_luminance, y_luminance, ls, label=label)
plt.legend(loc='upper left')
fname_full = f"./img/{graph_name}.png"
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def check_input_drt_test_sdr_only():
create_ramp()
x = np.linspace(0, 1, 1920)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title="DaVinci17 Input DRT Characteristics",
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=[0.009, 15000],
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
# img = read_image("./img/test_out_sdr100_on_gm24.tif")[0, :, 0]
# label = "DRT OFF(ST2084 to Gamma2.4 (.tif))"
# x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
# y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
# ax1.plot(x_luminance, y_luminance, label=label)
# img = read_image("./img/test_out_sdr100_on_gm24_203nits.tif")[0, :, 0]
# label = "DRT OFF(ST2084 to Gamma2.4 (.tif) 203nits)"
# x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
# y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
# ax1.plot(x_luminance, y_luminance, label=label)
img = read_image("./img/old/test_out_sdr100_on_gm24.tif")[0, :, 0]
label = 'SDR 100 (Output color space is Gamma2.4)'
x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
ax1.plot(x_luminance, y_luminance, label=label)
# img = read_image("./img/test_out_exp_-12_12_sdr_drt-off_gm24.tif")[0, :, 0]
# label = "DRT OFF(Gamma2.4 to Gamma2.4 (.tif))"
# x_luminance = tf.eotf_to_luminance(x, tf.GAMMA24)
# y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
# ax1.plot(x_luminance, y_luminance, label=label)
# img = read_image("./img/test_out_exp_-12_12_sdr_drt-off.tif")[0, :, 0]
# label = "DRT OFF(Linear to Gamma2.4 (.exr))"
# y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
# x = np.linspace(0, 1, 1920)
# x_luminance = tpg.shaper_func_log2_to_linear(
# x, min_exposure=-12, max_exposure=12)
# ax1.plot(
# x_luminance * 100, y_luminance, '--', color=pu.SKY, label=label)
plt.legend(loc='upper left')
fname_full = "./img/input_drt_sdr_only.png"
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def check_100nits_code_value_on_st2084():
code_value = tf.oetf_from_luminance(100, tf.ST2084)
print(code_value)
print(code_value * 1023)
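# For reference (added comment): 100 cd/m2 corresponds to an ST2084 code value of
# about 0.508, i.e. roughly 520/1023 in 10-bit terms.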
def plot_forum_fig1():
x = np.linspace(0, 1, 1920)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title="HDR to SDR conversion",
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=[0.009, 15000],
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
img = read_image("./img/dv17_fig1_sdr_out_st2084.tif")[0, :, 0]
label = "(a) src: ST2084(.tif)"
x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
ax1.plot(x_luminance, y_luminance, color=pu.BLUE, label=label)
# img = read_image("./img/dv17_fig1_203_sdr_out_st2084.tif")[0, :, 0]
# label = "(b) src: ST2084(.tif), ref-white: 203nits"
# x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
# y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
# ax1.plot(x_luminance, y_luminance, label=label)
img = read_image("./img/dv17_fig1_sdr_out_linear.tif")[0, :, 0]
label = "(b) src: Linear(.exr), This is the expected result."
y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
x = np.linspace(0, 1, 1920)
x_luminance = tpg.shaper_func_log2_to_linear(
x, min_exposure=-12, max_exposure=12)
ax1.plot(
x_luminance * 100, y_luminance, '--', color=pu.RED, label=label)
# img = read_image("./img/dv17_fig1_203_sdr_out_linear.tif")[0, :, 0]
# label = "src=Linear(.exr), ref-white=203nits"
# y_luminance = tf.eotf_to_luminance(img, tf.GAMMA24)
# x = np.linspace(0, 1, 1920)
# x_luminance = tpg.shaper_func_log2_to_linear(
# x, min_exposure=-12, max_exposure=12)
# ax1.plot(
# x_luminance * 100, y_luminance, label=label)
plt.legend(loc='upper left')
fname_full = "./img/fig1.png"
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def plot_output_drt():
# file_list = [
# # ['./img/Output_DRT_SDR_ER_100-200.tif', 'SDR ER 100/200', '-'],
# ['./img/old/Output_DRT_HDR_ER_1000-2000.tif', 'HDR ER 1000/2000', '-'],
# ['./img/old/Output_DRT_HDR_ER_1000-4000.tif', 'HDR ER 1000/4000', '-'],
# ['./img/old/Output_DRT_HDR_ER_1000-10000.tif', 'HDR ER 1000/10000', '-'],
# ['./img/old/Output_DRT_HDR_ER_4000-10000.tif', 'HDR ER 4000/10000', '--'],
# ]
# check_output_drt_test(
# file_list=file_list,
# graph_name="DaVinci17 Output DRT ER 無印ST2084")
# file_list = [
# # ['./img/Output_DRT_SDR_ER_100-200.tif', 'SDR ER 100/200', '-'],
# ['./img/Output_DRT_HDR_ER_1000-2000.tif', 'HDR ER 1000/2000', '-'],
# ['./img/Output_DRT_HDR_ER_1000-4000.tif', 'HDR ER 1000/4000', '-'],
# ['./img/Output_DRT_HDR_ER_1000-10000.tif', 'HDR ER 1000/10000', '-'],
# ['./img/Output_DRT_HDR_ER_4000-10000.tif', 'HDR ER 4000/10000', '--'],
# ]
# check_output_drt_test(
# file_list=file_list,
# graph_name="DaVinci17 Output DRT Characteristics ER")
# file_list = [
# # ['./img/Output_DRT_SDR_100.tif', 'SDR 100', '-'],
# ['./img/old/Output_DRT_HDR_500.tif', 'HDR 500', '-'],
# ['./img/old/Output_DRT_HDR_1000.tif', 'HDR 1000', '-'],
# ['./img/old/Output_DRT_HDR_2000.tif', 'HDR 2000', '-'],
# ['./img/old/Output_DRT_HDR_4000.tif', 'HDR 4000', '-']
# ]
# check_output_drt_test(
# file_list=file_list,
# graph_name="DaVinci17 Output DRT 無印 ST2084")
file_list = [
# ['./img/Output_DRT_SDR_100.tif', 'SDR 100', '-'],
['./img/Output_DRT_HDR_500.tif', 'HDR 500', '-'],
['./img/Output_DRT_HDR_1000.tif', 'HDR 1000', '-'],
['./img/Output_DRT_HDR_2000.tif', 'HDR 2000', '-'],
['./img/Output_DRT_HDR_4000.tif', 'HDR 4000', '-'],
['./img/Output_DRT_HDR_10000.tif', 'Custom (10000 nit)', '--']
]
check_output_drt_test(
file_list=file_list,
graph_name="DaVinci17 Output DRT Characteristics")
file_list = [
['./img/DRT_In_None_HDR1000-500.tif', 'HDR 1000, ST2084 500 nit', '-'],
['./img/DRT_In_None_HDR1000-1000.tif', 'HDR 1000, ST2084 1000 nit', '-'],
['./img/DRT_In_None_HDR1000-2000.tif', 'HDR 1000, ST2084 2000 nit', '-'],
['./img/DRT_In_None_HDR1000-4000.tif', 'HDR 1000, ST2084 4000 nit', '-'],
['./img/DRT_In_None_HDR1000-10000.tif', 'HDR 1000, ST2084 10000 nit', '-'],
]
check_output_drt_test(
file_list=file_list,
graph_name="DaVinci17 Out DRT Characteristics_fix_HDR1000")
def check_output_drt_test(file_list, graph_name):
x = np.linspace(0, 1, 1920)
x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title="DaVinci17 Output DRT Characteristics",
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=[0.009, 15000],
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
for idx in range(len(file_list)):
img = read_image(file_list[idx][0])[0, :, 0]
label = file_list[idx][1]
ls = file_list[idx][2]
y_luminance = tf.eotf_to_luminance(img, tf.ST2084)
ax1.plot(x_luminance, y_luminance, ls, label=label)
plt.legend(loc='upper left')
fname_full = f"./img/{graph_name}.png".replace(' ', "_")
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def check_output_drt_test_exr(file_list, graph_name):
x = np.linspace(0, 1, 1920)
x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title=graph_name,
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=None,
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
for idx in range(len(file_list)):
img = read_image(file_list[idx][0])[0, :, 0]
label = file_list[idx][1]
ls = file_list[idx][2]
y_luminance = img * 10000
ax1.plot(x_luminance, y_luminance, ls, label=label)
plt.legend(loc='upper left')
fname_full = f"./img/{graph_name}.png".replace(' ', "_")
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def plot_total_drt():
file_list = [
['./img/DRT_Total_HDR_500.tif', 'HDR 500', '-'],
['./img/DRT_Total_HDR_1000.tif', 'HDR 1000', '-'],
['./img/DRT_Total_HDR_2000.tif', 'HDR 2000', '-'],
['./img/DRT_Total_HDR_4000.tif', 'HDR 4000', '-'],
['./img/DRT_Total_HDR_10000.tif', 'Custom (10000 nit)', '-'],
]
check_total_drt_test(
file_list=file_list,
graph_name="Input-Output_DRT_Characteristics")
file_list = [
['./img/Output_DRT_HDR1000-500.tif', 'HDR 1000, ST2084 500 nit', '-'],
['./img/Output_DRT_HDR1000-1000.tif', 'HDR 1000, ST2084 1000 nit', '-'],
['./img/Output_DRT_HDR1000-2000.tif', 'HDR 1000, ST2084 2000 nit', '-'],
['./img/Output_DRT_HDR1000-4000.tif', 'HDR 1000, ST2084 4000 nit', '-'],
['./img/Output_DRT_HDR1000-10000.tif','HDR 1000, ST2084 10000 nit', '-'],
]
check_total_drt_test(
file_list=file_list,
graph_name="DaVinci17 In-Out DRT Characteristics_fix_HDR1000")
file_list = [
['./img/DRT_Total_HDR_ER_1000-2000.tif', 'HDR ER 1000/2000', '-'],
['./img/DRT_Total_HDR_ER_1000-4000.tif', 'HDR ER 1000/4000', '-'],
['./img/DRT_Total_HDR_ER_1000-10000.tif', 'HDR ER 1000/10000', '-'],
['./img/DRT_Total_HDR_ER_4000-10000.tif', 'HDR ER 4000/10000', '-'],
]
check_total_drt_test(
file_list=file_list,
graph_name="Input-Output_DRT_Characteristics_ER")
def check_total_drt_test(file_list, graph_name):
x = np.linspace(0, 1, 1920)
x_luminance = tf.eotf_to_luminance(x, tf.ST2084)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title="DaVinci17 Input-Output DRT Characteristics",
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=[0.009, 15000],
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
for idx in range(len(file_list)):
img = read_image(file_list[idx][0])[0, :, 0]
label = file_list[idx][1]
ls = file_list[idx][2]
y_luminance = tf.eotf_to_luminance(img, tf.ST2084)
ax1.plot(x_luminance, y_luminance, ls, label=label)
plt.legend(loc='upper left')
fname_full = f"./img/{graph_name}.png".replace(' ', "_")
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def plot_inv_drt():
file_list = [
# ['./img/Inverse_DRT_to_HDR500.tif', 'SDR to HDR 500 nit', '-'],
['./img/Inverse_DRT_to_HDR1000.tif', 'SDR to HDR 1000 nit', '-'],
# ['./img/Inverse_DRT_to_HDR2000.tif', 'SDR to HDR 2000 nit', '-'],
['./img/Inverse_DRT_to_HDR4000.tif', 'SDR to HDR 4000 nit', '-'],
['./img/Inverse_DRT_to_HDR10000.tif', 'SDR to HDR 10000 nit', '-'],
]
check_inv_drt_test(
file_list=file_list,
graph_name="Inverse_DRT_Characteristics")
def check_inv_drt_test(file_list, graph_name):
x = np.linspace(0, 1, 1920)
x_luminance = tf.eotf_to_luminance(x, tf.GAMMA24)
fig, ax1 = pu.plot_1_graph(
fontsize=20,
figsize=(10, 8),
graph_title="DaVinci17 Inverse DRT for SDR to HDR Conversion",
graph_title_size=None,
xlabel="Input Luminance [cd/m2]",
ylabel="Output Luminance [cd/m2]",
axis_label_size=None,
legend_size=17,
xlim=[0.009, 15000],
ylim=[0.009, 15000],
xtick=None,
ytick=None,
xtick_size=None,
ytick_size=None,
linewidth=3,
minor_xtick_num=None,
minor_ytick_num=None,
return_figure=True)
pu.log_scale_settings(ax1, grid_alpha=0.5, bg_color="#E0E0E0")
for idx in range(len(file_list))[::-1]:
img = read_image(file_list[idx][0])[0, :, 0]
label = file_list[idx][1]
ls = file_list[idx][2]
y_luminance = tf.eotf_to_luminance(img, tf.ST2084)
ax1.plot(x_luminance, y_luminance, ls, label=label)
plt.legend(loc='upper left')
fname_full = f"./img/{graph_name}.png".replace(' ', "_")
plt.savefig(fname_full, bbox_inches='tight', pad_inches=0.1)
# plt.show()
plt.close(fig)
def conv_st2084_to_linear():
src_file = "./ST2084_vs_Linear/st2084_clip_checker_st2084.png"
dst_file = "./ST2084_vs_Linear/st2084_clip_checker_linear.exr"
img_st2084 = read_image(src_file)
img_linear = tf.eotf(img_st2084, tf.ST2084) * 100
write_image(img_linear, dst_file)
def main_func():
# create_exr_ramp()
# plot_input_drt()
# plot_output_drt()
# check_100nits_code_value_on_st2084()
# plot_forum_fig1()
# plot_total_drt()
# plot_inv_drt()
conv_st2084_to_linear()
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
main_func()
|
[
"matplotlib.pyplot.savefig",
"numpy.ones",
"colour.write_image",
"test_pattern_generator2.shaper_func_log2_to_linear",
"colour.read_image",
"matplotlib.pyplot.legend",
"transfer_functions.eotf_to_luminance",
"matplotlib.pyplot.close",
"plot_utility.log_scale_settings",
"numpy.linspace",
"os.path.abspath",
"transfer_functions.eotf",
"transfer_functions.oetf_from_luminance",
"plot_utility.plot_1_graph"
] |
[((661, 685), 'numpy.ones', 'np.ones', (['(1080, 1920, 3)'], {}), '((1080, 1920, 3))\n', (668, 685), True, 'import numpy as np\n'), ((708, 760), 'colour.write_image', 'write_image', (['img', '"""test_src.tif"""'], {'bit_depth': '"""uint16"""'}), "(img, 'test_src.tif', bit_depth='uint16')\n", (719, 760), False, 'from colour import write_image, read_image\n'), ((881, 973), 'test_pattern_generator2.shaper_func_log2_to_linear', 'tpg.shaper_func_log2_to_linear', (['x'], {'min_exposure': 'min_exposure', 'max_exposure': 'max_exposure'}), '(x, min_exposure=min_exposure, max_exposure=\n max_exposure)\n', (911, 973), True, 'import test_pattern_generator2 as tpg\n'), ((1090, 1134), 'colour.write_image', 'write_image', (['img', 'fname'], {'bit_depth': '"""float32"""'}), "(img, fname, bit_depth='float32')\n", (1101, 1134), False, 'from colour import write_image, read_image\n'), ((3227, 3250), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (3238, 3250), True, 'import numpy as np\n'), ((3269, 3303), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.ST2084'], {}), '(x, tf.ST2084)\n', (3289, 3303), True, 'import transfer_functions as tf\n'), ((3320, 3750), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': '"""DaVinci17 Input DRT Characteristics"""', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': '[0.009, 15000]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=\n 'DaVinci17 Input DRT Characteristics', graph_title_size=None, xlabel=\n 'Input Luminance [cd/m2]', ylabel='Output Luminance [cd/m2]',\n axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009,\n 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None,\n linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True\n )\n", (3335, 3750), True, 'import plot_utility as pu\n'), ((3873, 3935), 'plot_utility.log_scale_settings', 'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (3894, 3935), True, 'import plot_utility as pu\n'), ((4223, 4251), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (4233, 4251), True, 'import matplotlib.pyplot as plt\n'), ((4299, 4359), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (4310, 4359), True, 'import matplotlib.pyplot as plt\n'), ((4381, 4395), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4390, 4395), True, 'import matplotlib.pyplot as plt\n'), ((4461, 4484), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (4472, 4484), True, 'import numpy as np\n'), ((4501, 4931), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': '"""DaVinci17 Input DRT Characteristics"""', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': '[0.009, 
15000]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=\n 'DaVinci17 Input DRT Characteristics', graph_title_size=None, xlabel=\n 'Input Luminance [cd/m2]', ylabel='Output Luminance [cd/m2]',\n axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009,\n 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None,\n linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True\n )\n", (4516, 4931), True, 'import plot_utility as pu\n'), ((5054, 5116), 'plot_utility.log_scale_settings', 'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (5075, 5116), True, 'import plot_utility as pu\n'), ((5854, 5888), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.ST2084'], {}), '(x, tf.ST2084)\n', (5874, 5888), True, 'import transfer_functions as tf\n'), ((5907, 5944), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.GAMMA24'], {}), '(img, tf.GAMMA24)\n', (5927, 5944), True, 'import transfer_functions as tf\n'), ((6718, 6746), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (6728, 6746), True, 'import matplotlib.pyplot as plt\n'), ((6799, 6859), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (6810, 6859), True, 'import matplotlib.pyplot as plt\n'), ((6881, 6895), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (6890, 6895), True, 'import matplotlib.pyplot as plt\n'), ((6957, 6995), 'transfer_functions.oetf_from_luminance', 'tf.oetf_from_luminance', (['(100)', 'tf.ST2084'], {}), '(100, tf.ST2084)\n', (6979, 6995), True, 'import transfer_functions as tf\n'), ((7080, 7103), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (7091, 7103), True, 'import numpy as np\n'), ((7120, 7536), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': '"""HDR to SDR conversion"""', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': '[0.009, 15000]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=\n 'HDR to SDR conversion', graph_title_size=None, xlabel=\n 'Input Luminance [cd/m2]', ylabel='Output Luminance [cd/m2]',\n axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009,\n 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None,\n linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True\n )\n", (7135, 7536), True, 'import plot_utility as pu\n'), ((7659, 7721), 'plot_utility.log_scale_settings', 'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (7680, 7721), True, 'import plot_utility as pu\n'), ((7845, 7879), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.ST2084'], {}), '(x, tf.ST2084)\n', (7865, 7879), True, 'import 
transfer_functions as tf\n'), ((7898, 7935), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.GAMMA24'], {}), '(img, tf.GAMMA24)\n', (7918, 7935), True, 'import transfer_functions as tf\n'), ((8456, 8493), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.GAMMA24'], {}), '(img, tf.GAMMA24)\n', (8476, 8493), True, 'import transfer_functions as tf\n'), ((8502, 8525), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (8513, 8525), True, 'import numpy as np\n'), ((8544, 8612), 'test_pattern_generator2.shaper_func_log2_to_linear', 'tpg.shaper_func_log2_to_linear', (['x'], {'min_exposure': '(-12)', 'max_exposure': '(12)'}), '(x, min_exposure=-12, max_exposure=12)\n', (8574, 8612), True, 'import test_pattern_generator2 as tpg\n'), ((9104, 9132), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (9114, 9132), True, 'import matplotlib.pyplot as plt\n'), ((9171, 9231), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (9182, 9231), True, 'import matplotlib.pyplot as plt\n'), ((9253, 9267), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9262, 9267), True, 'import matplotlib.pyplot as plt\n'), ((11988, 12011), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (11999, 12011), True, 'import numpy as np\n'), ((12030, 12064), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.ST2084'], {}), '(x, tf.ST2084)\n', (12050, 12064), True, 'import transfer_functions as tf\n'), ((12081, 12512), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': '"""DaVinci17 Output DRT Characteristics"""', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': '[0.009, 15000]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=\n 'DaVinci17 Output DRT Characteristics', graph_title_size=None, xlabel=\n 'Input Luminance [cd/m2]', ylabel='Output Luminance [cd/m2]',\n axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009,\n 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None,\n linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True\n )\n", (12096, 12512), True, 'import plot_utility as pu\n'), ((12635, 12697), 'plot_utility.log_scale_settings', 'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (12656, 12697), True, 'import plot_utility as pu\n'), ((12979, 13007), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (12989, 13007), True, 'import matplotlib.pyplot as plt\n'), ((13073, 13133), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (13084, 13133), True, 'import matplotlib.pyplot as plt\n'), ((13155, 13169), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13164, 13169), True, 'import matplotlib.pyplot as 
plt\n'), ((13234, 13257), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (13245, 13257), True, 'import numpy as np\n'), ((13276, 13310), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.ST2084'], {}), '(x, tf.ST2084)\n', (13296, 13310), True, 'import transfer_functions as tf\n'), ((13327, 13716), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': 'graph_name', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': 'None', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=graph_name,\n graph_title_size=None, xlabel='Input Luminance [cd/m2]', ylabel=\n 'Output Luminance [cd/m2]', axis_label_size=None, legend_size=17, xlim=\n [0.009, 15000], ylim=None, xtick=None, ytick=None, xtick_size=None,\n ytick_size=None, linewidth=3, minor_xtick_num=None, minor_ytick_num=\n None, return_figure=True)\n", (13342, 13716), True, 'import plot_utility as pu\n'), ((13843, 13905), 'plot_utility.log_scale_settings', 'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (13864, 13905), True, 'import plot_utility as pu\n'), ((14162, 14190), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (14172, 14190), True, 'import matplotlib.pyplot as plt\n'), ((14256, 14316), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (14267, 14316), True, 'import matplotlib.pyplot as plt\n'), ((14338, 14352), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (14347, 14352), True, 'import matplotlib.pyplot as plt\n'), ((15872, 15895), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (15883, 15895), True, 'import numpy as np\n'), ((15914, 15948), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.ST2084'], {}), '(x, tf.ST2084)\n', (15934, 15948), True, 'import transfer_functions as tf\n'), ((15965, 16401), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': '"""DaVinci17 Input-Output DRT Characteristics"""', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': '[0.009, 15000]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=\n 'DaVinci17 Input-Output DRT Characteristics', graph_title_size=None,\n xlabel='Input Luminance [cd/m2]', ylabel='Output Luminance [cd/m2]',\n axis_label_size=None, legend_size=17, xlim=[0.009, 15000], ylim=[0.009,\n 15000], xtick=None, ytick=None, xtick_size=None, ytick_size=None,\n linewidth=3, minor_xtick_num=None, minor_ytick_num=None, return_figure=True\n )\n", (15980, 16401), True, 'import plot_utility as pu\n'), ((16525, 16587), 'plot_utility.log_scale_settings', 
'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (16546, 16587), True, 'import plot_utility as pu\n'), ((16869, 16897), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (16879, 16897), True, 'import matplotlib.pyplot as plt\n'), ((16963, 17023), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (16974, 17023), True, 'import matplotlib.pyplot as plt\n'), ((17045, 17059), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (17054, 17059), True, 'import matplotlib.pyplot as plt\n'), ((17640, 17663), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (17651, 17663), True, 'import numpy as np\n'), ((17682, 17717), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['x', 'tf.GAMMA24'], {}), '(x, tf.GAMMA24)\n', (17702, 17717), True, 'import transfer_functions as tf\n'), ((17734, 18178), 'plot_utility.plot_1_graph', 'pu.plot_1_graph', ([], {'fontsize': '(20)', 'figsize': '(10, 8)', 'graph_title': '"""DaVinci17 Inverse DRT for SDR to HDR Conversion"""', 'graph_title_size': 'None', 'xlabel': '"""Input Luminance [cd/m2]"""', 'ylabel': '"""Output Luminance [cd/m2]"""', 'axis_label_size': 'None', 'legend_size': '(17)', 'xlim': '[0.009, 15000]', 'ylim': '[0.009, 15000]', 'xtick': 'None', 'ytick': 'None', 'xtick_size': 'None', 'ytick_size': 'None', 'linewidth': '(3)', 'minor_xtick_num': 'None', 'minor_ytick_num': 'None', 'return_figure': '(True)'}), "(fontsize=20, figsize=(10, 8), graph_title=\n 'DaVinci17 Inverse DRT for SDR to HDR Conversion', graph_title_size=\n None, xlabel='Input Luminance [cd/m2]', ylabel=\n 'Output Luminance [cd/m2]', axis_label_size=None, legend_size=17, xlim=\n [0.009, 15000], ylim=[0.009, 15000], xtick=None, ytick=None, xtick_size\n =None, ytick_size=None, linewidth=3, minor_xtick_num=None,\n minor_ytick_num=None, return_figure=True)\n", (17749, 18178), True, 'import plot_utility as pu\n'), ((18299, 18361), 'plot_utility.log_scale_settings', 'pu.log_scale_settings', (['ax1'], {'grid_alpha': '(0.5)', 'bg_color': '"""#E0E0E0"""'}), "(ax1, grid_alpha=0.5, bg_color='#E0E0E0')\n", (18320, 18361), True, 'import plot_utility as pu\n'), ((18649, 18677), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper left"""'}), "(loc='upper left')\n", (18659, 18677), True, 'import matplotlib.pyplot as plt\n'), ((18743, 18803), 'matplotlib.pyplot.savefig', 'plt.savefig', (['fname_full'], {'bbox_inches': '"""tight"""', 'pad_inches': '(0.1)'}), "(fname_full, bbox_inches='tight', pad_inches=0.1)\n", (18754, 18803), True, 'import matplotlib.pyplot as plt\n'), ((18825, 18839), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (18834, 18839), True, 'import matplotlib.pyplot as plt\n'), ((19022, 19042), 'colour.read_image', 'read_image', (['src_file'], {}), '(src_file)\n', (19032, 19042), False, 'from colour import write_image, read_image\n'), ((19101, 19134), 'colour.write_image', 'write_image', (['img_linear', 'dst_file'], {}), '(img_linear, dst_file)\n', (19112, 19134), False, 'from colour import write_image, read_image\n'), ((989, 1013), 'numpy.ones', 'np.ones', (['(1080, 1920, 3)'], {}), '((1080, 1920, 3))\n', (996, 1013), True, 'import numpy as np\n'), ((4121, 4157), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.ST2084'], 
{}), '(img, tf.ST2084)\n', (4141, 4157), True, 'import transfer_functions as tf\n'), ((5720, 5771), 'colour.read_image', 'read_image', (['"""./img/old/test_out_sdr100_on_gm24.tif"""'], {}), "('./img/old/test_out_sdr100_on_gm24.tif')\n", (5730, 5771), False, 'from colour import write_image, read_image\n'), ((7733, 7781), 'colour.read_image', 'read_image', (['"""./img/dv17_fig1_sdr_out_st2084.tif"""'], {}), "('./img/dv17_fig1_sdr_out_st2084.tif')\n", (7743, 7781), False, 'from colour import write_image, read_image\n'), ((8314, 8362), 'colour.read_image', 'read_image', (['"""./img/dv17_fig1_sdr_out_linear.tif"""'], {}), "('./img/dv17_fig1_sdr_out_linear.tif')\n", (8324, 8362), False, 'from colour import write_image, read_image\n'), ((12877, 12913), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.ST2084'], {}), '(img, tf.ST2084)\n', (12897, 12913), True, 'import transfer_functions as tf\n'), ((16767, 16803), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.ST2084'], {}), '(img, tf.ST2084)\n', (16787, 16803), True, 'import transfer_functions as tf\n'), ((18547, 18583), 'transfer_functions.eotf_to_luminance', 'tf.eotf_to_luminance', (['img', 'tf.ST2084'], {}), '(img, tf.ST2084)\n', (18567, 18583), True, 'import transfer_functions as tf\n'), ((19060, 19090), 'transfer_functions.eotf', 'tf.eotf', (['img_st2084', 'tf.ST2084'], {}), '(img_st2084, tf.ST2084)\n', (19067, 19090), True, 'import transfer_functions as tf\n'), ((604, 627), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (615, 627), True, 'import numpy as np\n'), ((827, 850), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1920)'], {}), '(0, 1, 1920)\n', (838, 850), True, 'import numpy as np\n'), ((3995, 4024), 'colour.read_image', 'read_image', (['file_list[idx][0]'], {}), '(file_list[idx][0])\n', (4005, 4024), False, 'from colour import write_image, read_image\n'), ((12751, 12780), 'colour.read_image', 'read_image', (['file_list[idx][0]'], {}), '(file_list[idx][0])\n', (12761, 12780), False, 'from colour import write_image, read_image\n'), ((13959, 13988), 'colour.read_image', 'read_image', (['file_list[idx][0]'], {}), '(file_list[idx][0])\n', (13969, 13988), False, 'from colour import write_image, read_image\n'), ((16641, 16670), 'colour.read_image', 'read_image', (['file_list[idx][0]'], {}), '(file_list[idx][0])\n', (16651, 16670), False, 'from colour import write_image, read_image\n'), ((18421, 18450), 'colour.read_image', 'read_image', (['file_list[idx][0]'], {}), '(file_list[idx][0])\n', (18431, 18450), False, 'from colour import write_image, read_image\n'), ((19422, 19447), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (19437, 19447), False, 'import os\n')]
|
import math
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
logPath = './online/log/9x16_test/'
nameList = ['Alien', 'Conan1', 'Conan2', 'Cooking', 'Rhinos', 'Skiing', 'Surfing', 'War']
num = 48
batch_size = 4
tileList = []
tileListList = [[] for j in range(len(nameList))]
for idx, f in enumerate(nameList):
for i in range(1, num+1):
with open(logPath + f"user_{i}/{f}_{i}.csv", 'r') as csvfile:
reader = csv.reader(csvfile)
rows = [row for row in reader]
tile_list = [float(item[3]) for item in rows[1:-8]]
try:
tileListList[idx].append(np.mean(tile_list))
except IndexError:
print("IndexError:", idx)
tileList.append(math.ceil(np.mean(tileListList[idx])))
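# Added descriptive comment: tileList now holds, per video, the ceiling of the mean of
# the per-user average tile counts over every user that was read successfully.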
x = np.arange(len(nameList))
width = 0.4
plt.rcParams['font.size'] = 20
fig, ax = plt.subplots(figsize=(20, 8))
plt.bar(x, tileList, width=width)
plt.grid(linestyle='--')
plt.xticks(x, nameList, fontsize=20)
ax.set_ylabel('AverageTiles')
plt.tight_layout()
plt.savefig('./online/log/9x16_test/48_users/tiles.png')
plt.show()
print("finish!")
fileName = './online/log/9x16_test/48_users/tiles.csv'
with open(fileName, 'w', newline='') as f:
logWriter = csv.writer(f, dialect='excel')
logWriter.writerow(['AverageMetric']+nameList)
logWriter.writerows([
['Tiles'] + tileList,
])
|
[
"numpy.mean",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xticks",
"csv.writer",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.tight_layout",
"csv.reader",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((883, 912), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 8)'}), '(figsize=(20, 8))\n', (895, 912), True, 'import matplotlib.pyplot as plt\n'), ((913, 946), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'tileList'], {'width': 'width'}), '(x, tileList, width=width)\n', (920, 946), True, 'import matplotlib.pyplot as plt\n'), ((948, 972), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'linestyle': '"""--"""'}), "(linestyle='--')\n", (956, 972), True, 'import matplotlib.pyplot as plt\n'), ((973, 1009), 'matplotlib.pyplot.xticks', 'plt.xticks', (['x', 'nameList'], {'fontsize': '(20)'}), '(x, nameList, fontsize=20)\n', (983, 1009), True, 'import matplotlib.pyplot as plt\n'), ((1040, 1058), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1056, 1058), True, 'import matplotlib.pyplot as plt\n'), ((1059, 1115), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./online/log/9x16_test/48_users/tiles.png"""'], {}), "('./online/log/9x16_test/48_users/tiles.png')\n", (1070, 1115), True, 'import matplotlib.pyplot as plt\n'), ((1116, 1126), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1124, 1126), True, 'import matplotlib.pyplot as plt\n'), ((1261, 1291), 'csv.writer', 'csv.writer', (['f'], {'dialect': '"""excel"""'}), "(f, dialect='excel')\n", (1271, 1291), False, 'import csv\n'), ((460, 479), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (470, 479), False, 'import csv\n'), ((771, 797), 'numpy.mean', 'np.mean', (['tileListList[idx]'], {}), '(tileListList[idx])\n', (778, 797), True, 'import numpy as np\n'), ((647, 665), 'numpy.mean', 'np.mean', (['tile_list'], {}), '(tile_list)\n', (654, 665), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
import scipy.io as sio
import glob
import os
import torch
import torch.utils.data
import torchvision.transforms.functional
import cv2
def read_dataset(path):
"""
Read training dataset or validation dataset.
:param path: The path of dataset.
:return: The list of filenames.
"""
image_list = glob.glob(os.path.join(path, 'images/*.jpg'))
return image_list
def read_mat(mode, path, image_list):
"""
Read joints.mat file.
joints.mat in lspet is (14, 3, 10000); joints.mat in lsp is (3, 14, 2000)
:param mode: 'lspet' or 'lsp'
:param path: The path of joints.mat.
:param image_list: The array of image filenames.
:return:
"""
mat_arr = sio.loadmat(os.path.join(path, 'joints.mat'))['joints']
# (x,y,z)
    # LSPET: z = 1 means the key point is visible (not occluded).
    # LSP: z = 0 means the key point is visible (not occluded).
key_point_list = []
limits = []
if mode == 'lspet':
key_point_list = np.transpose(mat_arr, (2, 0, 1)).tolist()
# Calculate the limits to find center points
limits = np.transpose(mat_arr, (2, 1, 0))
if mode == 'lsp':
        # Guarantee that z = 1 means the key point is visible (not occluded)
mat_arr[2] = np.logical_not(mat_arr[2])
key_point_list = np.transpose(mat_arr, (2, 1, 0)).tolist()
# Calculate the limits to find center points
limits = np.transpose(mat_arr, (2, 0, 1))
center_point_list = []
scale_list = []
for i in range(limits.shape[0]):
image = cv2.imread(image_list[i])
h = image.shape[0]
w = image.shape[1]
# Calculate the center points of each image
center_x = (limits[i][0][limits[i][0] > 0].min() + limits[i][0][limits[i][0] < w].max()) / 2
center_y = (limits[i][1][limits[i][1] > 0].min() + limits[i][1][limits[i][1] < h].max()) / 2
center_point_list.append([center_x, center_y])
# Calculate the scale of each image
scale = (limits[i][1][limits[i][1] < h].max() - limits[i][1][limits[i][1] > 0].min() + 4) / 368
scale_list.append(scale)
return key_point_list, center_point_list, scale_list
def gaussian_kernel(size_w, size_h, center_x, center_y, sigma):
grid_y, grid_x = np.mgrid[0:size_h, 0:size_w]
D2 = (grid_x - center_x) ** 2 + (grid_y - center_y) ** 2
return np.exp(-D2 / 2.0 / sigma / sigma)
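# The Gaussian kernel above is unnormalized (peak value 1 at the given center); LSP_DATA below
# uses it to paint the per-joint heatmap targets and the center map.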
class LSP_DATA(torch.utils.data.Dataset):
def __init__(self, mode, path, stride, transformer=None):
self.image_list = read_dataset(path)
self.key_point_list, self.center_point_list, self.scale_list = read_mat(mode, path, self.image_list)
self.stride = stride
self.transformer = transformer
self.sigma = 3.0
def __getitem__(self, item):
image_path = self.image_list[item]
image = np.array(cv2.imread(image_path), dtype=np.float32)
key_points = self.key_point_list[item]
center_points = self.center_point_list[item]
scale = self.scale_list[item]
# Expand dataset
image, key_points, center_points = self.transformer(image, key_points, center_points, scale)
h, w, _ = image.shape
# Generate heatmap
size_h = int(h / self.stride)
size_w = int(w / self.stride)
heatmap = np.zeros((size_h, size_w, len(key_points) + 1), dtype=np.float32)
# Generate the heatmap of all key points
for i in range(len(key_points)):
# Resize image from 368 to 46
x = int(key_points[i][0]) * 1.0 / self.stride
y = int(key_points[i][1]) * 1.0 / self.stride
kernel = gaussian_kernel(size_h=size_h, size_w=size_w, center_x=x, center_y=y, sigma=self.sigma)
kernel[kernel > 1] = 1
kernel[kernel < 0.01] = 0
heatmap[:, :, i + 1] = kernel
# Generate the heatmap of background
heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2)
# Generate centermap
centermap = np.zeros((h, w, 1), dtype=np.float32)
kernel = gaussian_kernel(size_h=h, size_w=w, center_x=center_points[0], center_y=center_points[1],
sigma=self.sigma)
kernel[kernel > 1] = 1
kernel[kernel < 0.01] = 0
centermap[:, :, 0] = kernel
image -= image.mean()
image = torchvision.transforms.functional.to_tensor(image)
heatmap = torch.from_numpy(np.transpose(heatmap, (2, 0, 1)))
centermap = torch.from_numpy(np.transpose(centermap, (2, 0, 1)))
return image.float(), heatmap.float(), centermap.float()
def __len__(self):
return len(self.image_list)
|
[
"numpy.logical_not",
"os.path.join",
"numpy.max",
"numpy.exp",
"numpy.zeros",
"numpy.transpose",
"cv2.imread"
] |
[((2187, 2220), 'numpy.exp', 'np.exp', (['(-D2 / 2.0 / sigma / sigma)'], {}), '(-D2 / 2.0 / sigma / sigma)\n', (2193, 2220), True, 'import numpy as np\n'), ((351, 385), 'os.path.join', 'os.path.join', (['path', '"""images/*.jpg"""'], {}), "(path, 'images/*.jpg')\n", (363, 385), False, 'import os\n'), ((1042, 1074), 'numpy.transpose', 'np.transpose', (['mat_arr', '(2, 1, 0)'], {}), '(mat_arr, (2, 1, 0))\n', (1054, 1074), True, 'import numpy as np\n'), ((1166, 1192), 'numpy.logical_not', 'np.logical_not', (['mat_arr[2]'], {}), '(mat_arr[2])\n', (1180, 1192), True, 'import numpy as np\n'), ((1312, 1344), 'numpy.transpose', 'np.transpose', (['mat_arr', '(2, 0, 1)'], {}), '(mat_arr, (2, 0, 1))\n', (1324, 1344), True, 'import numpy as np\n'), ((1432, 1457), 'cv2.imread', 'cv2.imread', (['image_list[i]'], {}), '(image_list[i])\n', (1442, 1457), False, 'import cv2\n'), ((3632, 3669), 'numpy.zeros', 'np.zeros', (['(h, w, 1)'], {'dtype': 'np.float32'}), '((h, w, 1), dtype=np.float32)\n', (3640, 3669), True, 'import numpy as np\n'), ((707, 739), 'os.path.join', 'os.path.join', (['path', '"""joints.mat"""'], {}), "(path, 'joints.mat')\n", (719, 739), False, 'import os\n'), ((2628, 2650), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2638, 2650), False, 'import cv2\n'), ((3560, 3593), 'numpy.max', 'np.max', (['heatmap[:, :, 1:]'], {'axis': '(2)'}), '(heatmap[:, :, 1:], axis=2)\n', (3566, 3593), True, 'import numpy as np\n'), ((4013, 4045), 'numpy.transpose', 'np.transpose', (['heatmap', '(2, 0, 1)'], {}), '(heatmap, (2, 0, 1))\n', (4025, 4045), True, 'import numpy as np\n'), ((4078, 4112), 'numpy.transpose', 'np.transpose', (['centermap', '(2, 0, 1)'], {}), '(centermap, (2, 0, 1))\n', (4090, 4112), True, 'import numpy as np\n'), ((941, 973), 'numpy.transpose', 'np.transpose', (['mat_arr', '(2, 0, 1)'], {}), '(mat_arr, (2, 0, 1))\n', (953, 973), True, 'import numpy as np\n'), ((1212, 1244), 'numpy.transpose', 'np.transpose', (['mat_arr', '(2, 1, 0)'], {}), '(mat_arr, (2, 1, 0))\n', (1224, 1244), True, 'import numpy as np\n')]
|
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from tensorflow import keras
from pandas.plotting import autocorrelation_plot
from keras import Sequential
from tensorflow.python.keras.layers.recurrent import LSTM
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder
from random import randint
import sys
df = pd.read_csv(r'C:\Users\Michael\Desktop\pwrball_rand\pwr_ball - Copy.csv')
trim = df.drop(['prize', 'daysin','daycos','year'], axis=1)
#print(trim)
sequence = trim.values.reshape(-1,1).tolist()
#print(sequence)
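# One-hot encode every drawn number; the encoder is fit on all numbers seen in the history,
# presumably giving the 69-dimensional vectors the model below expects.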
ohe = OneHotEncoder().fit(sequence)
encoded_trim = ohe.transform(sequence).toarray()
#np.set_printoptions(threshold=sys.maxsize)
row, col = encoded_trim.shape
def gen_sample(num_start):
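    # Builds one training pair: the 6 consecutive one-hot encoded numbers starting at num_start
    # as input, and the following (7th) number as the prediction target.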
#start_of_sample = randint(0,17436)
#print(start_of_sample)
sample_X = encoded_trim[num_start:num_start + 6, :]
sample_Y = encoded_trim[num_start+6:num_start+7, :]
sample_X = sample_X.reshape(1,6,69)
#sample_Y = sample_Y.reshape(1,1,69)
#print(sample_X.shape)
#print(sample_Y.shape)
return sample_X, sample_Y
#this_x, this_y = gen_sample(0)
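# Note: `Sequential` is imported from keras while the LSTM layers come from tensorflow.python.keras;
# depending on the installed Keras/TensorFlow versions these are distinct implementations and may not interoperate.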
model = Sequential()
model.add(LSTM(138, input_shape = (6,69), return_sequences = True))
model.add(LSTM(69, input_shape = (6,69)))
model.add(tf.keras.layers.Dense(69, activation='softmax'))
model.compile(optimizer=keras.optimizers.Adam(learning_rate=0.05), loss="categorical_crossentropy")
model.summary()
test_num = [9,36,49,56,62,9]
#june 27
test_num = np.asarray(test_num)
test_num = test_num.reshape(-1,1)
test_num_encode = ohe.transform(test_num).toarray()
#print(test_num_encode)
test_sample = test_num_encode.reshape(1,6,69)
for i in range(17429):
X, y = gen_sample(i)
model.fit(X,y,epochs=1, verbose=2)
#model.reset_states()
test_out = model.predict(test_sample)
test_out = ohe.inverse_transform(test_out)
print(test_out)
#expect 15
#test nums:
#9,36,49,56,62,8
#lstm_input_dataframe = pd.DataFrame(np.concatenate(lstm_input_unsplit))
#decoded_trim = ohe.inverse_transform(encoded_trim)
#print(type(decoded_trim))
|
[
"keras.Sequential",
"pandas.read_csv",
"sklearn.preprocessing.OneHotEncoder",
"numpy.asarray",
"tensorflow.python.keras.layers.recurrent.LSTM",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dense"
] |
[((412, 489), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\Michael\\\\Desktop\\\\pwrball_rand\\\\pwr_ball - Copy.csv"""'], {}), "('C:\\\\Users\\\\Michael\\\\Desktop\\\\pwrball_rand\\\\pwr_ball - Copy.csv')\n", (423, 489), True, 'import pandas as pd\n'), ((1243, 1255), 'keras.Sequential', 'Sequential', ([], {}), '()\n', (1253, 1255), False, 'from keras import Sequential\n'), ((1606, 1626), 'numpy.asarray', 'np.asarray', (['test_num'], {}), '(test_num)\n', (1616, 1626), True, 'import numpy as np\n'), ((1267, 1320), 'tensorflow.python.keras.layers.recurrent.LSTM', 'LSTM', (['(138)'], {'input_shape': '(6, 69)', 'return_sequences': '(True)'}), '(138, input_shape=(6, 69), return_sequences=True)\n', (1271, 1320), False, 'from tensorflow.python.keras.layers.recurrent import LSTM\n'), ((1336, 1365), 'tensorflow.python.keras.layers.recurrent.LSTM', 'LSTM', (['(69)'], {'input_shape': '(6, 69)'}), '(69, input_shape=(6, 69))\n', (1340, 1365), False, 'from tensorflow.python.keras.layers.recurrent import LSTM\n'), ((1379, 1426), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(69)'], {'activation': '"""softmax"""'}), "(69, activation='softmax')\n", (1400, 1426), True, 'import tensorflow as tf\n'), ((641, 656), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (654, 656), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((1457, 1498), 'tensorflow.keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': '(0.05)'}), '(learning_rate=0.05)\n', (1478, 1498), False, 'from tensorflow import keras\n')]
|
import torch
import torch.utils.data as data
from glob import glob
from os.path import join, basename, exists
import numpy as np
import pickle as pkl
from random import random
class KETTS76(data.Dataset):
def __init__(self, which_set='train', datapath='/home/thkim/data/KETTS76/bin_22050'):
# Load vocabulary
self.__dict__.update(locals())
vocab_path = datapath + '/vocab_dict.pkl'
self.vocab_dict = pkl.load(open(vocab_path, 'rb'))
self.vocab_size = len(self.vocab_dict)
self.num_spkr = 6
# Filelist
self.txtlist = np.sort(glob(datapath+'/*.txt'))
self.mellist = np.sort(glob(datapath+'/*.mel'))
if which_set == 'train':
self.txtlist = [xx for xx in self.txtlist if int(xx.split('_')[-1][:-4]) < 490]
self.mellist = [xx for xx in self.mellist if int(xx.split('_')[-1][:-4]) < 490]
elif which_set == 'val':
self.txtlist = [xx for xx in self.txtlist if int(xx.split('_')[-1][:-4]) >= 490]
self.mellist = [xx for xx in self.mellist if int(xx.split('_')[-1][:-4]) >= 490]
else:
raise ValueError
self.dbname = 'KETTS76'
self.gen_lu = {'f': 0, 'm': 1}
self.age_lu = {'age20': 0, 'age30': 1, 'age40': 2, 'age50': 3, 'age60': 4}
self.emo_lu = {'neu': 0, 'hap': 1, 'sad': 2, 'ang': 3, 'sur': 4, 'fea': 5, 'dis': 6}
self.spkr_dict = ['20m', '30f', '40m', '50m', '50f', '60f']
self.spkr_lu = {'_'.join((self.dbname, self.spkr_dict[ii])): xx for ii, xx in enumerate(range(self.num_spkr))}
assert len(self.txtlist)==len(self.mellist), \
            'mellist({}) and txtlist({}) have different lengths'.format(len(self.mellist), len(self.txtlist))
self.char2onehot = lambda x : self.vocab_dict[x] if x in self.vocab_dict.keys() else None
def __len__(self):
return len(self.txtlist)
def __getitem__(self, idx):
# Text read
with open(self.txtlist[idx], 'r') as f:
txt = f.readline()
txt_feat = list(filter(None, [self.char2onehot(xx) for xx in txt]))
# Mel/Lin read
mellin = pkl.load(open(self.mellist[idx], 'rb'))
mel = mellin['mel']
#lin = mellin['lin']
target_mel_name = basename(self.mellist[idx])
spk, emo, _, _, sent_no = target_mel_name[:-4].split('_')
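        # Pick two reference utterances: a "style" mel from the same speaker/emotion but a random
        # sentence, and a "contents" mel with the same sentence number but a random speaker/emotion.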
while True:
new_sent = np.random.randint(500)
style_mel_name = f'{spk}_{emo}_500_trim_{new_sent:05}.mel'
style_mel_path = join(self.datapath, style_mel_name)
if exists(style_mel_path):
break
while True:
new_emo = np.random.choice(list(self.emo_lu.keys()))
new_spk = np.random.choice(self.spkr_dict)
contents_mel_name = f'{new_spk}_{new_emo}_500_trim_{sent_no}.mel'
contents_mel_path = join(self.datapath, contents_mel_name)
if exists(contents_mel_path):
break
contents_mel = pkl.load(open(contents_mel_path, 'rb'))['mel']
style_mel = pkl.load(open(style_mel_path, 'rb'))['mel']
style = self.getstyle(self.txtlist[idx])
return {'txt': np.asarray(txt_feat),
'style': style,
#'target_lin': np.asarray(lin),
'target_mel': np.asarray(mel),
'style_mel': np.asarray(style_mel),
'contents_mel': np.asarray(contents_mel),
'filename': {'target':self.mellist[idx], 'style':style_mel_path, 'input':contents_mel_path}
}
def getstyle(self, filename):
filename = basename(filename)
spkr, emo = basename(filename).split('_')[:2]
gender = self.gen_lu[spkr[2]]
age = self.age_lu[f'age{spkr[:2]}']
emotion = self.emo_lu[emo]
spkr = self.spkr_lu['_'.join((self.dbname, spkr))]
return {'age': age, 'gender': gender,'emotion': emotion, 'dbname': self.dbname, 'spkr': spkr}
def get_vocab_size(self):
return self.vocab_size
def set_vocab_dict(self, vocab_dict):
self.vocab_dict = vocab_dict
self.vocab_size = len(vocab_dict)
self.char2onehot = lambda x : self.vocab_dict[x] if x in self.vocab_dict.keys() else None
def set_spkr_lu(self, spkr_lu):
self.spkr_lu = spkr_lu
if __name__=='__main__':
aa = KETTS76()
aa[0]
import ipdb
ipdb.set_trace()
|
[
"os.path.exists",
"ipdb.set_trace",
"numpy.random.choice",
"os.path.join",
"numpy.asarray",
"numpy.random.randint",
"os.path.basename",
"glob.glob"
] |
[((4439, 4455), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (4453, 4455), False, 'import ipdb\n'), ((2296, 2323), 'os.path.basename', 'basename', (['self.mellist[idx]'], {}), '(self.mellist[idx])\n', (2304, 2323), False, 'from os.path import join, basename, exists\n'), ((3660, 3678), 'os.path.basename', 'basename', (['filename'], {}), '(filename)\n', (3668, 3678), False, 'from os.path import join, basename, exists\n'), ((595, 620), 'glob.glob', 'glob', (["(datapath + '/*.txt')"], {}), "(datapath + '/*.txt')\n", (599, 620), False, 'from glob import glob\n'), ((651, 676), 'glob.glob', 'glob', (["(datapath + '/*.mel')"], {}), "(datapath + '/*.mel')\n", (655, 676), False, 'from glob import glob\n'), ((2436, 2458), 'numpy.random.randint', 'np.random.randint', (['(500)'], {}), '(500)\n', (2453, 2458), True, 'import numpy as np\n'), ((2559, 2594), 'os.path.join', 'join', (['self.datapath', 'style_mel_name'], {}), '(self.datapath, style_mel_name)\n', (2563, 2594), False, 'from os.path import join, basename, exists\n'), ((2610, 2632), 'os.path.exists', 'exists', (['style_mel_path'], {}), '(style_mel_path)\n', (2616, 2632), False, 'from os.path import join, basename, exists\n'), ((2764, 2796), 'numpy.random.choice', 'np.random.choice', (['self.spkr_dict'], {}), '(self.spkr_dict)\n', (2780, 2796), True, 'import numpy as np\n'), ((2907, 2945), 'os.path.join', 'join', (['self.datapath', 'contents_mel_name'], {}), '(self.datapath, contents_mel_name)\n', (2911, 2945), False, 'from os.path import join, basename, exists\n'), ((2961, 2986), 'os.path.exists', 'exists', (['contents_mel_path'], {}), '(contents_mel_path)\n', (2967, 2986), False, 'from os.path import join, basename, exists\n'), ((3218, 3238), 'numpy.asarray', 'np.asarray', (['txt_feat'], {}), '(txt_feat)\n', (3228, 3238), True, 'import numpy as np\n'), ((3353, 3368), 'numpy.asarray', 'np.asarray', (['mel'], {}), '(mel)\n', (3363, 3368), True, 'import numpy as np\n'), ((3399, 3420), 'numpy.asarray', 'np.asarray', (['style_mel'], {}), '(style_mel)\n', (3409, 3420), True, 'import numpy as np\n'), ((3454, 3478), 'numpy.asarray', 'np.asarray', (['contents_mel'], {}), '(contents_mel)\n', (3464, 3478), True, 'import numpy as np\n'), ((3699, 3717), 'os.path.basename', 'basename', (['filename'], {}), '(filename)\n', (3707, 3717), False, 'from os.path import join, basename, exists\n')]
|
"""Flexible code for histogramming per-snp and per-replica statistics for selected SNPs in selected replicas in
selected scenarios and/or demographies."""
from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios
from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, \
MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff
from Classes.DotData import DotData
from Operations.Ilya_Operations.PipeRun.python.PipeRun import GetDependsOn
from Operations.Shari_Operations.localize.PopConsts import AllFreqs, AllPops, AllAges, CAUSAL_POS
from Operations.IDotData import IDotData
import operator, os, logging, contextlib, functools, collections, types, ast
from itertools import izip
import itertools, string
from UserDict import DictMixin
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as pp
import numpy as np
import math
import traceback as tb
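# NOTE: this module targets Python 2 (itertools.izip, UserDict, types.StringTypes, builtin reduce).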
__all__ = ( 'gatherCausalFreqs', 'DefineRulesTo_gatherCausalFreqs', 'histogramSnpStatistic', 'histogramReplicaStatistic',
'AddUpHistograms', 'GraphHistograms', 'GraphCumulPlots', 'DefineRulesTo_histogramSnpStatistic',
'DefineRulesTo_histogramReplicaStatistic', 'findReplicasMatchingConds', 'findSnpsMatchingConds',
'identifyReplicasMeetingConds', 'splitSnpStatsFile',
'DefineRulesTo_identifyReplicasMeetingCommonConds' )
def gatherCausalFreqs( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
"""For all replicas within one scenario, gather some useful summary info for each replica:
e.g. that replica's modern-day frequency of the causal allele, the genetic map position of the
causal SNP, number of SNPs in the replica, the range of the genetic map, etc.
"""
#hm3big/simsOutHm3big/10ky/sel100_1/
simScenDir = Ddata + '/' + simsOut + thinSfx + '/' + scen.scenDir()
statScenDir = Ddata + '/replicastats' + thinSfx + '/' + scen.scenDir()
posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []
replicaInfoFileName = statScenDir + '/replicaStats.tsv'
if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
mediumRuleNameSfx = scen )
causalAlleleFreqs = [ ]
replicaNums = [ ]
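    # the causal (selected) SNP is simulated at a fixed position in every replica
    # (presumably the CAUSAL_POS constant imported above)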
selpos = 500000
okReplicas = 0
for replicaNum in range( nreplicas ):
if scen.is_neutral(): causalFreq = np.nan
else:
posFile = DotData( SVPath = posFileNames[ replicaNum ], SVSkipFirstLines = 1, SVHeader = False,
names = ['SNP','CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2' ] )
causalLine = posFile[ posFile.CHROM_POS == selpos ]
assert len( causalLine ) == 1
causalFreq = causalLine[0].FREQ1
causalAlleleFreqs.append( causalFreq )
replicaNums.append( replicaNum )
DotData( names = [ 'replicaNum', 'causalAlleleFreq', 'targetCausalFreq' ],
Columns = [ replicaNums, causalAlleleFreqs,
(( 0 if scen.isNeutral() else scen.mutFreq),)*nreplicas ] ).saveToSV( replicaInfoFileName )
def gatherReplicaGDstats( scen, Ddata, simsOut, thinSfx, thinExt, nreplicas, getio = None ):
"""For all replicas within each scenario, gather some genetic map-related info for each replica:
e.g. the genetic map position of the
causal SNP, the range of the genetic map, etc.
"""
#hm3big/simsOutHm3big/10ky/sel100_1/
simScenDir = os.path.join( Ddata, simsOut + thinSfx, scen.scenDir() )
statScenDir = os.path.join( Ddata, 'replicastats' + thinSfx, scen.scenDir() )
posFileNames = [ simScenDir + '/' + '%d_%s.pos-%d%s' % ( replicaNum, scen.scenName(), scen.mutPop, thinExt )
for replicaNum in range( nreplicas ) ] if not scen.is_neutral() else []
replicaInfoFileName = statScenDir + '/replicaStats.tsv'
if getio: return dict( depends_on = posFileNames, creates = replicaInfoFileName,
mediumRuleNameSfx = scen )
causalAlleleFreqs = [ ]
replicaNums = [ ]
selpos = 500000
def DefineRulesTo_gatherCausalFreqs( pr, Ddata, simsOut = 'simsOut',
mutAges = AllAges, mutPops = AllPops, mutFreqs = AllFreqs,
thinSfx = '', thinExt = '', nreplicas = 100 ):
"""Define rules to gather per-replica statistics"""
for scen in GetScenarios( mutAges, mutPops, mutFreqs ):
pr.addInvokeRule( invokeFn = gatherReplicaStats,
invokeArgs = Dict( 'scen Ddata simsOut thinSfx thinExt nreplicas' ) )
# for compatibility with old code
gatherReplicaStats = gatherCausalFreqs
DefineRulesTo_gatherReplicaStats = DefineRulesTo_gatherCausalFreqs
def histogramSnpStatistic( Ddata, thinSfx, scenDir, replicaTables, replicaCond, snpTables, snpCond, snpStat,
outFile, nreplicas, binSize, binShift = 0.0, sfx = None, scenSfx = None, getio = None ):
"""Compute histogram of $snpStat for snps matching $snpCond in replicas matching $replicaCond in scenario $scenDir.
Params:
      snpTables - names of the per-snp statistics tables; for each table we assume there is a file called
         Ddata/snpStats<thinSfx>/<scenDir>/<snpTable><scenSfx>.tsv for each scenario.
      snpStat - a Python expression over the rows of these tables giving the per-SNP value to histogram.
"""
replicaTables = MakeSeq( replicaTables )
snpTables = MakeSeq( snpTables )
replicaCondExpr = compile_expr( replicaCond )
snpCondExpr = compile_expr( snpCond )
snpStatExpr = compile_expr( snpStat )
outFile = AddFileSfx( outFile, sfx )
outFileStats = AddFileSfx( outFile, 'stats' )
if IsSeq( scenSfx ): scenSfx = dict( scenSfx )
replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenDir,
replicaTable + ( '.tsv' if '.' not in replicaTable else '' ) )
for replicaTable in replicaTables ]
snpTableFiles = [ os.path.join( Ddata, 'snpStats' + thinSfx, scenDir,
AddFileSfx( snpTable + ( '.tsv' if '.' not in snpTable else '' ),
scenSfx if isinstance( scenSfx, types.StringTypes )
else scenSfx[ os.path.splitext( snpTable )[0] ] ) )
for snpTable in snpTables ]
#dbg('replicaTableFiles snpTableFiles')
#dbg('"*****" replicaTableFiles+snpTableFiles')
replicaTableFiles = [ f + '/' if f.endswith('.data') else f for f in replicaTableFiles ]
snpTableFiles = [ f + '/' if f.endswith('.data') else f for f in snpTableFiles ]
snpTables = [ os.path.splitext(snpTable)[0] for snpTable in snpTables ]
if getio: return dict( depends_on = replicaTableFiles + snpTableFiles,
creates = ( outFile, AddFileSfx( outFile, 'stats' ) ),
attrs = Dict( 'scenDir snpCond replicaCond snpStat' ),
mediumRuleNameSfx = ( scenDir, scenSfx ) )
replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]
replicasToUse = [ eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) )
for replicaTableRows in izip( *replicaTableVals ) ]
#dbg( 'sum(replicasToUse)' )
snpTableVals = [ IDotData( SVPath = f ) for f in snpTableFiles ]
histogramBuilder = Histogrammer( binSize = binSize, binShift = binShift )
lastReplica = np.nan
for snpTableRows in izip( *snpTableVals ):
r0 = snpTableRows[ 0 ]
assert all([ r.Chrom == r0.Chrom for r in snpTableRows ]) or all([ np.isnan( r.Chrom ) for r in snpTableRows ])
assert all([ r.Pos == r0.Pos for r in snpTableRows ])
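        # by convention here the replica number is stored in the Chrom column (NaN maps to replica -1)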
replica = int( r0.Chrom ) if not np.isnan( r0.Chrom ) else -1
useThisReplica = not replicaTables or replicasToUse[ replica ]
if replica != lastReplica: dbg( 'replica useThisReplica histogramBuilder.getNumVals()' )
if useThisReplica:
snpDict = dict( zip( snpTables, snpTableRows ) )
if eval( snpCondExpr, globals(), snpDict ):
val = eval( snpStatExpr, globals(), snpDict )
histogramBuilder.addVal( val )
lastReplica = replica
    logging.info( 'saving histogram to %s', outFile )
histogramBuilder.save( outFile )
def histogramReplicaStatistic( Ddata, thinSfx, replicaCond, replicaStat,
outFile, nreplicas, binSize, scenCond = 'True',
replicaTables = None,
scen2sfxs = {}, allScens = GetScenarios(),
sfx = None, replicaCondSfx = '',
nameSfx = '', getio = None ):
"""Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
Saves the histogram as well as overall stats about the values of this statistic, e.g. the average.
Params:
      replicaTables - names of the per-replica statistics tables; for each table we assume there is a file called
         Ddata/replicastats<thinSfx>/<scenDir>/<replicaTable>.tsv for each scenario.
      replicaStat - a Python expression over the rows of these tables giving the per-replica value to histogram.
"""
outFile = AddFileSfx( outFile, sfx, replicaCondSfx )
outFileStats = AddFileSfx( outFile, 'stats' )
args = Dict( 'Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens' )
if getio: return dict( depends_on =
findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
creates = ( outFile, outFileStats ),
mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
histogramBuilder = Histogrammer( binSize = binSize )
histogramBuilder.addVals( findReplicasMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
histogramBuilder.save( outFile )
def histogramSnpStatistic2( Ddata, thinSfx, snpTables, snpCond, snpCondSfx, replicaTables, replicaCond, replicaStat,
outFile, nreplicas, binSize, scenCond = 'True',
scen2sfxs = {}, allScens = GetScenarios(),
sfx = None, replicaCondSfx = '',
nameSfx = '', getio = None ):
"""Compute histogram of $replicaStat for replicas matching $replicaCond in scenarios matching $scenCond.
Saves the histogram as well as overall stats about the values of this statistic, e.g. the average.
Params:
      snpTables - names of the per-snp statistics tables; for each table we assume there is a file called
         Ddata/snpStats<thinSfx>/<scenDir>/<snpTable><scenSfx>.tsv for each scenario.
      replicaStat - a Python expression over the rows of these tables giving the per-SNP value to histogram.
"""
outFile = AddFileSfx( outFile, sfx, replicaCondSfx, snpCondSfx )
outFileStats = AddFileSfx( outFile, 'stats' )
args = Dict( 'Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens' )
if getio: return dict( depends_on =
                           findSnpsMatchingConds( getio = True, **args )[ 'depends_on' ],
creates = ( outFile, outFileStats ),
mediumRuleNameSfx = sfx, attrs = dict( piperun_short = True ),
name = 'histogramReplicaStatistic' + Sfx( nameSfx ) )
histogramBuilder = Histogrammer( binSize = binSize )
    histogramBuilder.addVals( findSnpsMatchingConds( showHeadings = 'val', showVals = replicaStat, **args ).val )
histogramBuilder.save( outFile )
def AddUpHistograms( histFiles, outFile, getio = None ):
"""Add up histograms from separate files, write results to new file"""
outFileStats = AddFileSfx( outFile, 'stats' )
if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileStats ),
attrs = dict( piperun_short = True ) )
sumHist = reduce( operator.add, map( Histogrammer.load, histFiles ) )
sumHist.save( outFile )
def GraphHistograms( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
relWidth = 0.4,
xbound = None, ybound = None, coarsenBy = None, sfx = '',
ticksCoarsen = 1, log = False, normed = False,
cumulative = False,
cumulativeUpTo = None,
figSize = (24, 12 ),
subplots_adjust = {},
getio = None ):
"""Plot one or more histograms sharing the same bins.
Params:
      normed - if true, for each histogram on the y-axis we plot not the number of
items in a given bin, but their fraction out of the total number of items in that histogram.
This lets us compare different histograms.
"""
#dbg( '"at_first" labels' )
# ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
# relationship, then we can still do the graph).
histFiles = MakeSeq( histFiles )
if not outFile:
assert len( histFiles ) == 1
outFile = ReplaceFileExt( histFiles[0], '.png' )
outFile = AddFileSfx( outFile, sfx )
if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
if getio: return dict( depends_on = histFiles, creates = outFile,
mediumRuleNameSfx = sfx,
attrs = dict( piperun_short = True ) )
pp.figure(1, figsize = figSize )
#pp.clf()
pp.subplots_adjust( **MergeDicts( dict( hspace = 0.3, bottom = 0.15 ), subplots_adjust ) )
for which, cumulative in enumerate( ( True, False ) ):
pp.subplot( 2, 1, which + 1 )
pp.xlabel( xlabel )
pp.ylabel( ylabel )
pp.hold( True )
binSize = None
binShift = None
theLabels = []
theHandles = []
hists = map( Histogrammer.load, histFiles )
if coarsenBy: hists = [ hist.coarsenBy( coarsenBy ) for hist in hists ]
allBinIds = reduce( operator.concat, [ hist.bin2count.keys() for hist in hists ] )
if not allBinIds: allBinIds = ( 0, )
minBinId = min( allBinIds )
maxBinId = max( allBinIds ) + 1
if cumulativeUpTo is not None:
maxBinId = min( maxBinId, max( [ hist.getCumulativeBinFor( cumulativeUpTo ) for hist in hists ] ) ) + 1
for color, label, ( histFileNum, hist ) in zip( colors, labels, enumerate( hists ) ):
# check that all histograms we're loading have the same bins
if binSize is None: binSize = hist.binSize
else: assert abs( hist.binSize - binSize ) < 1e-12
if binShift is None: binShift = hist.binShift
else: assert abs( hist.binShift - binShift ) < 1e-12
width = binSize * relWidth / len( histFiles )
left = np.array( hist.getAllBinLefts( minBinId = minBinId, maxBinId = maxBinId ) ) + histFileNum * width
if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( left ) if i % ticksCoarsen == 0 ] )
height = hist.getAllBinCounts( normed = normed, cumulative = cumulative,
minBinId = minBinId, maxBinId = maxBinId )
rects = pp.bar( height = height,
width = width * 0.95, **Dict( 'left color log' ) )
if rects:
labelHere = label + ' (%d values)' % hist.getNumVals()
if hist.getNumNaNs(): labelHere += ' (%d nans)' % hist.getNumNaNs()
if hist.getNumInfs(): labelHere += ' (%d infs)' % hist.getNumInfs()
rects[ 0 ].set_label( labelHere )
theLabels.append( labelHere )
theHandles.append( rects[0] )
pp.title( title )
if theLabels and theHandles:
pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
if xbound: pp.gca().set_xbound( *xbound )
if ybound: pp.gca().set_ybound( *ybound )
pp.savefig( outFile )
def GraphCumulPlots( histFiles, outFile = None, xlabel = '', ylabel = '', title = '',
labels = (), colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
relWidth = 0.4,
xbound = None, ybound = None, coarsenBy = None, sfx = '',
ticksCoarsen = 1, log = False, normed = True,
getio = None ):
"""Plot one or more cumulative plots.
"""
# ( if the bins of one are strictly finer than bins of other, i.e. if they form a DAG in this
# relationship, then we can still do the graph).
histFiles = MakeSeq( histFiles )
if not outFile:
assert len( histFiles ) == 1
outFile = ReplaceFileExt( histFiles[0], '.png' )
if not labels: labels = [ os.path.splitext( os.path.basename( f ) )[0] for f in histFiles ]
outFileTable = outFile + '.points.tsv'
if getio: return dict( depends_on = histFiles, creates = ( outFile, outFileTable ),
mediumRuleNameSfx = sfx,
attrs = dict( piperun_short = True ) )
pp.figure(1, figsize = (18,6) )
#pp.clf()
pp.subplots_adjust( bottom = 0.37 )
pp.xlabel( xlabel + '\n\n\n\n' )
pp.ylabel( ylabel )
pp.hold( True )
binSize = None
theLabels = []
theHandles = []
for color, label, ( histFileNum, histFile ) in zip( colors, labels, enumerate( histFiles ) ):
hist = Histogrammer.load( histFile )
if coarsenBy: hist = hist.coarsenBy( coarsenBy )
if not binSize: binSize = hist.binSize
else:
if not abs( hist.binSize - binSize ) < 1e-12:
dbg( 'hist.binSize binSize hist.binSize-binSize' )
assert abs( hist.binSize - binSize ) < 1e-12
binLefts = hist.getBinLefts()
if histFileNum == 0: pp.xticks( [ x for i, x in enumerate( binLefts ) if i % ticksCoarsen == 0 ] )
binCounts = hist.getBinCounts( normed = normed, cumulative = True )
rects = pp.plot( binLefts, binCounts, label = label, color = color )
DotData( names = ( 'binLefts', 'binCounts' ), Columns = ( binLefts, binCounts ) ).saveToSV( outFileTable )
if rects:
theLabels.append( label )
theHandles.append( rects )
pp.title( title )
if theLabels and theHandles:
pp.figlegend( loc = 'lower center', labels = theLabels, handles = theHandles )
if xbound: pp.gca().set_xbound( *xbound )
if ybound: pp.gca().set_ybound( *ybound )
pp.savefig( outFile )
def DefineRulesTo_histogramSnpStatistic( pr, Ddata,
outFile, snpTables, snpStat, binSize,
binShift = 0.0,
scen2sfxs = lambda scen: '',
scenCond = 'True',
allScens = GetScenarios(),
nreplicas = 100, thinSfx = '', replicaTables = (),
replicaConds = 'True', replicaCondsSfxs = '',
snpConds = 'True', snpCondsSfxs = '', title = '', titlePrefix = '',
xlabel = '', ylabel = '',
xbound = None, ybound = None, log = False, coarsenBy = None, sfx = '',
ticksCoarsen = 1, cumulative = False, normed = False,
colors = 'brcmygkbrcmygkbrcmygkbrcmygk',
subplots_adjust = {},
name = None ):
"""A generic way to plot the distribution of some per-snp statistics for some subset of SNPs.
Params:
      snpTables - names of the per-snp statistics tables; for each table we assume there is a file called
         Ddata/snpStats<thinSfx>/<scenDir>/<snpTable><scenSfx>.tsv for each scenario.
      snpStat - a Python expression over the rows of these tables giving the per-SNP value to histogram.
Notes:
- for histogramming should not need to load it all into memory. can do a pre-pass to just get
the range of values, define the bins, then do a second pass to count what goes in what bin.
could also add bins as we go. so, really just need to know bin size, and then can do all this
with one pass. can also, later, make this automatically parallelized.
"""
if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
scenCondExpr = compile_expr( scenCond )
replicaConds = MakeSeq( replicaConds )
replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
snpConds = MakeSeq( snpConds )
snpCondsSfxs = MakeSeq( snpCondsSfxs )
totaledHistFiles = []
totaledLabels = []
outFile = AddFileSfx( outFile, sfx )
baseOutFile = outFile
for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
for snpCond, snpCondSfx in zip( snpConds, snpCondsSfxs ):
histFiles = []
for scen in allScens:
if not eval( scenCondExpr, globals(), ScenAttrs( scen ) ): continue
scenDir = scen.scenDir()
for scenSfx in MakeSeq( scen2sfxs( scen ) if callable( scen2sfxs ) else scen2sfxs[ scen ] ):
histOutFile = os.path.join( Ddata, 'hist', scenDir,
AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ),
snpStat,
replicaCondSfx, snpCondSfx, scenSfx, sfx ) )
rule = pr.addInvokeRule( invokeFn = histogramSnpStatistic,
invokeArgs =
dict( outFile = histOutFile,
**Dict( 'Ddata thinSfx replicaTables replicaCond snpTables snpCond '
'snpStat nreplicas binSize binShift scenDir scenSfx sfx' ) ),
name = name,
comment = 'Compute distribution of ' + snpStat
+ ' for SNPs matching ' + snpCond + ' in replicas matching ' + replicaCond )
histFiles.append( histOutFile )
totaledHistFile = os.path.join( Ddata, 'hist',
AddFileSfx( ReplaceFileExt( os.path.basename( outFile ), '.tsv' ),
snpCondSfx, replicaCondSfx, sfx ) )
totaledHistFiles.append( totaledHistFile )
totaledLabel = ''
if replicaCondSfx:
totaledLabel += replicaCondSfx + ' replicas' + ( (' (' + replicaCond + ') ') \
if replicaCond != 'True' else '' )
if snpCondSfx: totaledLabel += snpCondSfx + ' SNPs' + ( (' (' + snpCond + ') ') \
if snpCond != 'True' else '' )
totaledLabels.append( totaledLabel )
pr.addInvokeRule( invokeFn = AddUpHistograms, invokeArgs = dict( histFiles = histFiles,
outFile = totaledHistFile ),
mediumRuleNameSfx = ( sfx, snpStat, replicaCondSfx, snpCondSfx ), name = 'AddUpSnpHists',
fileDescrs = { 0:
( 'Distribution of <b>' + snpStat + '</b> among '
+ ( 'all SNPs' if snpCond == 'True'
else ' snps matching <em>' + snpCond + '</em>' )
+ ' in '
+ ( 'all replicas' if replicaCond == 'True' else
'replicas matching <em>' + replicaCond + '</em>' )
+ ' in '
+ ( 'all scenarios' if scenCond == 'True' else
'scenarios matching <em>' + scenCond + '</em>' ),
( ( 'count', 'Number of SNPs with ' + snpStat + ' in given bin' ),) ) } )
if not title:
title = 'Histogram of ' + snpStat + '\n'
if scenCond != 'True': title += ' scenCond: ' + scenCond
if any( replicaCond != 'True' for replicaCond in replicaConds ):
title += ' replicaConds: ' + ', '.join(replicaCondsSfxs)
if any( snpCond != 'True' for snpCond in snpConds ): title += ' snpConds: ' + ', '.join(snpCondsSfxs)
title = titlePrefix + title
if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of snps'
if not xlabel: xlabel = snpStat
pr.addInvokeRule( invokeFn = GraphHistograms,
mediumRuleNameSfx = (snpStat,) + tuple(replicaCondsSfxs) + tuple(snpCondsSfxs),
name = 'GraphSnpHists',
invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels,
**Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile '
'cumulative normed ticksCoarsen colors' ) ),
attrs = Dict( 'snpStat replicaConds snpConds scenCond subplots_adjust' ) )
def DefineRulesTo_histogramReplicaStatistic( pr, Ddata,
outFile, replicaStat, binSize,
scenCond = 'True',
replicaTables = None,
sfx = '',
scen2sfxs = lambda scen: '',
allScens = tuple( GetScenarios() ),
nreplicas = 100, thinSfx = '',
replicaConds = 'True', replicaCondsSfxs = '',
title = '', titlePrefix = '',
xlabel = '', ylabel = '',
xbound = None, ybound = None, log = False, coarsenBy = None,
ticksCoarsen = 1, cumulative = False, normed = False,
cumulativeUpTo = 0.99,
subplots_adjust = {},
name = None, nameSfx = '' ):
"""Define rules to plot the distribution of a specified per-replica statistic for some subsets of replicas
in some subset of scenarios.
Params:
pr - the PipeRun object to which the rules should be added
Ddata - the root folder of the genetic data in simulations format
outFile - the filename to which the histogram plot will be written
replicaTables - names of tables containing per-replica values. For each such table T,
there must be a file of the form os.path.join( Ddata, replicastats, scenario.scenDir(), T + '.tsv' )
giving some values for each replica in the scenario.
replicaStat - a Python expression in which the names in replicaTables may appear as variables, and refer
to a named tuple representing the replica's row in the corresponding replicaTable.
Notes:
- for histogramming should not need to load it all into memory. can do a pre-pass to just get
the range of values, define the bins, then do a second pass to count what goes in what bin.
could also add bins as we go. so, really just need to know bin size, and then can do all this
with one pass. can also, later, make this automatically parallelized.
"""
if not os.path.dirname( outFile ): outFile = os.path.join( Ddata, outFile )
scenCondExpr = compile_expr( scenCond )
ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ]
if callable( scen2sfxs ):
scen2sfxs = dict( ( scen, scen2sfxs( scen ) ) for scen in ourScens )
replicaConds = MakeSeq( replicaConds )
replicaCondsSfxs = MakeSeq( replicaCondsSfxs )
totaledHistFiles = []
totaledLabels = []
for replicaCond, replicaCondSfx in zip( replicaConds, replicaCondsSfxs ):
totaledHistFile = os.path.join( Ddata, 'hist',
ReplaceFileExt( os.path.basename( outFile ), '.tsv' ) )
totaledLabels.append( replicaCondSfx + ': ' + replicaCond )
r = pr.addInvokeRule( invokeFn = histogramReplicaStatistic,
invokeArgs = Dict( 'Ddata thinSfx replicaTables replicaCond replicaStat nreplicas '
'binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx',
outFile = totaledHistFile ),
mediumRuleNameSfx = ( replicaStat, replicaCondSfx, sfx ),
fileDescrs = { 0:
( 'Distribution of <b>' + replicaStat + '</b> among '
+ ( 'all replicas' if replicaCond == 'True' else
'replicas matching <em>' + replicaCond + '</em>' )
+ ' in '
+ ( 'all scenarios' if scenCond == 'True' else
'scenarios matching <em>' + scenCond + '</em>' ),
( ( 'count', 'Number of replicas with ' + replicaStat +
' in given bin' ),
)) } )
totaledHistFiles.append( r.creates[0] )
if not title:
if scenCond != 'True': title += ' scenCond: ' + scenCond
if len( replicaConds ) == 1 and replicaConds[0] != 'True': title += ' replicaCond: ' + replicaConds[0]
title = titlePrefix + title
if not ylabel: ylabel = ('#' if not normed else 'fraction') + ' of replicas'
if not xlabel: xlabel = replicaStat
pr.addInvokeRule( invokeFn = GraphHistograms,
invokeArgs = dict( histFiles = totaledHistFiles, labels = totaledLabels,
**Dict( 'xlabel ylabel title xbound ybound coarsenBy log outFile '
'sfx ticksCoarsen cumulative normed cumulativeUpTo' ) ),
name = 'GraphReplicaHists' + Sfx( nameSfx ),
mediumRuleNameSfx = ( replicaStat, sfx ) + tuple( replicaConds ),
attrs = Dict( 'replicaStat sfx subplots_adjust' ) )
return totaledHistFiles
def identifyReplicasMeetingConds( Ddata, scenario, replicaTables, replicaConds, condsFileFN, nreplicas,
thinSfx = '', getio = None ):
"""Given a list of named replica conditions, determine for each replica which conditions it meets, and
write out the result in an easy-to-access format.
Input params:
replicaConds - sequence of pairs of the form ( condName, cond ) -- for example,
( ( 'hi', 'replicaStats.causalAlleleFreq >= .5' ), ( 'lo', 'replicaStats.causalAlleleFreq < .5' ) )
"""
replicaTables = MakeSeq( replicaTables )
replicaTableFiles = [ os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
replicaTable + ( '.tsv' if not os.path.splitext( replicaTable )[1] else '' ) )
for replicaTable in replicaTables ]
if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
condsFileFN )
if getio: return dict( depends_on = replicaTableFiles, creates = condsFileFN, mediumRuleNameSfx = scenario.scenDir(),
attrs = dict( piperun_short = True,
condNames = ', '.join( map( operator.itemgetter( 0 ), replicaConds ) ) ) )
replicaTableVals = [ DotData( SVPath = f ) for f in replicaTableFiles ]
assert all([ len( replicaTableVal ) == nreplicas for replicaTableVal in replicaTableVals ])
matchingReplicas = []
for replicaCond in map( operator.itemgetter( 1 ), replicaConds ):
replicaCondExpr = compile_expr( replicaCond )
replicasToUse = [ int( eval( replicaCondExpr, globals(), dict( zip( replicaTables, replicaTableRows ) ) ) )
for replicaTableRows in izip( *replicaTableVals ) ]
matchingReplicas.append( replicasToUse )
Records = []
condNames = tuple( map( operator.itemgetter( 0 ), replicaConds ) )
for replicaNum, condResults in enumerate( izip( *matchingReplicas ) ):
Records.append( ( replicaNum, ','.join( replicaCondName for condNum, replicaCondName
in enumerate( condNames )
if condResults[ condNum ] ) )
+ condResults )
IDotData( names = ( 'replicaNum', 'matchingConds' ) + condNames, Records = Records ).save( condsFileFN )
def DefineRulesTo_identifyReplicasMeetingCommonConds( pr, Ddata, thinSfx = '', allScens = GetSelectionScenarios(),
nreplicas = 100 ):
"""Define rules to identify replicas meeting common conditions such as all/lo/hi freq"""
for scenario in allScens:
pr.addInvokeRule( invokeFn = identifyReplicasMeetingConds,
invokeArgs = Dict( 'Ddata scenario nreplicas thinSfx',
replicaTables = ( 'replicaStats', ),
replicaConds = ( ( 'all', 'True' ),
( 'hi', 'replicaStats.causalAlleleFreq >= .5' ),
( 'lo', 'replicaStats.causalAlleleFreq < .5' ) ),
condsFileFN = 'commonReplicaConds.tsv' ) )
def splitSnpStatsFile( Ddata, scenario, inFileFN, condsFileFN, condNames, thinSfx = '',
replicaColName = 'Chrom', sfx = '', getio = None ):
"""Split a file containing per-snp data for all replicas, into separate files containing the same data for each
kind of replica."""
if not os.path.dirname( inFileFN ): inFileFN = os.path.join( Ddata, scenario.scenDir(), inFileFN )
if not os.path.dirname( condsFileFN ):
condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(), condsFileFN )
outFileFNs = [ AddFileSfx( inFileFN, sfx, condName ) for condName in condNames ]
if getio: return dict( depends_on = ( inFileFN, condsFileFN ),
creates = outFileFNs, mediumRuleNameSfx = scenario.scenDir() )
condsFile = IDotData( condsFileFN )
inFile = IDotData( inFileFN )
with contextlib.nested( *map( functools.partial( IDotData.openForWrite, headings = inFile.headings ),
outFileFNs ) ) as outFiles:
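        # walk the per-SNP rows grouped by replica in lockstep with the per-replica conditions table;
        # this assumes both are ordered by replica number (checked by the assert below)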
for (replica, replicaRows), condValues in izip( inFile.groupby( replicaColName, multiPass = False ), condsFile ):
assert condValues.replicaNum == replica
# if this replica matches more than one condition, save the replica rows so we can iterate over them more
# than once
if sum( condValues[ condNames ] ) > 1: replicaRows = tuple( replicaRows )
for condName, outFile in zip( condNames, outFiles ):
if condValues[ condName ]: outFile.writeRecords( replicaRows )
def joinSnpStatsFiles( Ddata, scenario, outFileFN, condNames, condsFileFN, thinSfx = '',
replicaColName = 'Chrom', sfx = '', getio = None ):
"""Join several per-snp stats files, each containing data for some of the replicas,
into a single file containing data for all the replicas.
"""
if not os.path.dirname( outFileFN ): outFileFN = os.path.join( Ddata, scenario.scenDir(), outFileFN )
if not os.path.dirname( condsFileFN ): condsFileFN = os.path.join( Ddata, 'replicastats' + thinSfx, scenario.scenDir(),
condsFileFN )
inFileFNs = [ AddFileSfx( outFileFN, sfx, condName ) for condName in condNames ]
if getio: return dict( depends_on = [ condsFileFN ] + inFileFNs, creates = outFileFN,
mediumRuleNameSfx = scenario.scenDir() )
inFiles = map( IDotData, inFileFNs )
dbg( 'inFiles' )
condsFile = IDotData( condsFileFN )
groupIters = [ inFile.groupby( replicaColName ) for inFile in inFiles ]
def getBlocks():
for r in condsFile:
for condName, groupIter in zip( condNames, groupIters ):
if r[ condName ]:
replicaNum, replicaRows = next( groupIter )
assert replicaNum == r.replicaNum
yield replicaRows
break
IDotData.vstackFromIterable( getBlocks() ).save( outFileFN )
def ScenAttrs( scen ):
"""Make a dictionary describing the attributes of a scenario"""
scenAttrs = dict( scen = scen, is_neutral = scen.is_neutral(), isNeutral = scen.isNeutral() )
if not scen.is_neutral(): scenAttrs.update( mutAge = scen.mutAge,
mutPop = scen.mutPop,
mutFreq = scen.mutFreq )
return scenAttrs
def scatterPlotReplicaStatistic( Ddata, nreplicas, replicaStatX,
replicaStatY,
outFile,
thinSfx = '',
scenCond = 'True',
replicaTables = (), replicaCond = 'True',
replicaColorings = (),
replicaDefaultColor = 'b',
replicaShow = None,
allScens = tuple( GetScenarios() ), nameSfx = '',
scen2sfxs = {},
title = '', subtitle = '',
highlightScen = None, highlightReplica = None,
xbound = None, ybound = None,
getio = None ):
"""Draw a scatter plot where for each replica we have a pair of values.
"""
args = Dict( 'Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens' )
if getio: return dict( depends_on = findReplicasMatchingConds( getio = True, **args )[ 'depends_on' ],
creates = outFile,
name = 'scatterPlotReplicaStatistic' + Sfx( nameSfx ),
mediumRuleNameSfx = ( replicaStatX, replicaStatY ), attrs = dict( piperun_short = True ) )
x = []
y = []
urls = []
nskipped = 0
colors = []
if IsSeq( replicaShow ): replicaShow = '"_".join(map(str,["%.2f" % v if isinstance(v,float) else v for v in (' + ','.join( map( str, replicaShow ) ) + ')]))'
for r in findReplicasMatchingConds( showHeadings = ( 'valX', 'valY', 'valShow' ) + tmap( operator.itemgetter( 0 ),
replicaColorings ),
showVals = ( replicaStatX, replicaStatY,
replicaShow if replicaShow is not None else '0' ) +
tmap( operator.itemgetter( 1 ), replicaColorings ),
**args ):
x.append( r.valX )
y.append( r.valY )
urls.append( '%s_%d_x=%s_y=%s' % ( r.scenario, r.replicaNum,
'%.2f' % r.valX if isinstance( r.valX, float ) else r.valX,
'%.2f' % r.valY if isinstance( r.valY, float ) else r.valY ) +
( ( '' if str( r.valShow).startswith('_') else '_' ) + str( r.valShow ) if replicaShow else '' ) )
if replicaColorings:
colorHere = None
for name, cond, color in replicaColorings:
if r[ name ]:
colorHere = color
break
colors.append( colorHere if colorHere is not None else replicaDefaultColor )
pp.scatter( **Dict( 'x y urls', c = colors if colors else 'b' ) )
pp.axis( 'equal' )
if xbound: pp.gca().set_xbound( *xbound )
if ybound: pp.gca().set_ybound( *ybound )
if not xbound and not ybound:
start = min( min(x), min(y) )
rng = max( max( x ) - min( x ), max(y) - min(y) )
pp.plot( [ start, start+rng ], [ start, start+rng ], 'g--' )
pp.xlabel( replicaStatX )
pp.ylabel( replicaStatY )
if title: pp.title( title )
pp.savefig( outFile )
def findTableFiles( Ddata, thinSfx, whichStats, tables, scenCond, allScens, scen2sfxs ):
"""Return table files used in conditions"""
tables = MakeSeq( tables )
scen2sfxs = dict( scen2sfxs )
scenCondExpr = compile_expr( scenCond )
ourScens = [ scen for scen in allScens if eval( scenCondExpr, globals(), ScenAttrs( scen ) ) ]
depends_on = []
scen2table2file = {}
for scen in ourScens:
thisScenDict = {}
for table in tables:
# identify the scenario-specific suffix for this table
scenSfx = DictGet( scen2sfxs, scen, '' )
if scenSfx:
if IsSeq( scenSfx ): scenSfx = dict( scenSfx )
if not isinstance( scenSfx, types.StringTypes ): scenSfx = DictGet( dict( scenSfx ),
os.path.splitext( table )[0], '' )
tableFile = os.path.join( Ddata, whichStats+ thinSfx, scen.scenDir(),
AddFileSfx( table + ( '.tsv' if '.' not in table else '' ),
scenSfx )
+ ( '/' if table.endswith( '.data' ) else '' ) )
depends_on.append( tableFile )
thisScenDict[ table ] = tableFile
scen2table2file[ scen ] = thisScenDict
tableNames = map( operator.itemgetter( 0 ), map( os.path.splitext, tables ) )
return tableNames, tables, ourScens, scen2table2file, depends_on
def FindChromCol( iDotData ):
"""Find the column representing the replica or chromosome, based on our conventions."""
return 'replicaNum' if 'replicaNum' in iDotData.headings else ( 'Chrom' if 'Chrom' in iDotData.headings else 'chrom' )
def FindPosCol( iDotData ):
"""Find the column representing the SNP position, based on our conventions."""
return 'Pos' if 'Pos' in iDotData.headings else 'pos'
class NameCollector(ast.NodeVisitor):
"""Gather table names used in an expression"""
def __init__(self):
self.names = []
def visit_Name(self, node):
self.names.append( node.id )
@staticmethod
def getNamesIn( expr ):
nc = NameCollector()
nc.visit( ast.parse( expr ) )
return tuple( set( nc.names ) )
def FindTables( *exprs ):
"""Find tables referenced in specified expressions"""
return tuple( set( reduce( operator.concat, map( NameCollector.getNamesIn, exprs ) ) ) - set( ( 'True', 'False' ) ) )
def findReplicasMatchingConds( Ddata,
replicaTables = None, replicaCond = 'True',
outFile = None,
scenCond = 'True',
showHeadings = (),
showVals = (),
allScens = GetScenarios(),
scen2sfxs = {},
thinSfx = '',
getio = None ):
"""Make an IDotData containing specified per-replica values for replicas meeting specified conditions."""
dbg( '"findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs' )
if replicaTables is None: replicaTables = FindTables( replicaCond, *MakeSeq( showVals ) )
replicaTables = tuple( set( MakeSeq( replicaTables ) ) )
replicaTableNames, replicaTables, ourScens, scen2table2file, depends_on = \
findTableFiles( whichStats = 'replicastats', tables = replicaTables,
**Dict( 'Ddata thinSfx scenCond allScens scen2sfxs' ) )
if getio: return dict( depends_on = depends_on,
creates = outFile,
attrs = dict( piperun_short = True ) )
replicaCondExpr = compile_expr( replicaCond )
showVals = MakeSeq( showVals )
showValsExpr = map( compile_expr, showVals )
if not showHeadings:
showHeadings = map( MakeAlphaNum, showVals )
showHeadings2 = []
for h in showHeadings:
h_new = h
i = 1
while h_new in showHeadings2:
h_new = h + Sfx( i )
i += 1
showHeadings2.append( h_new )
showHeadings = showHeadings2
def makeResult():
yield ( 'scenario', 'replicaNum' ) + tuple( MakeSeq( showHeadings ) )
numReplicasSkippedTot, numReplicasAllowedTot = 0, 0
for scen in ourScens:
logging.info( '"findReplicasMatchingConds" scen' )
numReplicasSkipped, numReplicasAllowed = 0, 0
thisScenDict = scen2table2file[ scen ]
replicaTableVals = [ IDotData( thisScenDict[ replicaTable ] ) for replicaTable in replicaTables ]
for replicaTableRows in \
IDotData.TableIterInnerJoinAuxAsTuples( tableIters = map( iter, replicaTableVals ),
cols = map( FindChromCol, replicaTableVals ),
blanks = ( None, ) * len( replicaTableVals ),
headingLens = map( IDotData.rootClass.numCols,
replicaTableVals ) ):
vdict = dict( zip( replicaTableNames, replicaTableRows ) )
dbg( 'scen vdict' )
evalHere = lambda expr: eval( expr, globals(), vdict )
if evalHere( replicaCondExpr ):
numReplicasAllowed += 1
yield [ scen.scenName(), replicaTableRows[0].replicaNum ] + map( evalHere, showValsExpr )
else:
numReplicasSkipped += 1
dbg( '"in_scenario" scen numReplicasSkipped numReplicasAllowed' )
numReplicasSkippedTot += numReplicasSkipped
numReplicasAllowedTot += numReplicasAllowed
dbg( 'numReplicasSkippedTot numReplicasAllowedTot' )
r = IDotData.fromFn( makeResult )
if outFile: r.save( outFile )
return r
def findSnpsMatchingConds( Ddata,
snpTables = (), snpCond = 'True', replicaTables = (), replicaCond = 'True',
outFile = None,
scenCond = 'True',
showHeadings = (),
showVals = (),
allScens = GetScenarios(),
scen2sfxs = {},
thinSfx = '',
getio = None ):
"""Make an IDotData containing specified per-replica values for SNPs meeting specified conditions in
replicas meeting specified conditions."""
snpTables = tuple( set( MakeSeq( snpTables ) ) )
dbg( '"findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals '
'scen2sfxs' )
replicaArgs = Dict( 'Ddata thinSfx scenCond allScens scen2sfxs' )
snpTableNames, snpTables, ourScens, scen2table2file, depends_on = \
findTableFiles( whichStats = 'snpStats', tables = snpTables, **replicaArgs )
if getio: return dict( depends_on = depends_on + findTableFiles( whichStats = 'replicastats', tables = replicaTables,
**replicaArgs )[-1],
creates = outFile )
snpCondExpr = compile_expr( snpCond )
showVals = MakeSeq( showVals )
showValsExpr = map( compile_expr, showVals )
if not showHeadings: showHeadings = map( MakeAlphaNum, showVals )
numSnpsSkippedTot, numSnpsAllowedTot = 0, 0
def makeResult():
yield ( 'scenario', 'replicaNum', 'Pos' ) + tuple( MakeSeq( showHeadings ) )
for scen in ourScens:
dbg( '"findSnpsMatchingConds" scen ')
numSnpsAllowed, numSnpsSkipped = 0, 0
replicasHere = findReplicasMatchingConds( **MergeDicts( replicaArgs,
Dict( 'replicaTables replicaCond scenCond',
allScens = ( scen, ) ) ) )
replicasHereSet = frozenset( replicasHere.replicaNum )
dbg( 'scen len(replicasHereSet) replicasHereSet' )
thisScenDict = scen2table2file[ scen ]
dbg( '#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]' )
snpTableVals = [ IDotData( thisScenDict[ snpTable ] ) for snpTable in snpTables ]
lastReplica = None
lastReplicaResult = None
replicaCol = FindChromCol( snpTableVals[ 0 ] )
posCol = FindPosCol( snpTableVals[ 0 ] )
numSnpsSkippedTot, numSnpsAllowedTot = 0, 0
for snpTableRows in \
IDotData.TableIterInnerJoinAuxAsTuples( tableIters = map( iter, snpTableVals ),
cols = zip( map( FindChromCol, snpTableVals ),
map( FindPosCol, snpTableVals ) ),
blanks = ( None, ) * len( snpTableVals ),
headingLens = map( IDotData.rootClass.numCols,
snpTableVals ) ):
thisReplica = snpTableRows[0][ replicaCol ]
if thisReplica != lastReplica:
thisReplicaResult = ( thisReplica in replicasHereSet )
if not thisReplicaResult: dbg( '"SKIPPING_REPLICA" thisReplica' )
lastReplicaResult = thisReplicaResult
lastReplica = thisReplica
if thisReplicaResult:
localDict = dict( zip( snpTableNames, snpTableRows ) )
evalHere = lambda expr: eval( expr, globals(), localDict )
evalResult = evalHere( snpCondExpr )
if evalResult:
v = [ scen.scenName(), thisReplica, snpTableRows[0][ posCol ] ] \
+ map( evalHere, showValsExpr )
numSnpsAllowed += 1
yield v
else: numSnpsSkipped += 1
numSnpsSkippedTot += numSnpsSkipped
numSnpsAllowedTot += numSnpsAllowed
dbg( 'scen numSnpsSkippedTot numSnpsAllowedTot' )
dbg( '"finalCount" numSnpsSkippedTot numSnpsAllowedTot' )
r = IDotData.fromFn( makeResult )
if outFile: r.save( outFile )
return r
def gatherCausalStat( Ddata, scenario, snpStatFN, replicaCol = 'Chrom', posCol = 'Pos', getio = None ):
"""Gather a specified per-SNP statistic just for the causal SNPs, and write them out as a replicastat.
"""
replicaStatFN = string.replace( snpStatFN, 'snpStats', 'replicastats', 1 )
if getio: return dict( depends_on = snpStatFN, creates = replicaStatFN, attrs = dict( scenario = scenario.scenDir() ) )
snpStatFile = IDotData( snpStatFN )
with IDotData.openForWrite( replicaStatFN, snpStatFile.headings ) as replicaStatFile:
for r in snpStatFile:
if r[ posCol ] == CAUSAL_POS:
replicaStatFile.writeRecord( r )
def DefineRulesTo_gatherCausalStat( pr, Ddata, scen2snpStatFN, posCol = 'Pos' ):
"""Define rules to gather a specified per-SNP statistic for the causal SNPs into a replica stat."""
for scenario, snpStatFN in scen2snpStatFN.items():
pr.addInvokeRule( invokeFn = gatherCausalStat, invokeArgs = Dict( 'Ddata scenario snpStatFN posCol' ) )
|
[
"Operations.Shari_Operations.localize.Scenario.GetScenarios",
"Operations.MiscUtil.dbg",
"matplotlib.pyplot.ylabel",
"Operations.MiscUtil.compile_expr",
"Operations.MiscUtil.ReplaceFileExt",
"itertools.izip",
"Operations.MiscUtil.Dict",
"operator.itemgetter",
"logging.info",
"Operations.Shari_Operations.localize.Scenario.GetSelectionScenarios",
"Classes.DotData.DotData",
"string.replace",
"Operations.MiscUtil.Histogrammer.load",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"Operations.MiscUtil.DictGet",
"Operations.MiscUtil.Sfx",
"matplotlib.pyplot.axis",
"ast.parse",
"Operations.IDotData.IDotData",
"Operations.MiscUtil.AddFileSfx",
"matplotlib.pyplot.savefig",
"matplotlib.use",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.figlegend",
"os.path.splitext",
"Operations.MiscUtil.Histogrammer",
"matplotlib.pyplot.subplot",
"os.path.dirname",
"numpy.isnan",
"Operations.MiscUtil.MakeSeq",
"matplotlib.pyplot.title",
"Operations.IDotData.IDotData.fromFn",
"matplotlib.pyplot.subplots_adjust",
"Operations.IDotData.IDotData.openForWrite",
"os.path.join",
"matplotlib.pyplot.figure",
"functools.partial",
"os.path.basename",
"Operations.MiscUtil.IsSeq",
"matplotlib.pyplot.hold"
] |
[((871, 892), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (885, 892), False, 'import matplotlib\n'), ((4643, 4683), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', (['mutAges', 'mutPops', 'mutFreqs'], {}), '(mutAges, mutPops, mutFreqs)\n', (4655, 4683), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((5606, 5628), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaTables'], {}), '(replicaTables)\n', (5613, 5628), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5647, 5665), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpTables'], {}), '(snpTables)\n', (5654, 5665), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5695, 5720), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['replicaCond'], {}), '(replicaCond)\n', (5707, 5720), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5741, 5762), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['snpCond'], {}), '(snpCond)\n', (5753, 5762), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5783, 5804), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['snpStat'], {}), '(snpStat)\n', (5795, 5804), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5822, 5846), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx'], {}), '(outFile, sfx)\n', (5832, 5846), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5868, 5896), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (5878, 5896), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((5911, 5925), 'Operations.MiscUtil.IsSeq', 'IsSeq', (['scenSfx'], {}), '(scenSfx)\n', (5916, 5925), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7686, 7734), 'Operations.MiscUtil.Histogrammer', 'Histogrammer', ([], {'binSize': 'binSize', 'binShift': 'binShift'}), '(binSize=binSize, binShift=binShift)\n', (7698, 7734), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7791, 7810), 'itertools.izip', 'izip', (['*snpTableVals'], {}), '(*snpTableVals)\n', (7795, 7810), False, 'from itertools import izip\n'), ((8562, 8607), 'logging.info', 'logging.info', (['"""saving histogram to """', 'outFile'], 
{}), "('saving histogram to ', outFile)\n", (8574, 8607), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((8920, 8934), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (8932, 8934), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((9522, 9562), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx', 'replicaCondSfx'], {}), '(outFile, sfx, replicaCondSfx)\n', (9532, 9562), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((9584, 9612), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (9594, 9612), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((9627, 9702), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens"""'], {}), "('Ddata thinSfx replicaTables scenCond replicaCond scen2sfxs allScens')\n", (9631, 9702), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((10102, 10131), 'Operations.MiscUtil.Histogrammer', 'Histogrammer', ([], {'binSize': 'binSize'}), '(binSize=binSize)\n', (10114, 10131), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((10541, 10555), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (10553, 10555), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((11137, 11189), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx', 'replicaCondSfx', 'snpCondSfx'], {}), '(outFile, sfx, replicaCondSfx, snpCondSfx)\n', (11147, 11189), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11211, 11239), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (11221, 11239), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11254, 11357), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens"""'], {}), "(\n 'Ddata thinSfx snpTables snpCond replicaTables scenCond replicaCond scen2sfxs allScens'\n )\n", (11258, 11357), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11742, 11771), 'Operations.MiscUtil.Histogrammer', 'Histogrammer', ([], {'binSize': 'binSize'}), '(binSize=binSize)\n', (11754, 11771), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, 
MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((12082, 12110), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (12092, 12110), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13463, 13481), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['histFiles'], {}), '(histFiles)\n', (13470, 13481), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13614, 13638), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx'], {}), '(outFile, sfx)\n', (13624, 13638), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13933, 13962), 'matplotlib.pyplot.figure', 'pp.figure', (['(1)'], {'figsize': 'figSize'}), '(1, figsize=figSize)\n', (13942, 13962), True, 'import matplotlib.pyplot as pp\n'), ((16512, 16531), 'matplotlib.pyplot.savefig', 'pp.savefig', (['outFile'], {}), '(outFile)\n', (16522, 16531), True, 'import matplotlib.pyplot as pp\n'), ((17136, 17154), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['histFiles'], {}), '(histFiles)\n', (17143, 17154), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((17630, 17659), 'matplotlib.pyplot.figure', 'pp.figure', (['(1)'], {'figsize': '(18, 6)'}), '(1, figsize=(18, 6))\n', (17639, 17659), True, 'import matplotlib.pyplot as pp\n'), ((17681, 17712), 'matplotlib.pyplot.subplots_adjust', 'pp.subplots_adjust', ([], {'bottom': '(0.37)'}), '(bottom=0.37)\n', (17699, 17712), True, 'import matplotlib.pyplot as pp\n'), ((17722, 17752), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (["(xlabel + '\\n\\n\\n\\n')"], {}), "(xlabel + '\\n\\n\\n\\n')\n", (17731, 17752), True, 'import matplotlib.pyplot as pp\n'), ((17759, 17776), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['ylabel'], {}), '(ylabel)\n', (17768, 17776), True, 'import matplotlib.pyplot as pp\n'), ((17784, 17797), 'matplotlib.pyplot.hold', 'pp.hold', (['(True)'], {}), '(True)\n', (17791, 17797), True, 'import matplotlib.pyplot as pp\n'), ((18840, 18855), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (18848, 18855), True, 'import matplotlib.pyplot as pp\n'), ((19076, 19095), 'matplotlib.pyplot.savefig', 'pp.savefig', (['outFile'], {}), '(outFile)\n', (19086, 19095), True, 'import matplotlib.pyplot as pp\n'), ((19469, 19483), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (19481, 19483), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((21085, 21107), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['scenCond'], {}), '(scenCond)\n', (21097, 21107), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21130, 21151), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaConds'], {}), '(replicaConds)\n', (21137, 21151), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, 
Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21177, 21202), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaCondsSfxs'], {}), '(replicaCondsSfxs)\n', (21184, 21202), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21225, 21242), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpConds'], {}), '(snpConds)\n', (21232, 21242), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21264, 21285), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpCondsSfxs'], {}), '(snpCondsSfxs)\n', (21271, 21285), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((21353, 21377), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', 'sfx'], {}), '(outFile, sfx)\n', (21363, 21377), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((28874, 28896), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['scenCond'], {}), '(scenCond)\n', (28886, 28896), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((29126, 29147), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaConds'], {}), '(replicaConds)\n', (29133, 29147), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((29173, 29198), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaCondsSfxs'], {}), '(replicaCondsSfxs)\n', (29180, 29198), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((32491, 32513), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaTables'], {}), '(replicaTables)\n', (32498, 32513), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((34530, 34553), 'Operations.Shari_Operations.localize.Scenario.GetSelectionScenarios', 'GetSelectionScenarios', ([], {}), '()\n', (34551, 34553), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((36220, 36241), 'Operations.IDotData.IDotData', 'IDotData', (['condsFileFN'], {}), '(condsFileFN)\n', (36228, 36241), False, 'from Operations.IDotData import IDotData\n'), ((36257, 36275), 'Operations.IDotData.IDotData', 'IDotData', (['inFileFN'], {}), '(inFileFN)\n', (36265, 36275), False, 'from Operations.IDotData import IDotData\n'), ((38019, 38033), 'Operations.MiscUtil.dbg', 'dbg', (['"""inFiles"""'], {}), "('inFiles')\n", (38022, 38033), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, 
MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((38053, 38074), 'Operations.IDotData.IDotData', 'IDotData', (['condsFileFN'], {}), '(condsFileFN)\n', (38061, 38074), False, 'from Operations.IDotData import IDotData\n'), ((39965, 40040), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens"""'], {}), "('Ddata thinSfx replicaTables scenCond scen2sfxs replicaCond allScens')\n", (39969, 40040), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((40474, 40492), 'Operations.MiscUtil.IsSeq', 'IsSeq', (['replicaShow'], {}), '(replicaShow)\n', (40479, 40492), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((42017, 42033), 'matplotlib.pyplot.axis', 'pp.axis', (['"""equal"""'], {}), "('equal')\n", (42024, 42033), True, 'import matplotlib.pyplot as pp\n'), ((42342, 42365), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['replicaStatX'], {}), '(replicaStatX)\n', (42351, 42365), True, 'import matplotlib.pyplot as pp\n'), ((42372, 42395), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['replicaStatY'], {}), '(replicaStatY)\n', (42381, 42395), True, 'import matplotlib.pyplot as pp\n'), ((42434, 42453), 'matplotlib.pyplot.savefig', 'pp.savefig', (['outFile'], {}), '(outFile)\n', (42444, 42453), True, 'import matplotlib.pyplot as pp\n'), ((42608, 42623), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['tables'], {}), '(tables)\n', (42615, 42623), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((42684, 42706), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['scenCond'], {}), '(scenCond)\n', (42696, 42706), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((45372, 45386), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (45384, 45386), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((45642, 45748), 'Operations.MiscUtil.dbg', 'dbg', (['""""findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs"""'], {}), '(\'"findReplicasMatchingConds" scenCond replicaTables replicaCond showHeadings showVals scen2sfxs\'\n )\n', (45645, 45748), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46337, 46362), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['replicaCond'], {}), '(replicaCond)\n', (46349, 46362), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46380, 46397), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showVals'], {}), '(showVals)\n', (46387, 46397), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, 
PrintDictDiff\n'), ((48625, 48652), 'Operations.IDotData.IDotData.fromFn', 'IDotData.fromFn', (['makeResult'], {}), '(makeResult)\n', (48640, 48652), False, 'from Operations.IDotData import IDotData\n'), ((49057, 49071), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (49069, 49071), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((49414, 49534), 'Operations.MiscUtil.dbg', 'dbg', (['""""findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals scen2sfxs"""'], {}), '(\'"findSnpsMatchingConds" scenCond snpTables snpCond replicaTables replicaCond showHeadings showVals scen2sfxs\'\n )\n', (49417, 49534), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((49563, 49612), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx scenCond allScens scen2sfxs"""'], {}), "('Ddata thinSfx scenCond allScens scen2sfxs')\n", (49567, 49612), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50055, 50076), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['snpCond'], {}), '(snpCond)\n', (50067, 50076), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50094, 50111), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showVals'], {}), '(showVals)\n', (50101, 50111), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53279, 53306), 'Operations.IDotData.IDotData.fromFn', 'IDotData.fromFn', (['makeResult'], {}), '(makeResult)\n', (53294, 53306), False, 'from Operations.IDotData import IDotData\n'), ((53598, 53654), 'string.replace', 'string.replace', (['snpStatFN', '"""snpStats"""', '"""replicastats"""', '(1)'], {}), "(snpStatFN, 'snpStats', 'replicastats', 1)\n", (53612, 53654), False, 'import itertools, string\n'), ((53801, 53820), 'Operations.IDotData.IDotData', 'IDotData', (['snpStatFN'], {}), '(snpStatFN)\n', (53809, 53820), False, 'from Operations.IDotData import IDotData\n'), ((5986, 6105), 'os.path.join', 'os.path.join', (['Ddata', "('replicastats' + thinSfx)", 'scenDir', "(replicaTable + ('.tsv' if '.' not in replicaTable else ''))"], {}), "(Ddata, 'replicastats' + thinSfx, scenDir, replicaTable + (\n '.tsv' if '.' 
not in replicaTable else ''))\n", (5998, 6105), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((7327, 7344), 'Classes.DotData.DotData', 'DotData', ([], {'SVPath': 'f'}), '(SVPath=f)\n', (7334, 7344), False, 'from Classes.DotData import DotData\n'), ((7614, 7632), 'Operations.IDotData.IDotData', 'IDotData', ([], {'SVPath': 'f'}), '(SVPath=f)\n', (7622, 7632), False, 'from Operations.IDotData import IDotData\n'), ((13560, 13596), 'Operations.MiscUtil.ReplaceFileExt', 'ReplaceFileExt', (['histFiles[0]', '""".png"""'], {}), "(histFiles[0], '.png')\n", (13574, 13596), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((14149, 14176), 'matplotlib.pyplot.subplot', 'pp.subplot', (['(2)', '(1)', '(which + 1)'], {}), '(2, 1, which + 1)\n', (14159, 14176), True, 'import matplotlib.pyplot as pp\n'), ((14188, 14205), 'matplotlib.pyplot.xlabel', 'pp.xlabel', (['xlabel'], {}), '(xlabel)\n', (14197, 14205), True, 'import matplotlib.pyplot as pp\n'), ((14216, 14233), 'matplotlib.pyplot.ylabel', 'pp.ylabel', (['ylabel'], {}), '(ylabel)\n', (14225, 14233), True, 'import matplotlib.pyplot as pp\n'), ((14245, 14258), 'matplotlib.pyplot.hold', 'pp.hold', (['(True)'], {}), '(True)\n', (14252, 14258), True, 'import matplotlib.pyplot as pp\n'), ((16261, 16276), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (16269, 16276), True, 'import matplotlib.pyplot as pp\n'), ((17233, 17269), 'Operations.MiscUtil.ReplaceFileExt', 'ReplaceFileExt', (['histFiles[0]', '""".png"""'], {}), "(histFiles[0], '.png')\n", (17247, 17269), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((17974, 18001), 'Operations.MiscUtil.Histogrammer.load', 'Histogrammer.load', (['histFile'], {}), '(histFile)\n', (17991, 18001), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((18554, 18608), 'matplotlib.pyplot.plot', 'pp.plot', (['binLefts', 'binCounts'], {'label': 'label', 'color': 'color'}), '(binLefts, binCounts, label=label, color=color)\n', (18561, 18608), True, 'import matplotlib.pyplot as pp\n'), ((18900, 18970), 'matplotlib.pyplot.figlegend', 'pp.figlegend', ([], {'loc': '"""lower center"""', 'labels': 'theLabels', 'handles': 'theHandles'}), "(loc='lower center', labels=theLabels, handles=theHandles)\n", (18912, 18970), True, 'import matplotlib.pyplot as pp\n'), ((20992, 21016), 'os.path.dirname', 'os.path.dirname', (['outFile'], {}), '(outFile)\n', (21007, 21016), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((21030, 21058), 'os.path.join', 'os.path.join', (['Ddata', 'outFile'], {}), '(Ddata, outFile)\n', (21042, 21058), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((26771, 26785), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (26783, 26785), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((28781, 28805), 'os.path.dirname', 'os.path.dirname', (['outFile'], {}), '(outFile)\n', (28796, 28805), False, 'import operator, os, logging, 
contextlib, functools, collections, types, ast\n'), ((28819, 28847), 'os.path.join', 'os.path.join', (['Ddata', 'outFile'], {}), '(Ddata, outFile)\n', (28831, 28847), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((32803, 32831), 'os.path.dirname', 'os.path.dirname', (['condsFileFN'], {}), '(condsFileFN)\n', (32818, 32831), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((33333, 33350), 'Classes.DotData.DotData', 'DotData', ([], {'SVPath': 'f'}), '(SVPath=f)\n', (33340, 33350), False, 'from Classes.DotData import DotData\n'), ((33541, 33563), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (33560, 33563), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((33609, 33634), 'Operations.MiscUtil.compile_expr', 'compile_expr', (['replicaCond'], {}), '(replicaCond)\n', (33621, 33634), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((34015, 34038), 'itertools.izip', 'izip', (['*matchingReplicas'], {}), '(*matchingReplicas)\n', (34019, 34038), False, 'from itertools import izip\n'), ((35721, 35746), 'os.path.dirname', 'os.path.dirname', (['inFileFN'], {}), '(inFileFN)\n', (35736, 35746), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((35824, 35852), 'os.path.dirname', 'os.path.dirname', (['condsFileFN'], {}), '(condsFileFN)\n', (35839, 35852), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((35979, 36014), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['inFileFN', 'sfx', 'condName'], {}), '(inFileFN, sfx, condName)\n', (35989, 36014), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((37353, 37379), 'os.path.dirname', 'os.path.dirname', (['outFileFN'], {}), '(outFileFN)\n', (37368, 37379), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((37459, 37487), 'os.path.dirname', 'os.path.dirname', (['condsFileFN'], {}), '(condsFileFN)\n', (37474, 37487), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((37747, 37783), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFileFN', 'sfx', 'condName'], {}), '(outFileFN, sfx, condName)\n', (37757, 37783), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((39535, 39549), 'Operations.Shari_Operations.localize.Scenario.GetScenarios', 'GetScenarios', ([], {}), '()\n', (39547, 39549), False, 'from Operations.Shari_Operations.localize.Scenario import GetScenarios, GetSelectionScenarios\n'), ((42268, 42326), 'matplotlib.pyplot.plot', 'pp.plot', (['[start, start + rng]', '[start, start + rng]', '"""g--"""'], {}), "([start, start + rng], [start, start + rng], 'g--')\n", (42275, 42326), True, 'import matplotlib.pyplot as pp\n'), ((42412, 42427), 'matplotlib.pyplot.title', 'pp.title', (['title'], {}), '(title)\n', (42420, 42427), True, 'import matplotlib.pyplot as pp\n'), ((43883, 43905), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (43902, 43905), False, 'import operator, os, 
logging, contextlib, functools, collections, types, ast\n'), ((48543, 48593), 'Operations.MiscUtil.dbg', 'dbg', (['"""numReplicasSkippedTot numReplicasAllowedTot"""'], {}), "('numReplicasSkippedTot numReplicasAllowedTot')\n", (48546, 48593), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53212, 53267), 'Operations.MiscUtil.dbg', 'dbg', (['""""finalCount" numSnpsSkippedTot numSnpsAllowedTot"""'], {}), '(\'"finalCount" numSnpsSkippedTot numSnpsAllowedTot\')\n', (53215, 53267), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53832, 53890), 'Operations.IDotData.IDotData.openForWrite', 'IDotData.openForWrite', (['replicaStatFN', 'snpStatFile.headings'], {}), '(replicaStatFN, snpStatFile.headings)\n', (53853, 53890), False, 'from Operations.IDotData import IDotData\n'), ((2643, 2805), 'Classes.DotData.DotData', 'DotData', ([], {'SVPath': 'posFileNames[replicaNum]', 'SVSkipFirstLines': '(1)', 'SVHeader': '(False)', 'names': "['SNP', 'CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2']"}), "(SVPath=posFileNames[replicaNum], SVSkipFirstLines=1, SVHeader=False,\n names=['SNP', 'CHROM', 'CHROM_POS', 'ALLELE1', 'FREQ1', 'ALLELE2', 'FREQ2']\n )\n", (2650, 2805), False, 'from Classes.DotData import DotData\n'), ((6929, 6955), 'os.path.splitext', 'os.path.splitext', (['snpTable'], {}), '(snpTable)\n', (6945, 6955), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((7530, 7553), 'itertools.izip', 'izip', (['*replicaTableVals'], {}), '(*replicaTableVals)\n', (7534, 7553), False, 'from itertools import izip\n'), ((8212, 8271), 'Operations.MiscUtil.dbg', 'dbg', (['"""replica useThisReplica histogramBuilder.getNumVals()"""'], {}), "('replica useThisReplica histogramBuilder.getNumVals()')\n", (8215, 8271), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((16328, 16398), 'matplotlib.pyplot.figlegend', 'pp.figlegend', ([], {'loc': '"""lower center"""', 'labels': 'theLabels', 'handles': 'theHandles'}), "(loc='lower center', labels=theLabels, handles=theHandles)\n", (16340, 16398), True, 'import matplotlib.pyplot as pp\n'), ((26239, 26301), 'Operations.MiscUtil.Dict', 'Dict', (['"""snpStat replicaConds snpConds scenCond subplots_adjust"""'], {}), "('snpStat replicaConds snpConds scenCond subplots_adjust')\n", (26243, 26301), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((31839, 31878), 'Operations.MiscUtil.Dict', 'Dict', (['"""replicaStat sfx subplots_adjust"""'], {}), "('replicaStat sfx subplots_adjust')\n", (31843, 31878), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((33926, 33948), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (33945, 33948), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((34334, 34410), 'Operations.IDotData.IDotData', 
'IDotData', ([], {'names': "(('replicaNum', 'matchingConds') + condNames)", 'Records': 'Records'}), "(names=('replicaNum', 'matchingConds') + condNames, Records=Records)\n", (34342, 34410), False, 'from Operations.IDotData import IDotData\n'), ((41961, 42006), 'Operations.MiscUtil.Dict', 'Dict', (['"""x y urls"""'], {'c': "(colors if colors else 'b')"}), "('x y urls', c=colors if colors else 'b')\n", (41965, 42006), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((43025, 43053), 'Operations.MiscUtil.DictGet', 'DictGet', (['scen2sfxs', 'scen', '""""""'], {}), "(scen2sfxs, scen, '')\n", (43032, 43053), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((44749, 44764), 'ast.parse', 'ast.parse', (['expr'], {}), '(expr)\n', (44758, 44764), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((45874, 45896), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['replicaTables'], {}), '(replicaTables)\n', (45881, 45896), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46091, 46140), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx scenCond allScens scen2sfxs"""'], {}), "('Ddata thinSfx scenCond allScens scen2sfxs')\n", (46095, 46140), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((47024, 47072), 'logging.info', 'logging.info', (['""""findReplicasMatchingConds" scen"""'], {}), '(\'"findReplicasMatchingConds" scen\')\n', (47036, 47072), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((48356, 48419), 'Operations.MiscUtil.dbg', 'dbg', (['""""in_scenario" scen numReplicasSkipped numReplicasAllowed"""'], {}), '(\'"in_scenario" scen numReplicasSkipped numReplicasAllowed\')\n', (48359, 48419), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((49380, 49398), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['snpTables'], {}), '(snpTables)\n', (49387, 49398), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50433, 50469), 'Operations.MiscUtil.dbg', 'dbg', (['""""findSnpsMatchingConds" scen """'], {}), '(\'"findSnpsMatchingConds" scen \')\n', (50436, 50469), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50898, 50946), 'Operations.MiscUtil.dbg', 'dbg', (['"""scen len(replicasHereSet) replicasHereSet"""'], {}), "('scen len(replicasHereSet) replicasHereSet')\n", (50901, 50946), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((51013, 51079), 
'Operations.MiscUtil.dbg', 'dbg', (['"""#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]"""'], {}), "('#[ ( thisScenDict[ snpTable ] ) for snpTable in snpTables ]')\n", (51016, 51079), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((53153, 53200), 'Operations.MiscUtil.dbg', 'dbg', (['"""scen numSnpsSkippedTot numSnpsAllowedTot"""'], {}), "('scen numSnpsSkippedTot numSnpsAllowedTot')\n", (53156, 53200), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((4784, 4836), 'Operations.MiscUtil.Dict', 'Dict', (['"""scen Ddata simsOut thinSfx thinExt nreplicas"""'], {}), "('scen Ddata simsOut thinSfx thinExt nreplicas')\n", (4788, 4836), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7180, 7223), 'Operations.MiscUtil.Dict', 'Dict', (['"""scenDir snpCond replicaCond snpStat"""'], {}), "('scenDir snpCond replicaCond snpStat')\n", (7184, 7223), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((8077, 8095), 'numpy.isnan', 'np.isnan', (['r0.Chrom'], {}), '(r0.Chrom)\n', (8085, 8095), True, 'import numpy as np\n'), ((18198, 18246), 'Operations.MiscUtil.dbg', 'dbg', (['"""hist.binSize binSize hist.binSize-binSize"""'], {}), "('hist.binSize binSize hist.binSize-binSize')\n", (18201, 18246), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((18624, 18695), 'Classes.DotData.DotData', 'DotData', ([], {'names': "('binLefts', 'binCounts')", 'Columns': '(binLefts, binCounts)'}), "(names=('binLefts', 'binCounts'), Columns=(binLefts, binCounts))\n", (18631, 18695), False, 'from Classes.DotData import DotData\n'), ((18994, 19002), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (19000, 19002), True, 'import matplotlib.pyplot as pp\n'), ((19040, 19048), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (19046, 19048), True, 'import matplotlib.pyplot as pp\n'), ((29450, 29475), 'os.path.basename', 'os.path.basename', (['outFile'], {}), '(outFile)\n', (29466, 29475), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((29683, 29850), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables replicaCond replicaStat nreplicas binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx"""'], {'outFile': 'totaledHistFile'}), "(\n 'Ddata thinSfx replicaTables replicaCond replicaStat nreplicas binSize scenCond scen2sfxs allScens nameSfx sfx replicaCondSfx'\n , outFile=totaledHistFile)\n", (29687, 29850), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((31705, 31717), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (31708, 31717), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, 
MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((33803, 33826), 'itertools.izip', 'izip', (['*replicaTableVals'], {}), '(*replicaTableVals)\n', (33807, 33826), False, 'from itertools import izip\n'), ((34858, 35111), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata scenario nreplicas thinSfx"""'], {'replicaTables': "('replicaStats',)", 'replicaConds': "(('all', 'True'), ('hi', 'replicaStats.causalAlleleFreq >= .5'), ('lo',\n 'replicaStats.causalAlleleFreq < .5'))", 'condsFileFN': '"""commonReplicaConds.tsv"""'}), "('Ddata scenario nreplicas thinSfx', replicaTables=('replicaStats',),\n replicaConds=(('all', 'True'), ('hi',\n 'replicaStats.causalAlleleFreq >= .5'), ('lo',\n 'replicaStats.causalAlleleFreq < .5')), condsFileFN=\n 'commonReplicaConds.tsv')\n", (34862, 35111), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((42052, 42060), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (42058, 42060), True, 'import matplotlib.pyplot as pp\n'), ((42098, 42106), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (42104, 42106), True, 'import matplotlib.pyplot as pp\n'), ((43099, 43113), 'Operations.MiscUtil.IsSeq', 'IsSeq', (['scenSfx'], {}), '(scenSfx)\n', (43104, 43113), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((45819, 45836), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showVals'], {}), '(showVals)\n', (45826, 45836), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((47231, 47267), 'Operations.IDotData.IDotData', 'IDotData', (['thisScenDict[replicaTable]'], {}), '(thisScenDict[replicaTable])\n', (47239, 47267), False, 'from Operations.IDotData import IDotData\n'), ((47963, 47980), 'Operations.MiscUtil.dbg', 'dbg', (['"""scen vdict"""'], {}), "('scen vdict')\n", (47966, 47980), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((51111, 51143), 'Operations.IDotData.IDotData', 'IDotData', (['thisScenDict[snpTable]'], {}), '(thisScenDict[snpTable])\n', (51119, 51143), False, 'from Operations.IDotData import IDotData\n'), ((54344, 54383), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata scenario snpStatFN posCol"""'], {}), "('Ddata scenario snpStatFN posCol')\n", (54348, 54383), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7111, 7139), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (['outFile', '"""stats"""'], {}), "(outFile, 'stats')\n", (7121, 7139), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((7920, 7937), 'numpy.isnan', 'np.isnan', (['r.Chrom'], {}), '(r.Chrom)\n', (7928, 7937), True, 'import numpy as np\n'), ((10057, 10069), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (10060, 10069), False, 'from 
Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((11697, 11709), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (11700, 11709), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((13690, 13709), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (13706, 13709), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((15822, 15844), 'Operations.MiscUtil.Dict', 'Dict', (['"""left color log"""'], {}), "('left color log')\n", (15826, 15844), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((16426, 16434), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (16432, 16434), True, 'import matplotlib.pyplot as pp\n'), ((16476, 16484), 'matplotlib.pyplot.gca', 'pp.gca', ([], {}), '()\n', (16482, 16484), True, 'import matplotlib.pyplot as pp\n'), ((17321, 17340), 'os.path.basename', 'os.path.basename', (['f'], {}), '(f)\n', (17337, 17340), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((26050, 26161), 'Operations.MiscUtil.Dict', 'Dict', (['"""xlabel ylabel title xbound ybound coarsenBy log outFile cumulative normed ticksCoarsen colors"""'], {}), "(\n 'xlabel ylabel title xbound ybound coarsenBy log outFile cumulative normed ticksCoarsen colors'\n )\n", (26054, 26161), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((31483, 31606), 'Operations.MiscUtil.Dict', 'Dict', (['"""xlabel ylabel title xbound ybound coarsenBy log outFile sfx ticksCoarsen cumulative normed cumulativeUpTo"""'], {}), "(\n 'xlabel ylabel title xbound ybound coarsenBy log outFile sfx ticksCoarsen cumulative normed cumulativeUpTo'\n )\n", (31487, 31606), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((36313, 36379), 'functools.partial', 'functools.partial', (['IDotData.openForWrite'], {'headings': 'inFile.headings'}), '(IDotData.openForWrite, headings=inFile.headings)\n', (36330, 36379), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((40262, 40274), 'Operations.MiscUtil.Sfx', 'Sfx', (['nameSfx'], {}), '(nameSfx)\n', (40265, 40274), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((40727, 40749), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (40746, 40749), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((41098, 41120), 'operator.itemgetter', 'operator.itemgetter', (['(1)'], {}), '(1)\n', (41117, 41120), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((43503, 43568), 'Operations.MiscUtil.AddFileSfx', 'AddFileSfx', (["(table + ('.tsv' if '.' 
not in table else ''))", 'scenSfx'], {}), "(table + ('.tsv' if '.' not in table else ''), scenSfx)\n", (43513, 43568), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46695, 46701), 'Operations.MiscUtil.Sfx', 'Sfx', (['i'], {}), '(i)\n', (46698, 46701), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((46885, 46906), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showHeadings'], {}), '(showHeadings)\n', (46892, 46906), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((50364, 50385), 'Operations.MiscUtil.MakeSeq', 'MakeSeq', (['showHeadings'], {}), '(showHeadings)\n', (50371, 50385), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((23132, 23157), 'os.path.basename', 'os.path.basename', (['outFile'], {}), '(outFile)\n', (23148, 23157), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((50673, 50733), 'Operations.MiscUtil.Dict', 'Dict', (['"""replicaTables replicaCond scenCond"""'], {'allScens': '(scen,)'}), "('replicaTables replicaCond scenCond', allScens=(scen,))\n", (50677, 50733), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((52310, 52347), 'Operations.MiscUtil.dbg', 'dbg', (['""""SKIPPING_REPLICA" thisReplica"""'], {}), '(\'"SKIPPING_REPLICA" thisReplica\')\n', (52313, 52347), False, 'from Operations.MiscUtil import Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n'), ((6547, 6573), 'os.path.splitext', 'os.path.splitext', (['snpTable'], {}), '(snpTable)\n', (6563, 6573), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((32681, 32711), 'os.path.splitext', 'os.path.splitext', (['replicaTable'], {}), '(replicaTable)\n', (32697, 32711), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((43328, 43351), 'os.path.splitext', 'os.path.splitext', (['table'], {}), '(table)\n', (43344, 43351), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((22009, 22034), 'os.path.basename', 'os.path.basename', (['outFile'], {}), '(outFile)\n', (22025, 22034), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((33256, 33278), 'operator.itemgetter', 'operator.itemgetter', (['(0)'], {}), '(0)\n', (33275, 33278), False, 'import operator, os, logging, contextlib, functools, collections, types, ast\n'), ((22488, 22618), 'Operations.MiscUtil.Dict', 'Dict', (['"""Ddata thinSfx replicaTables replicaCond snpTables snpCond snpStat nreplicas binSize binShift scenDir scenSfx sfx"""'], {}), "(\n 'Ddata thinSfx replicaTables replicaCond snpTables snpCond snpStat nreplicas binSize binShift scenDir scenSfx sfx'\n )\n", (22492, 22618), False, 'from Operations.MiscUtil import 
Dict, compile_expr, dbg, Histogrammer, AddFileSfx, ReplaceFileExt, MergeDicts, MakeSeq, SlurpFile, IsSeq, Sfx, MakeAlphaNum, DictGet, tmap, PrintDictDiff\n')]
|
import logging
import os
import random
import time
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from typing import List, Optional
import cv2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from IPython.display import Audio
from sklearn.metrics import average_precision_score, f1_score
from sklearn.model_selection import StratifiedKFold
import librosa
import librosa.display as display
import soundfile as sf
import utils
from catalyst.dl import Callback, CallbackOrder, State
class DFTBase(nn.Module):
def __init__(self):
"""Base class for DFT and IDFT matrix"""
super(DFTBase, self).__init__()
def dft_matrix(self, n):
(x, y) = np.meshgrid(np.arange(n), np.arange(n))
omega = np.exp(-2 * np.pi * 1j / n)
W = np.power(omega, x * y)
return W
def idft_matrix(self, n):
(x, y) = np.meshgrid(np.arange(n), np.arange(n))
omega = np.exp(2 * np.pi * 1j / n)
W = np.power(omega, x * y)
return W
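# Illustrative check (demo-only helper, not part of the model): the DFT matrix built
# above should reproduce numpy's FFT, i.e. W @ x equals np.fft.fft(x).
def _check_dft_matrix(n=8):
    x = np.random.randn(n)
    W = DFTBase().dft_matrix(n)               # W[k, m] = exp(-2j * pi * k * m / n)
    print(np.allclose(W @ x, np.fft.fft(x)))  # expected: True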
class STFT(DFTBase):
def __init__(
self,
n_fft=2048,
hop_length=None,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
freeze_parameters=True,
):
"""Implementation of STFT with Conv1d. The function has the same output
        as librosa.core.stft
"""
super(STFT, self).__init__()
assert pad_mode in ["constant", "reflect"]
self.n_fft = n_fft
self.center = center
self.pad_mode = pad_mode
# By default, use the entire frame
if win_length is None:
win_length = n_fft
# Set the default hop, if it's not already specified
if hop_length is None:
hop_length = int(win_length // 4)
fft_window = librosa.filters.get_window(window, win_length, fftbins=True)
# Pad the window out to n_fft size
        fft_window = librosa.util.pad_center(fft_window, size=n_fft)
# DFT & IDFT matrix
self.W = self.dft_matrix(n_fft)
out_channels = n_fft // 2 + 1
self.conv_real = nn.Conv1d(
in_channels=1,
out_channels=out_channels,
kernel_size=n_fft,
stride=hop_length,
padding=0,
dilation=1,
groups=1,
bias=False,
)
self.conv_imag = nn.Conv1d(
in_channels=1,
out_channels=out_channels,
kernel_size=n_fft,
stride=hop_length,
padding=0,
dilation=1,
groups=1,
bias=False,
)
self.conv_real.weight.data = torch.Tensor(
np.real(self.W[:, 0:out_channels] * fft_window[:, None]).T
)[:, None, :]
# (n_fft // 2 + 1, 1, n_fft)
self.conv_imag.weight.data = torch.Tensor(
np.imag(self.W[:, 0:out_channels] * fft_window[:, None]).T
)[:, None, :]
# (n_fft // 2 + 1, 1, n_fft)
if freeze_parameters:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
"""input: (batch_size, data_length)
Returns:
real: (batch_size, n_fft // 2 + 1, time_steps)
imag: (batch_size, n_fft // 2 + 1, time_steps)
"""
x = input[:, None, :] # (batch_size, channels_num, data_length)
if self.center:
x = F.pad(x, pad=(self.n_fft // 2, self.n_fft // 2), mode=self.pad_mode)
real = self.conv_real(x)
imag = self.conv_imag(x)
# (batch_size, n_fft // 2 + 1, time_steps)
real = real[:, None, :, :].transpose(2, 3)
imag = imag[:, None, :, :].transpose(2, 3)
# (batch_size, 1, time_steps, n_fft // 2 + 1)
return real, imag
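# Illustrative check (demo-only helper): the Conv1d-based STFT above is meant to match
# librosa.core.stft; the signal length and sample rate below are arbitrary placeholders.
def _check_stft(n_fft=2048, hop_length=512):
    y = np.random.randn(32000).astype(np.float32)           # one second of fake 32 kHz audio
    with torch.no_grad():
        real, imag = STFT(n_fft=n_fft, hop_length=hop_length)(torch.from_numpy(y)[None, :])
    ours = real[0, 0].numpy().T + 1j * imag[0, 0].numpy().T  # (n_fft // 2 + 1, time_steps)
    ref = librosa.stft(y, n_fft=n_fft, hop_length=hop_length,
                       window="hann", center=True, pad_mode="reflect")
    print(np.max(np.abs(ours - ref)))                        # should be close to 0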
class Spectrogram(nn.Module):
def __init__(
self,
n_fft=2048,
hop_length=None,
win_length=None,
window="hann",
center=True,
pad_mode="reflect",
power=2.0,
freeze_parameters=True,
):
"""Calculate spectrogram using pytorch. The STFT is implemented with
        Conv1d. The function has the same output as librosa.core.stft
"""
super(Spectrogram, self).__init__()
self.power = power
self.stft = STFT(
n_fft=n_fft,
hop_length=hop_length,
win_length=win_length,
window=window,
center=center,
pad_mode=pad_mode,
            freeze_parameters=freeze_parameters,
)
def forward(self, input):
"""input: (batch_size, 1, time_steps, n_fft // 2 + 1)
Returns:
spectrogram: (batch_size, 1, time_steps, n_fft // 2 + 1)
"""
(real, imag) = self.stft.forward(input)
# (batch_size, n_fft // 2 + 1, time_steps)
spectrogram = real ** 2 + imag ** 2
if self.power == 2.0:
pass
else:
            spectrogram = spectrogram ** (self.power / 2.0)
return spectrogram
class LogmelFilterBank(nn.Module):
def __init__(
self,
sr=32000,
n_fft=2048,
n_mels=64,
fmin=50,
fmax=14000,
is_log=True,
ref=1.0,
amin=1e-10,
top_db=80.0,
freeze_parameters=True,
):
"""Calculate logmel spectrogram using pytorch. The mel filter bank is
        the pytorch implementation of librosa.filters.mel
"""
super(LogmelFilterBank, self).__init__()
self.is_log = is_log
self.ref = ref
self.amin = amin
self.top_db = top_db
self.melW = librosa.filters.mel(
sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax
).T
# (n_fft // 2 + 1, mel_bins)
self.melW = nn.Parameter(torch.Tensor(self.melW))
if freeze_parameters:
for param in self.parameters():
param.requires_grad = False
def forward(self, input):
"""input: (batch_size, channels, time_steps)
Output: (batch_size, time_steps, mel_bins)
"""
# Mel spectrogram
mel_spectrogram = torch.matmul(input, self.melW)
# Logmel spectrogram
if self.is_log:
output = self.power_to_db(mel_spectrogram)
else:
output = mel_spectrogram
return output
def power_to_db(self, input):
"""Power to db, this function is the pytorch implementation of
        librosa.core.power_to_db
"""
ref_value = self.ref
log_spec = 10.0 * torch.log10(torch.clamp(input, min=self.amin, max=np.inf))
log_spec -= 10.0 * np.log10(np.maximum(self.amin, ref_value))
if self.top_db is not None:
if self.top_db < 0:
                # librosa raises ParameterError here; ValueError keeps this module self-contained
                raise ValueError("top_db must be non-negative")
log_spec = torch.clamp(
log_spec, min=log_spec.max().item() - self.top_db, max=np.inf
)
return log_spec
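# Illustrative check (demo-only helper): power_to_db above mirrors librosa.power_to_db
# with the same ref/amin/top_db defaults.
def _check_power_to_db():
    fb = LogmelFilterBank()                                  # ref=1.0, amin=1e-10, top_db=80.0
    S = (np.random.rand(100, 64) + 1e-6).astype(np.float32)  # fake mel-power matrix
    ours = fb.power_to_db(torch.from_numpy(S)).numpy()
    ref = librosa.power_to_db(S, ref=1.0, amin=1e-10, top_db=80.0)
    print(np.allclose(ours, ref, atol=1e-4))                  # expected: True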
class DropStripes(nn.Module):
def __init__(self, dim, drop_width, stripes_num):
"""Drop stripes.
Args:
dim: int, dimension along which to drop
drop_width: int, maximum width of stripes to drop
stripes_num: int, how many stripes to drop
"""
super(DropStripes, self).__init__()
assert dim in [2, 3] # dim 2: time; dim 3: frequency
self.dim = dim
self.drop_width = drop_width
self.stripes_num = stripes_num
def forward(self, input):
"""input: (batch_size, channels, time_steps, freq_bins)"""
assert input.ndimension() == 4
if self.training is False:
return input
else:
batch_size = input.shape[0]
total_width = input.shape[self.dim]
for n in range(batch_size):
self.transform_slice(input[n], total_width)
return input
def transform_slice(self, e, total_width):
"""e: (channels, time_steps, freq_bins)"""
for _ in range(self.stripes_num):
distance = torch.randint(low=0, high=self.drop_width, size=(1,))[0]
bgn = torch.randint(low=0, high=total_width - distance, size=(1,))[0]
if self.dim == 2:
e[:, bgn : bgn + distance, :] = 0
elif self.dim == 3:
e[:, :, bgn : bgn + distance] = 0
class SpecAugmentation(nn.Module):
def __init__(
self, time_drop_width, time_stripes_num, freq_drop_width, freq_stripes_num
):
"""Spec augmetation.
[ref] <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., <NAME>.
and <NAME>., 2019. Specaugment: A simple data augmentation method
for automatic speech recognition. arXiv preprint arXiv:1904.08779.
Args:
time_drop_width: int
time_stripes_num: int
freq_drop_width: int
freq_stripes_num: int
"""
super(SpecAugmentation, self).__init__()
self.time_dropper = DropStripes(
dim=2, drop_width=time_drop_width, stripes_num=time_stripes_num
)
self.freq_dropper = DropStripes(
dim=3, drop_width=freq_drop_width, stripes_num=freq_stripes_num
)
def forward(self, input):
x = self.time_dropper(input)
x = self.freq_dropper(x)
return x
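# Illustrative demo (demo-only helper): SpecAugmentation zeroes random time and
# frequency stripes, and only while the module is in training mode.
def _demo_spec_augment():
    aug = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
                           freq_drop_width=8, freq_stripes_num=2)
    aug.train()
    x = torch.rand(4, 1, 1000, 64)                          # (batch, channels, time_steps, mel_bins)
    out = aug(x)
    print(out.shape, (out == 0).float().mean().item())      # shape unchanged; some bins zeroed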
def init_layer(layer):
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, "bias"):
if layer.bias is not None:
layer.bias.data.fill_(0.0)
def init_bn(bn):
bn.bias.data.fill_(0.0)
bn.weight.data.fill_(1.0)
def do_mixup(x, mixup_lambda):
"""Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
(1, 3, 5, ...).
Args:
x: (batch_size * 2, ...)
mixup_lambda: (batch_size * 2,)
Returns:
out: (batch_size, ...)
"""
out = (
x[0::2].transpose(0, -1) * mixup_lambda[0::2]
+ x[1::2].transpose(0, -1) * mixup_lambda[1::2]
).transpose(0, -1)
return out
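# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): do_mixup() mixes example 2k with example 2k+1 using the paired
# lambdas, so a batch of 4 collapses to 2 mixed examples.
def _demo_do_mixup():
    x = torch.arange(4.0).reshape(4, 1)
    lam = torch.tensor([0.75, 0.25, 0.5, 0.5])
    mixed = do_mixup(x, lam)
    # mixed[0] = 0.75 * x[0] + 0.25 * x[1] = 0.25; mixed[1] = 0.5 * (x[2] + x[3]) = 2.5
    return mixed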
def interpolate(x: torch.Tensor, ratio: int):
"""Interpolate data in time domain. This is used to compensate the
resolution reduction in downsampling of a CNN.
Args:
x: (batch_size, time_steps, classes_num)
ratio: int, ratio to interpolate
Returns:
upsampled: (batch_size, time_steps * ratio, classes_num)
"""
(batch_size, time_steps, classes_num) = x.shape
upsampled = x[:, :, None, :].repeat(1, 1, ratio, 1)
upsampled = upsampled.reshape(batch_size, time_steps * ratio, classes_num)
return upsampled
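# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): interpolate() repeats each frame `ratio` times along the time axis,
# e.g. 10 segment-level frames become 320 frames for the ratio of 32 used below.
def _demo_interpolate():
    seg = torch.rand(2, 10, 264)          # (batch, time_steps, classes_num)
    up = interpolate(seg, ratio=32)
    return up.shape                       # torch.Size([2, 320, 264])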
def pad_framewise_output(framewise_output: torch.Tensor, frames_num: int):
"""Pad framewise_output to the same length as input frames. The pad value
is the same as the value of the last frame.
Args:
framewise_output: (batch_size, frames_num, classes_num)
frames_num: int, number of frames to pad
Outputs:
output: (batch_size, frames_num, classes_num)
"""
pad = framewise_output[:, -1:, :].repeat(
1, frames_num - framewise_output.shape[1], 1
)
"""tensor for padding"""
output = torch.cat((framewise_output, pad), dim=1)
"""(batch_size, frames_num, classes_num)"""
return output
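# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): pad_framewise_output() extends the output to `frames_num` frames by
# repeating the last frame, so the padded tail equals the original last frame.
def _demo_pad_framewise_output():
    fw = torch.rand(2, 320, 264)
    padded = pad_framewise_output(fw, frames_num=321)
    return torch.equal(padded[:, -1, :], fw[:, -1, :])  # True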
class ConvBlock(nn.Module):
def __init__(self, in_channels: int, out_channels: int):
super().__init__()
self.conv1 = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
bias=False,
)
self.conv2 = nn.Conv2d(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3),
stride=(1, 1),
padding=(1, 1),
bias=False,
)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type="avg"):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == "max":
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == "avg":
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == "avg+max":
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception("Incorrect argument!")
return x
class AttBlock(nn.Module):
def __init__(
self, in_features: int, out_features: int, activation="linear", temperature=1.0
):
super().__init__()
self.activation = activation
self.temperature = temperature
self.att = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True,
)
self.cla = nn.Conv1d(
in_channels=in_features,
out_channels=out_features,
kernel_size=1,
stride=1,
padding=0,
bias=True,
)
self.bn_att = nn.BatchNorm1d(out_features)
self.init_weights()
def init_weights(self):
init_layer(self.att)
init_layer(self.cla)
init_bn(self.bn_att)
def forward(self, x):
# x: (n_samples, n_in, n_time)
norm_att = torch.softmax(torch.clamp(self.att(x), -10, 10), dim=-1)
cla = self.nonlinear_transform(self.cla(x))
x = torch.sum(norm_att * cla, dim=2)
return x, norm_att, cla
def nonlinear_transform(self, x):
if self.activation == "linear":
return x
elif self.activation == "sigmoid":
return torch.sigmoid(x)
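# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): AttBlock pools over time with learned attention weights; norm_att
# sums to ~1 along the time axis and the clip-level output is its weighted sum.
def _demo_att_block():
    att = AttBlock(in_features=2048, out_features=264, activation="sigmoid")
    x = torch.rand(2, 2048, 31)              # (batch, features, time)
    clipwise, norm_att, segmentwise = att(x)
    return clipwise.shape, norm_att.sum(dim=-1)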
class PANNsCNN14Att(nn.Module):
def __init__(
self,
sample_rate: int,
window_size: int,
hop_size: int,
mel_bins: int,
fmin: int,
fmax: int,
classes_num: int,
):
super().__init__()
window = "hann"
center = True
pad_mode = "reflect"
ref = 1.0
amin = 1e-10
top_db = None
self.interpolate_ratio = 32 # Downsampled ratio
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(
n_fft=window_size,
hop_length=hop_size,
win_length=window_size,
window=window,
center=center,
pad_mode=pad_mode,
freeze_parameters=True,
)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(
sr=sample_rate,
n_fft=window_size,
n_mels=mel_bins,
fmin=fmin,
fmax=fmax,
ref=ref,
amin=amin,
top_db=top_db,
freeze_parameters=True,
)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(
time_drop_width=64,
time_stripes_num=2,
freq_drop_width=8,
freq_stripes_num=2,
)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.conv_block5 = ConvBlock(in_channels=512, out_channels=1024)
self.conv_block6 = ConvBlock(in_channels=1024, out_channels=2048)
self.fc1 = nn.Linear(2048, 2048, bias=True)
self.att_block = AttBlock(2048, classes_num, activation="sigmoid")
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
def cnn_feature_extractor(self, x):
x = self.conv_block1(x, pool_size=(2, 2), pool_type="avg")
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=(2, 2), pool_type="avg")
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=(2, 2), pool_type="avg")
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=(2, 2), pool_type="avg")
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block5(x, pool_size=(2, 2), pool_type="avg")
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block6(x, pool_size=(1, 1), pool_type="avg")
x = F.dropout(x, p=0.2, training=self.training)
return x
def preprocess(self, input, mixup_lambda=None):
# t1 = time.time()
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
frames_num = x.shape[2]
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
x = self.spec_augmenter(x)
# Mixup on spectrogram
if self.training and mixup_lambda is not None:
x = do_mixup(x, mixup_lambda)
return x, frames_num
def forward(self, input, mixup_lambda=None):
"""
Input: (batch_size, data_length)"""
x, frames_num = self.preprocess(input, mixup_lambda=mixup_lambda)
# Output shape (batch size, channels, time, frequency)
x = self.cnn_feature_extractor(x)
# Aggregate in frequency axis
x = torch.mean(x, dim=3)
x1 = F.max_pool1d(x, kernel_size=3, stride=1, padding=1)
x2 = F.avg_pool1d(x, kernel_size=3, stride=1, padding=1)
x = x1 + x2
x = F.dropout(x, p=0.5, training=self.training)
x = x.transpose(1, 2)
x = F.relu_(self.fc1(x))
x = x.transpose(1, 2)
x = F.dropout(x, p=0.5, training=self.training)
(clipwise_output, norm_att, segmentwise_output) = self.att_block(x)
segmentwise_output = segmentwise_output.transpose(1, 2)
# Get framewise output
framewise_output = interpolate(segmentwise_output, self.interpolate_ratio)
framewise_output = pad_framewise_output(framewise_output, frames_num)
output_dict = {
"framewise_output": framewise_output,
"clipwise_output": clipwise_output,
}
return output_dict
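# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): an end-to-end shape check on a short random waveform. The
# hyperparameters below mirror common PANNs settings and are assumptions here.
def _demo_panns_forward():
    model = PANNsCNN14Att(sample_rate=32000, window_size=1024, hop_size=320,
                          mel_bins=64, fmin=50, fmax=14000, classes_num=264)
    model.eval()
    with torch.no_grad():
        out = model(torch.rand(1, 32000 * 5))   # 5 seconds of fake audio
    return out["clipwise_output"].shape, out["framewise_output"].shape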
class PANNsDataset(data.Dataset):
# def __init__(self, file_list: List[List[str]], waveform_transforms=None, period=5):
def __init__(
self, df, datadir, waveform_transforms=None, period=5, sample_rate=32000
):
# self.file_list = file_list # list of list: [file_path, ebird_code]
self.df = df
self.datadir = datadir
self.waveform_transforms = waveform_transforms
self.period = period
self.sample_rate = sample_rate
def __len__(self):
return len(self.df)
def __getitem__(self, idx: int):
sample = self.df.iloc[idx, :]
# wav_name = sample["resampled_filename"]
wav_name = sample["filename"]
# wav_name = wav_name.replace("mp3", "wav")
wav_name = wav_name.replace("mp3", "npy")
wav_name = wav_name.replace("wav", "npy")
ebird_code = sample["ebird_code"]
duration = sample["duration"]
wav_path = self.datadir / ebird_code / wav_name
# y, sr = sf.read(self.datadir / ebird_code / wav_name)
effective_length = self.sample_rate * self.period
# wav_path, ebird_code = self.file_list[idx]
# y, sr = sf.read(wav_path)
y = np.load(wav_path)
if self.waveform_transforms:
y = self.waveform_transforms(y)
else:
len_y = len(y)
effective_length = self.sample_rate * self.period
if len_y < effective_length:
new_y = np.zeros(effective_length, dtype=y.dtype)
start = np.random.randint(effective_length - len_y)
new_y[start : start + len_y] = y
y = new_y.astype(np.float32)
elif len_y > effective_length:
start = np.random.randint(len_y - effective_length)
y = y[start : start + effective_length].astype(np.float32)
else:
y = y.astype(np.float32)
labels = np.zeros(len(utils.BIRD_CODE), dtype="f")
labels[utils.BIRD_CODE[ebird_code]] = 1
return {"waveform": y, "targets": labels}
class PANNsLoss(nn.Module):
def __init__(self):
super().__init__()
self.bce = nn.BCELoss()
def forward(self, input, target):
input_ = input["clipwise_output"]
input_ = torch.where(torch.isnan(input_), torch.zeros_like(input_), input_)
input_ = torch.where(torch.isinf(input_), torch.zeros_like(input_), input_)
target = target.float()
return self.bce(input_, target)
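# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): PANNsLoss replaces NaN/Inf in the clipwise output with zeros before
# applying binary cross-entropy.
def _demo_panns_loss():
    criterion = PANNsLoss()
    preds = {"clipwise_output": torch.tensor([[0.9, float("nan")]])}
    targets = torch.tensor([[1.0, 0.0]])
    return criterion(preds, targets)      # NaN is zeroed, so the loss is finite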
class F1Callback(Callback):
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
model_output_key: str = "clipwise_output",
prefix: str = "f1",
):
super().__init__(CallbackOrder.Metric)
self.input_key = input_key
self.output_key = output_key
self.model_output_key = model_output_key
self.prefix = prefix
def on_loader_start(self, state: State):
self.prediction: List[np.ndarray] = []
self.target: List[np.ndarray] = []
def on_batch_end(self, state: State):
targ = state.input[self.input_key].detach().cpu().numpy()
out = state.output[self.output_key]
clipwise_output = out[self.model_output_key].detach().cpu().numpy()
self.prediction.append(clipwise_output)
self.target.append(targ)
y_pred = clipwise_output.argmax(axis=1)
y_true = targ.argmax(axis=1)
score = f1_score(y_true, y_pred, average="macro")
state.batch_metrics[self.prefix] = score
def on_loader_end(self, state: State):
y_pred = np.concatenate(self.prediction, axis=0).argmax(axis=1)
y_true = np.concatenate(self.target, axis=0).argmax(axis=1)
score = f1_score(y_true, y_pred, average="macro")
state.loader_metrics[self.prefix] = score
if state.is_valid_loader:
state.epoch_metrics[state.valid_loader + "_epoch_" + self.prefix] = score
else:
state.epoch_metrics["train_epoch_" + self.prefix] = score
class mAPCallback(Callback):
def __init__(
self,
input_key: str = "targets",
output_key: str = "logits",
model_output_key: str = "clipwise_output",
prefix: str = "mAP",
):
super().__init__(CallbackOrder.Metric)
self.input_key = input_key
self.output_key = output_key
self.model_output_key = model_output_key
self.prefix = prefix
def on_loader_start(self, state: State):
self.prediction: List[np.ndarray] = []
self.target: List[np.ndarray] = []
def on_batch_end(self, state: State):
targ = state.input[self.input_key].detach().cpu().numpy()
out = state.output[self.output_key]
clipwise_output = out[self.model_output_key].detach().cpu().numpy()
self.prediction.append(clipwise_output)
self.target.append(targ)
score = average_precision_score(targ, clipwise_output, average=None)
score = np.nan_to_num(score).mean()
state.batch_metrics[self.prefix] = score
def on_loader_end(self, state: State):
y_pred = np.concatenate(self.prediction, axis=0)
y_true = np.concatenate(self.target, axis=0)
score = average_precision_score(y_true, y_pred, average=None)
score = np.nan_to_num(score).mean()
state.loader_metrics[self.prefix] = score
if state.is_valid_loader:
state.epoch_metrics[state.valid_loader + "_epoch_" + self.prefix] = score
else:
state.epoch_metrics["train_epoch_" + self.prefix] = score
def get_model(config: dict, weights_path=None):
model = PANNsCNN14Att(**config)
model.att_block = AttBlock(2048, 264, activation="sigmoid")
if weights_path:
checkpoint = torch.load(weights_path)
state_dict = checkpoint["model_state_dict"]
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k
if k[:7] == "module.":
name = k[7:] # remove `module.`
new_state_dict[name] = v
model.load_state_dict(new_state_dict)
else:
model.att_block.init_weights()
# device = torch.device("cuda")
# model.to(device)
# model.eval()
return model
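# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): typical use of get_model(). The config keys mirror
# PANNsCNN14Att.__init__; classes_num=527 (the AudioSet label count) is an
# assumption for a pretrained checkpoint, and the head is reset to 264 classes.
def _demo_get_model():
    config = {"sample_rate": 32000, "window_size": 1024, "hop_size": 320,
              "mel_bins": 64, "fmin": 50, "fmax": 14000, "classes_num": 527}
    model = get_model(config, weights_path=None)  # pass a checkpoint path to restore weights
    return model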
|
[
"torch.nn.BatchNorm1d",
"librosa.util.pad_center",
"torch.nn.functional.avg_pool1d",
"torch.sum",
"torch.nn.functional.pad",
"torch.isinf",
"numpy.imag",
"numpy.arange",
"torch.nn.BatchNorm2d",
"torch.nn.init.xavier_uniform_",
"torch.mean",
"torch.nn.functional.avg_pool2d",
"numpy.exp",
"torch.randint",
"numpy.real",
"torch.matmul",
"numpy.concatenate",
"torch.zeros_like",
"numpy.maximum",
"collections.OrderedDict",
"sklearn.metrics.average_precision_score",
"torch.Tensor",
"torch.nn.functional.dropout",
"librosa.filters.mel",
"torch.nn.functional.max_pool2d",
"torch.cat",
"torch.clamp",
"librosa.filters.get_window",
"sklearn.metrics.f1_score",
"numpy.power",
"torch.nn.functional.max_pool1d",
"torch.load",
"torch.sigmoid",
"torch.nn.Conv2d",
"torch.nn.BCELoss",
"numpy.zeros",
"numpy.random.randint",
"torch.nn.Linear",
"numpy.load",
"torch.isnan",
"torch.nn.Conv1d",
"numpy.nan_to_num"
] |
[((9567, 9604), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['layer.weight'], {}), '(layer.weight)\n', (9590, 9604), True, 'import torch.nn as nn\n'), ((11300, 11341), 'torch.cat', 'torch.cat', (['(framewise_output, pad)'], {'dim': '(1)'}), '((framewise_output, pad), dim=1)\n', (11309, 11341), False, 'import torch\n'), ((911, 940), 'numpy.exp', 'np.exp', (['(-2 * np.pi * 1.0j / n)'], {}), '(-2 * np.pi * 1.0j / n)\n', (917, 940), True, 'import numpy as np\n'), ((951, 973), 'numpy.power', 'np.power', (['omega', '(x * y)'], {}), '(omega, x * y)\n', (959, 973), True, 'import numpy as np\n'), ((1095, 1123), 'numpy.exp', 'np.exp', (['(2 * np.pi * 1.0j / n)'], {}), '(2 * np.pi * 1.0j / n)\n', (1101, 1123), True, 'import numpy as np\n'), ((1134, 1156), 'numpy.power', 'np.power', (['omega', '(x * y)'], {}), '(omega, x * y)\n', (1142, 1156), True, 'import numpy as np\n'), ((1977, 2037), 'librosa.filters.get_window', 'librosa.filters.get_window', (['window', 'win_length'], {'fftbins': '(True)'}), '(window, win_length, fftbins=True)\n', (2003, 2037), False, 'import librosa\n'), ((2103, 2145), 'librosa.util.pad_center', 'librosa.util.pad_center', (['fft_window', 'n_fft'], {}), '(fft_window, n_fft)\n', (2126, 2145), False, 'import librosa\n'), ((2280, 2418), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(1)', 'out_channels': 'out_channels', 'kernel_size': 'n_fft', 'stride': 'hop_length', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels=1, out_channels=out_channels, kernel_size=n_fft,\n stride=hop_length, padding=0, dilation=1, groups=1, bias=False)\n', (2289, 2418), True, 'import torch.nn as nn\n'), ((2548, 2686), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(1)', 'out_channels': 'out_channels', 'kernel_size': 'n_fft', 'stride': 'hop_length', 'padding': '(0)', 'dilation': '(1)', 'groups': '(1)', 'bias': '(False)'}), '(in_channels=1, out_channels=out_channels, kernel_size=n_fft,\n stride=hop_length, padding=0, dilation=1, groups=1, bias=False)\n', (2557, 2686), True, 'import torch.nn as nn\n'), ((6330, 6360), 'torch.matmul', 'torch.matmul', (['input', 'self.melW'], {}), '(input, self.melW)\n', (6342, 6360), False, 'import torch\n'), ((11549, 11678), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)', 'bias': '(False)'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=(\n 3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n', (11558, 11678), True, 'import torch.nn as nn\n'), ((11779, 11909), 'torch.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'out_channels', 'out_channels': 'out_channels', 'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)', 'bias': '(False)'}), '(in_channels=out_channels, out_channels=out_channels, kernel_size=\n (3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n', (11788, 11909), True, 'import torch.nn as nn\n'), ((12008, 12036), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (12022, 12036), True, 'import torch.nn as nn\n'), ((12056, 12084), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['out_channels'], {}), '(out_channels)\n', (12070, 12084), True, 'import torch.nn as nn\n'), ((13125, 13237), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_features', 'out_channels': 'out_features', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=in_features, out_channels=out_features, 
kernel_size=1,\n stride=1, padding=0, bias=True)\n', (13134, 13237), True, 'import torch.nn as nn\n'), ((13336, 13448), 'torch.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_features', 'out_channels': 'out_features', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)', 'bias': '(True)'}), '(in_channels=in_features, out_channels=out_features, kernel_size=1,\n stride=1, padding=0, bias=True)\n', (13345, 13448), True, 'import torch.nn as nn\n'), ((13551, 13579), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_features'], {}), '(out_features)\n', (13565, 13579), True, 'import torch.nn as nn\n'), ((13930, 13962), 'torch.sum', 'torch.sum', (['(norm_att * cla)'], {'dim': '(2)'}), '(norm_att * cla, dim=2)\n', (13939, 13962), False, 'import torch\n'), ((15512, 15536), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['mel_bins'], {}), '(mel_bins)\n', (15526, 15536), True, 'import torch.nn as nn\n'), ((15989, 16021), 'torch.nn.Linear', 'nn.Linear', (['(2048)', '(2048)'], {'bias': '(True)'}), '(2048, 2048, bias=True)\n', (15998, 16021), True, 'import torch.nn as nn\n'), ((16328, 16371), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16337, 16371), True, 'import torch.nn.functional as F\n'), ((16451, 16494), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16460, 16494), True, 'import torch.nn.functional as F\n'), ((16574, 16617), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16583, 16617), True, 'import torch.nn.functional as F\n'), ((16697, 16740), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16706, 16740), True, 'import torch.nn.functional as F\n'), ((16820, 16863), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16829, 16863), True, 'import torch.nn.functional as F\n'), ((16943, 16986), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.2)', 'training': 'self.training'}), '(x, p=0.2, training=self.training)\n', (16952, 16986), True, 'import torch.nn.functional as F\n'), ((17929, 17949), 'torch.mean', 'torch.mean', (['x'], {'dim': '(3)'}), '(x, dim=3)\n', (17939, 17949), False, 'import torch\n'), ((17964, 18015), 'torch.nn.functional.max_pool1d', 'F.max_pool1d', (['x'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(x, kernel_size=3, stride=1, padding=1)\n', (17976, 18015), True, 'import torch.nn.functional as F\n'), ((18029, 18080), 'torch.nn.functional.avg_pool1d', 'F.avg_pool1d', (['x'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)'}), '(x, kernel_size=3, stride=1, padding=1)\n', (18041, 18080), True, 'import torch.nn.functional as F\n'), ((18114, 18157), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (18123, 18157), True, 'import torch.nn.functional as F\n'), ((18263, 18306), 'torch.nn.functional.dropout', 'F.dropout', (['x'], {'p': '(0.5)', 'training': 'self.training'}), '(x, p=0.5, training=self.training)\n', (18272, 18306), True, 'import torch.nn.functional as F\n'), ((20015, 20032), 'numpy.load', 'np.load', (['wav_path'], {}), '(wav_path)\n', (20022, 20032), True, 'import numpy as np\n'), ((20992, 21004), 
'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (21002, 21004), True, 'import torch.nn as nn\n'), ((22297, 22338), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (22305, 22338), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((22588, 22629), 'sklearn.metrics.f1_score', 'f1_score', (['y_true', 'y_pred'], {'average': '"""macro"""'}), "(y_true, y_pred, average='macro')\n", (22596, 22629), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((23768, 23828), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['targ', 'clipwise_output'], {'average': 'None'}), '(targ, clipwise_output, average=None)\n', (23791, 23828), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((23983, 24022), 'numpy.concatenate', 'np.concatenate', (['self.prediction'], {'axis': '(0)'}), '(self.prediction, axis=0)\n', (23997, 24022), True, 'import numpy as np\n'), ((24040, 24075), 'numpy.concatenate', 'np.concatenate', (['self.target'], {'axis': '(0)'}), '(self.target, axis=0)\n', (24054, 24075), True, 'import numpy as np\n'), ((24092, 24145), 'sklearn.metrics.average_precision_score', 'average_precision_score', (['y_true', 'y_pred'], {'average': 'None'}), '(y_true, y_pred, average=None)\n', (24115, 24145), False, 'from sklearn.metrics import average_precision_score, f1_score\n'), ((24636, 24660), 'torch.load', 'torch.load', (['weights_path'], {}), '(weights_path)\n', (24646, 24660), False, 'import torch\n'), ((24738, 24751), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24749, 24751), False, 'from collections import OrderedDict\n'), ((867, 879), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (876, 879), True, 'import numpy as np\n'), ((881, 893), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (890, 893), True, 'import numpy as np\n'), ((1051, 1063), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1060, 1063), True, 'import numpy as np\n'), ((1065, 1077), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (1074, 1077), True, 'import numpy as np\n'), ((3606, 3674), 'torch.nn.functional.pad', 'F.pad', (['x'], {'pad': '(self.n_fft // 2, self.n_fft // 2)', 'mode': 'self.pad_mode'}), '(x, pad=(self.n_fft // 2, self.n_fft // 2), mode=self.pad_mode)\n', (3611, 3674), True, 'import torch.nn.functional as F\n'), ((5813, 5889), 'librosa.filters.mel', 'librosa.filters.mel', ([], {'sr': 'sr', 'n_fft': 'n_fft', 'n_mels': 'n_mels', 'fmin': 'fmin', 'fmax': 'fmax'}), '(sr=sr, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)\n', (5832, 5889), False, 'import librosa\n'), ((5985, 6008), 'torch.Tensor', 'torch.Tensor', (['self.melW'], {}), '(self.melW)\n', (5997, 6008), False, 'import torch\n'), ((12477, 12515), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12489, 12515), True, 'import torch.nn.functional as F\n'), ((21115, 21134), 'torch.isnan', 'torch.isnan', (['input_'], {}), '(input_)\n', (21126, 21134), False, 'import torch\n'), ((21136, 21160), 'torch.zeros_like', 'torch.zeros_like', (['input_'], {}), '(input_)\n', (21152, 21160), False, 'import torch\n'), ((21199, 21218), 'torch.isinf', 'torch.isinf', (['input_'], {}), '(input_)\n', (21210, 21218), False, 'import torch\n'), ((21220, 21244), 'torch.zeros_like', 'torch.zeros_like', (['input_'], {}), '(input_)\n', (21236, 21244), False, 'import torch\n'), ((6762, 6807), 'torch.clamp', 'torch.clamp', 
(['input'], {'min': 'self.amin', 'max': 'np.inf'}), '(input, min=self.amin, max=np.inf)\n', (6773, 6807), False, 'import torch\n'), ((6845, 6877), 'numpy.maximum', 'np.maximum', (['self.amin', 'ref_value'], {}), '(self.amin, ref_value)\n', (6855, 6877), True, 'import numpy as np\n'), ((8270, 8323), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': 'self.drop_width', 'size': '(1,)'}), '(low=0, high=self.drop_width, size=(1,))\n', (8283, 8323), False, 'import torch\n'), ((8345, 8405), 'torch.randint', 'torch.randint', ([], {'low': '(0)', 'high': '(total_width - distance)', 'size': '(1,)'}), '(low=0, high=total_width - distance, size=(1,))\n', (8358, 8405), False, 'import torch\n'), ((12565, 12603), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12577, 12603), True, 'import torch.nn.functional as F\n'), ((14157, 14173), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (14170, 14173), False, 'import torch\n'), ((20283, 20324), 'numpy.zeros', 'np.zeros', (['effective_length'], {'dtype': 'y.dtype'}), '(effective_length, dtype=y.dtype)\n', (20291, 20324), True, 'import numpy as np\n'), ((20349, 20392), 'numpy.random.randint', 'np.random.randint', (['(effective_length - len_y)'], {}), '(effective_length - len_y)\n', (20366, 20392), True, 'import numpy as np\n'), ((22449, 22488), 'numpy.concatenate', 'np.concatenate', (['self.prediction'], {'axis': '(0)'}), '(self.prediction, axis=0)\n', (22463, 22488), True, 'import numpy as np\n'), ((22521, 22556), 'numpy.concatenate', 'np.concatenate', (['self.target'], {'axis': '(0)'}), '(self.target, axis=0)\n', (22535, 22556), True, 'import numpy as np\n'), ((23845, 23865), 'numpy.nan_to_num', 'np.nan_to_num', (['score'], {}), '(score)\n', (23858, 23865), True, 'import numpy as np\n'), ((24162, 24182), 'numpy.nan_to_num', 'np.nan_to_num', (['score'], {}), '(score)\n', (24175, 24182), True, 'import numpy as np\n'), ((2854, 2910), 'numpy.real', 'np.real', (['(self.W[:, 0:out_channels] * fft_window[:, None])'], {}), '(self.W[:, 0:out_channels] * fft_window[:, None])\n', (2861, 2910), True, 'import numpy as np\n'), ((3036, 3092), 'numpy.imag', 'np.imag', (['(self.W[:, 0:out_channels] * fft_window[:, None])'], {}), '(self.W[:, 0:out_channels] * fft_window[:, None])\n', (3043, 3092), True, 'import numpy as np\n'), ((12658, 12696), 'torch.nn.functional.avg_pool2d', 'F.avg_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12670, 12696), True, 'import torch.nn.functional as F\n'), ((12714, 12752), 'torch.nn.functional.max_pool2d', 'F.max_pool2d', (['x'], {'kernel_size': 'pool_size'}), '(x, kernel_size=pool_size)\n', (12726, 12752), True, 'import torch.nn.functional as F\n'), ((20554, 20597), 'numpy.random.randint', 'np.random.randint', (['(len_y - effective_length)'], {}), '(len_y - effective_length)\n', (20571, 20597), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name, too-many-arguments, bad-whitespace
# pylint: disable=too-many-lines, too-many-locals, len-as-condition
# pylint: disable=import-outside-toplevel
"""Copyright 2015 <NAME>.
FilterPy library.
http://github.com/rlabbe/filterpy
Documentation at:
https://filterpy.readthedocs.org
Supporting book at:
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the readme.MD file
for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import math
from math import cos, sin
import random
import warnings
import numpy as np
from numpy.linalg import inv
import scipy.linalg as linalg
import scipy.sparse as sp
import scipy.sparse.linalg as spln
from scipy.stats import norm, multivariate_normal
# Older versions of scipy do not support the allow_singular keyword. I could
# check the version number explicitly, but perhaps this is clearer
_support_singular = True
try:
multivariate_normal.logpdf(1, 1, 1, allow_singular=True)
except TypeError:
warnings.warn(
'You are using a version of SciPy that does not support the '\
'allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). '\
'Future versions of FilterPy will require a version of SciPy that '\
'implements this keyword',
DeprecationWarning)
_support_singular = False
def _validate_vector(u, dtype=None):
# this is taken from scipy.spatial.distance. Internal function, so
# redefining here.
u = np.asarray(u, dtype=dtype).squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def mahalanobis(x, mean, cov):
"""
Computes the Mahalanobis distance between the state vector x from the
    Gaussian `mean` with covariance `cov`. This can be thought of as the number
of standard deviations x is from the mean, i.e. a return value of 3 means
x is 3 std from mean.
Parameters
----------
x : (N,) array_like, or float
Input state vector
mean : (N,) array_like, or float
mean of multivariate Gaussian
cov : (N, N) array_like or float
covariance of the multivariate Gaussian
Returns
-------
mahalanobis : double
The Mahalanobis distance between vectors `x` and `mean`
Examples
--------
>>> mahalanobis(x=3., mean=3.5, cov=4.**2) # univariate case
0.125
>>> mahalanobis(x=3., mean=6, cov=1) # univariate, 3 std away
3.0
>>> mahalanobis([1., 2], [1.1, 3.5], [[1., .1],[.1, 13]])
0.42533327058913922
"""
x = _validate_vector(x)
mean = _validate_vector(mean)
if x.shape != mean.shape:
raise ValueError("length of input vectors must be the same")
y = x - mean
S = np.atleast_2d(cov)
dist = float(np.dot(np.dot(y.T, inv(S)), y))
return math.sqrt(dist)
def log_likelihood(z, x, P, H, R):
"""
Returns log-likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R
"""
S = np.dot(H, np.dot(P, H.T)) + R
return logpdf(z, np.dot(H, x), S)
def likelihood(z, x, P, H, R):
"""
Returns likelihood of the measurement z given the Gaussian
posterior (x, P) using measurement function H and measurement
covariance error R
"""
return np.exp(log_likelihood(z, x, P, H, R))
def logpdf(x, mean=None, cov=1, allow_singular=True):
"""
Computes the log of the probability density function of the normal
N(mean, cov) for the data x. The normal may be univariate or multivariate.
Wrapper for older versions of scipy.multivariate_normal.logpdf which
    don't support the allow_singular keyword prior to version 0.15.0.
If it is not supported, and cov is singular or not PSD you may get
an exception.
`x` and `mean` may be column vectors, row vectors, or lists.
"""
if mean is not None:
flat_mean = np.asarray(mean).flatten()
else:
flat_mean = None
flat_x = np.asarray(x).flatten()
if _support_singular:
return multivariate_normal.logpdf(flat_x, flat_mean, cov, allow_singular)
return multivariate_normal.logpdf(flat_x, flat_mean, cov)
def gaussian(x, mean, var, normed=True):
"""
returns probability density function (pdf) for x given a Gaussian with the
specified mean and variance. All must be scalars.
gaussian (1,2,3) is equivalent to scipy.stats.norm(2, math.sqrt(3)).pdf(1)
It is quite a bit faster albeit much less flexible than the latter.
Parameters
----------
x : scalar or array-like
The value(s) for which we compute the distribution
mean : scalar
Mean of the Gaussian
var : scalar
Variance of the Gaussian
normed : bool, default True
Normalize the output if the input is an array of values.
Returns
-------
pdf : float
probability distribution of x for the Gaussian (mean, var). E.g. 0.101 denotes
10.1%.
Examples
--------
>>> gaussian(8, 1, 2)
1.3498566943461957e-06
>>> gaussian([8, 7, 9], 1, 2)
array([1.34985669e-06, 3.48132630e-05, 3.17455867e-08])
"""
pdf = ((2*math.pi*var)**-.5) * np.exp((-0.5*(np.asarray(x)-mean)**2.) / var)
if normed and len(np.shape(pdf)) > 0:
pdf = pdf / sum(pdf)
return pdf
def mul(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF, so it is safe to treat the output as a PDF for any filter using
Bayes equation, which normalizes the result anyway.
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
Examples
--------
>>> mul(1, 2, 3, 4)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromily. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1 / (1/var1 + 1/var2)
return (mean, var)
def mul_pdf(mean1, var1, mean2, var2):
"""
Multiply Gaussian (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean, var, scale_factor).
Strictly speaking the product of two Gaussian PDFs is a Gaussian
function, not Gaussian PDF. It is, however, proportional to a Gaussian
PDF. `scale_factor` provides this proportionality constant
Parameters
----------
mean1 : scalar
mean of first Gaussian
var1 : scalar
variance of first Gaussian
mean2 : scalar
mean of second Gaussian
var2 : scalar
variance of second Gaussian
Returns
-------
mean : scalar
mean of product
var : scalar
variance of product
scale_factor : scalar
proportionality constant
Examples
--------
>>> mul(1, 2, 3, 4)
(1.6666666666666667, 1.3333333333333333)
References
----------
Bromily. "Products and Convolutions of Gaussian Probability Functions",
Tina Memo No. 2003-003.
http://www.tina-vision.net/docs/memos/2003-003.pdf
"""
mean = (var1*mean2 + var2*mean1) / (var1 + var2)
var = 1. / (1./var1 + 1./var2)
S = math.exp(-(mean1 - mean2)**2 / (2*(var1 + var2))) / \
math.sqrt(2 * math.pi * (var1 + var2))
return mean, var, S
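# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): worked example for mul_pdf() -- same mean/variance as mul(), plus
# the proportionality constant of the unnormalized product of the two PDFs.
def _demo_mul_pdf():
    mean, var, S = mul_pdf(1, 2, 3, 4)
    # mean = (2*3 + 4*1) / 6 = 5/3, var = 1 / (1/2 + 1/4) = 4/3
    # S = exp(-(1 - 3)**2 / (2*6)) / sqrt(2*pi*6)
    return mean, var, S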
def add(mean1, var1, mean2, var2):
"""
Add the Gaussians (mean1, var1) with (mean2, var2) and return the
results as a tuple (mean,var).
var1 and var2 are variances - sigma squared in the usual parlance.
"""
return (mean1+mean2, var1+var2)
def multivariate_gaussian(x, mu, cov):
"""
This is designed to replace scipy.stats.multivariate_normal
which is not available before version 0.14. You may either pass in a
multivariate set of data:
.. code-block:: Python
multivariate_gaussian (array([1,1]), array([3,4]), eye(2)*1.4)
multivariate_gaussian (array([1,1,1]), array([3,4,5]), 1.4)
or unidimensional data:
.. code-block:: Python
multivariate_gaussian(1, 3, 1.4)
In the multivariate case if cov is a scalar it is interpreted as eye(n)*cov
    The function gaussian() implements the 1D (univariate) case, and is much
faster than this function.
equivalent calls:
.. code-block:: Python
multivariate_gaussian(1, 2, 3)
scipy.stats.multivariate_normal(2,3).pdf(1)
Parameters
----------
x : float, or np.array-like
Value to compute the probability for. May be a scalar if univariate,
or any type that can be converted to an np.array (list, tuple, etc).
np.array is best for speed.
mu : float, or np.array-like
mean for the Gaussian . May be a scalar if univariate, or any type
that can be converted to an np.array (list, tuple, etc).np.array is
best for speed.
cov : float, or np.array-like
Covariance for the Gaussian . May be a scalar if univariate, or any
type that can be converted to an np.array (list, tuple, etc).np.array is
best for speed.
Returns
-------
probability : float
probability for x for the Gaussian (mu,cov)
"""
warnings.warn(
("This was implemented before SciPy version 0.14, which implemented "
"scipy.stats.multivariate_normal. This function will be removed in "
"a future release of FilterPy"), DeprecationWarning)
# force all to numpy.array type, and flatten in case they are vectors
x = np.array(x, copy=False, ndmin=1).flatten()
mu = np.array(mu, copy=False, ndmin=1).flatten()
nx = len(mu)
cov = _to_cov(cov, nx)
norm_coeff = nx*math.log(2*math.pi) + np.linalg.slogdet(cov)[1]
err = x - mu
if sp.issparse(cov):
numerator = spln.spsolve(cov, err).T.dot(err)
else:
numerator = np.linalg.solve(cov, err).T.dot(err)
return math.exp(-0.5*(norm_coeff + numerator))
def multivariate_multiply(m1, c1, m2, c2):
"""
Multiplies the two multivariate Gaussians together and returns the
results as the tuple (mean, covariance).
Examples
--------
.. code-block:: Python
m, c = multivariate_multiply([7.0, 2], [[1.0, 2.0], [2.0, 1.0]],
[3.2, 0], [[8.0, 1.1], [1.1,8.0]])
Parameters
----------
m1 : array-like
        Mean of first Gaussian. Must be convertible to a 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c1 : matrix-like
        Covariance of first Gaussian. Must be convertible to a 2D array via
numpy.asarray().
m2 : array-like
        Mean of second Gaussian. Must be convertible to a 1D array via
numpy.asarray(), For example 6, [6], [6, 5], np.array([3, 4, 5, 6])
are all valid.
c2 : matrix-like
        Covariance of second Gaussian. Must be convertible to a 2D array via
numpy.asarray().
Returns
-------
m : ndarray
mean of the result
c : ndarray
covariance of the result
"""
C1 = np.asarray(c1)
C2 = np.asarray(c2)
M1 = np.asarray(m1)
M2 = np.asarray(m2)
sum_inv = np.linalg.inv(C1+C2)
C3 = np.dot(C1, sum_inv).dot(C2)
M3 = (np.dot(C2, sum_inv).dot(M1) +
np.dot(C1, sum_inv).dot(M2))
return M3, C3
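# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): in one dimension multivariate_multiply() reduces to the scalar
# helpers above, e.g. it reproduces mul(1, 2, 3, 4).
def _demo_multivariate_multiply():
    m, c = multivariate_multiply([1.0], [[2.0]], [3.0], [[4.0]])
    return m, c     # m ~ [1.6667], c ~ [[1.3333]]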
def covariance_ellipse(P, deviations=1):
"""
Returns a tuple defining the ellipse representing the 2 dimensional
covariance matrix P.
Parameters
----------
P : nd.array shape (2,2)
covariance matrix
deviations : int (optional, default = 1)
# of standard deviations. Default is 1.
Returns (angle_radians, width_radius, height_radius)
"""
U, s, _ = linalg.svd(P)
orientation = math.atan2(U[1, 0], U[0, 0])
width = deviations * math.sqrt(s[0])
height = deviations * math.sqrt(s[1])
if height > width:
raise ValueError('width must be greater than height')
return (orientation, width, height)
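# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): for an axis-aligned covariance the ellipse orientation is 0 and the
# radii are the square roots of the singular values (times `deviations`).
def _demo_covariance_ellipse():
    angle, width, height = covariance_ellipse(np.array([[2.0, 0.0], [0.0, 1.0]]))
    return angle, width, height   # (0.0, sqrt(2), 1.0)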
def _eigsorted(cov, asc=True):
"""
Computes eigenvalues and eigenvectors of a covariance matrix and returns
them sorted by eigenvalue.
Parameters
----------
cov : ndarray
covariance matrix
asc : bool, default=True
determines whether we are sorted smallest to largest (asc=True),
or largest to smallest (asc=False)
Returns
-------
eigval : 1D ndarray
eigenvalues of covariance ordered largest to smallest
eigvec : 2D ndarray
eigenvectors of covariance matrix ordered to match `eigval` ordering.
I.e eigvec[:, 0] is the rotation vector for eigval[0]
"""
eigval, eigvec = np.linalg.eigh(cov)
order = eigval.argsort()
if not asc:
# sort largest to smallest
order = order[::-1]
return eigval[order], eigvec[:, order]
def _std_tuple_of(var=None, std=None, interval=None):
"""
    Convenience function for plotting. Given one of var, standard
deviation, or interval, return the std. Any of the three can be an
iterable list.
Examples
--------
    >>> _std_tuple_of(var=[1, 3, 9])
(1, 2, 3)
"""
if std is not None:
if np.isscalar(std):
std = (std,)
return std
if interval is not None:
if np.isscalar(interval):
interval = (interval,)
return norm.interval(interval)[1]
if var is None:
raise ValueError("no inputs were provided")
if np.isscalar(var):
var = (var,)
return np.sqrt(var)
def norm_cdf(x_range, mu, var=1, std=None):
"""
Computes the probability that a Gaussian distribution lies
within a range of values.
Parameters
----------
x_range : (float, float)
tuple of range to compute probability for
mu : float
mean of the Gaussian
var : float, optional
variance of the Gaussian. Ignored if `std` is provided
std : float, optional
standard deviation of the Gaussian. This overrides the `var` parameter
Returns
-------
probability : float
probability that Gaussian is within x_range. E.g. .1 means 10%.
"""
if std is None:
std = math.sqrt(var)
return abs(norm.cdf(x_range[0], loc=mu, scale=std) -
norm.cdf(x_range[1], loc=mu, scale=std))
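# Illustrative sketch (hypothetical `_demo_` helper, not part of the original
# source): norm_cdf() integrates the Gaussian over the interval, so one
# standard deviation around the mean gives roughly 68.3% of the mass.
def _demo_norm_cdf():
    return norm_cdf((-1.0, 1.0), mu=0.0, std=1.0)   # ~0.6827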
def _to_cov(x, n):
"""
If x is a scalar, returns a covariance matrix generated from it
as the identity matrix multiplied by x. The dimension will be nxn.
If x is already a 2D numpy array then it is returned unchanged.
Raises ValueError if not positive definite
"""
if np.isscalar(x):
if x < 0:
raise ValueError('covariance must be > 0')
return np.eye(n) * x
x = np.atleast_2d(x)
try:
# quickly find out if we are positive definite
np.linalg.cholesky(x)
except:
        raise ValueError('covariance must be positive definite')
return x
def rand_student_t(df, mu=0, std=1):
"""
return random number distributed by student's t distribution with
`df` degrees of freedom with the specified mean and standard deviation.
"""
x = random.gauss(0, std)
y = 2.0*random.gammavariate(0.5 * df, 2.0)
return x / (math.sqrt(y / df)) + mu
def NEES(xs, est_xs, ps):
"""
Computes the normalized estimated error squared (NEES) test on a sequence
of estimates. The estimates are optimal if the mean error is zero and
the covariance matches the Kalman filter's covariance. If this holds,
then the mean of the NEES should be equal to or less than the dimension
of x.
Examples
--------
.. code-block: Python
xs = ground_truth()
est_xs, ps, _, _ = kf.batch_filter(zs)
NEES(xs, est_xs, ps)
Parameters
----------
xs : list-like
sequence of true values for the state x
est_xs : list-like
sequence of estimates from an estimator (such as Kalman filter)
ps : list-like
sequence of covariance matrices from the estimator
Returns
-------
errs : list of floats
list of NEES computed for each estimate
"""
est_err = xs - est_xs
errs = []
for x, p in zip(est_err, ps):
errs.append(np.dot(x.T, linalg.inv(p)).dot(x))
return errs
|
[
"numpy.sqrt",
"math.sqrt",
"math.log",
"numpy.array",
"math.exp",
"scipy.stats.norm.cdf",
"numpy.atleast_2d",
"numpy.isscalar",
"random.gammavariate",
"numpy.asarray",
"scipy.stats.norm.interval",
"numpy.dot",
"numpy.linalg.eigh",
"warnings.warn",
"scipy.sparse.linalg.spsolve",
"numpy.eye",
"scipy.sparse.issparse",
"numpy.linalg.slogdet",
"math.atan2",
"scipy.linalg.svd",
"scipy.stats.multivariate_normal.logpdf",
"numpy.shape",
"scipy.linalg.inv",
"numpy.atleast_1d",
"numpy.linalg.solve",
"numpy.linalg.inv",
"numpy.linalg.cholesky",
"random.gauss"
] |
[((1014, 1070), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['(1)', '(1)', '(1)'], {'allow_singular': '(True)'}), '(1, 1, 1, allow_singular=True)\n', (1040, 1070), False, 'from scipy.stats import norm, multivariate_normal\n'), ((1685, 1701), 'numpy.atleast_1d', 'np.atleast_1d', (['u'], {}), '(u)\n', (1698, 1701), True, 'import numpy as np\n'), ((2916, 2934), 'numpy.atleast_2d', 'np.atleast_2d', (['cov'], {}), '(cov)\n', (2929, 2934), True, 'import numpy as np\n'), ((2996, 3011), 'math.sqrt', 'math.sqrt', (['dist'], {}), '(dist)\n', (3005, 3011), False, 'import math\n'), ((4342, 4392), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['flat_x', 'flat_mean', 'cov'], {}), '(flat_x, flat_mean, cov)\n', (4368, 4392), False, 'from scipy.stats import norm, multivariate_normal\n'), ((9920, 10127), 'warnings.warn', 'warnings.warn', (['"""This was implemented before SciPy version 0.14, which implemented scipy.stats.multivariate_normal. This function will be removed in a future release of FilterPy"""', 'DeprecationWarning'], {}), "(\n 'This was implemented before SciPy version 0.14, which implemented scipy.stats.multivariate_normal. This function will be removed in a future release of FilterPy'\n , DeprecationWarning)\n", (9933, 10127), False, 'import warnings\n'), ((10472, 10488), 'scipy.sparse.issparse', 'sp.issparse', (['cov'], {}), '(cov)\n', (10483, 10488), True, 'import scipy.sparse as sp\n'), ((10623, 10664), 'math.exp', 'math.exp', (['(-0.5 * (norm_coeff + numerator))'], {}), '(-0.5 * (norm_coeff + numerator))\n', (10631, 10664), False, 'import math\n'), ((11834, 11848), 'numpy.asarray', 'np.asarray', (['c1'], {}), '(c1)\n', (11844, 11848), True, 'import numpy as np\n'), ((11858, 11872), 'numpy.asarray', 'np.asarray', (['c2'], {}), '(c2)\n', (11868, 11872), True, 'import numpy as np\n'), ((11882, 11896), 'numpy.asarray', 'np.asarray', (['m1'], {}), '(m1)\n', (11892, 11896), True, 'import numpy as np\n'), ((11906, 11920), 'numpy.asarray', 'np.asarray', (['m2'], {}), '(m2)\n', (11916, 11920), True, 'import numpy as np\n'), ((11936, 11958), 'numpy.linalg.inv', 'np.linalg.inv', (['(C1 + C2)'], {}), '(C1 + C2)\n', (11949, 11958), True, 'import numpy as np\n'), ((12501, 12514), 'scipy.linalg.svd', 'linalg.svd', (['P'], {}), '(P)\n', (12511, 12514), True, 'import scipy.linalg as linalg\n'), ((12533, 12561), 'math.atan2', 'math.atan2', (['U[1, 0]', 'U[0, 0]'], {}), '(U[1, 0], U[0, 0])\n', (12543, 12561), False, 'import math\n'), ((13448, 13467), 'numpy.linalg.eigh', 'np.linalg.eigh', (['cov'], {}), '(cov)\n', (13462, 13467), True, 'import numpy as np\n'), ((14248, 14264), 'numpy.isscalar', 'np.isscalar', (['var'], {}), '(var)\n', (14259, 14264), True, 'import numpy as np\n'), ((14298, 14310), 'numpy.sqrt', 'np.sqrt', (['var'], {}), '(var)\n', (14305, 14310), True, 'import numpy as np\n'), ((15402, 15416), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (15413, 15416), True, 'import numpy as np\n'), ((15529, 15545), 'numpy.atleast_2d', 'np.atleast_2d', (['x'], {}), '(x)\n', (15542, 15545), True, 'import numpy as np\n'), ((15940, 15960), 'random.gauss', 'random.gauss', (['(0)', 'std'], {}), '(0, std)\n', (15952, 15960), False, 'import random\n'), ((1093, 1357), 'warnings.warn', 'warnings.warn', (['"""You are using a version of SciPy that does not support the allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). 
Future versions of FilterPy will require a version of SciPy that implements this keyword"""', 'DeprecationWarning'], {}), "(\n 'You are using a version of SciPy that does not support the allow_singular parameter in scipy.stats.multivariate_normal.logpdf(). Future versions of FilterPy will require a version of SciPy that implements this keyword'\n , DeprecationWarning)\n", (1106, 1357), False, 'import warnings\n'), ((3280, 3292), 'numpy.dot', 'np.dot', (['H', 'x'], {}), '(H, x)\n', (3286, 3292), True, 'import numpy as np\n'), ((4264, 4330), 'scipy.stats.multivariate_normal.logpdf', 'multivariate_normal.logpdf', (['flat_x', 'flat_mean', 'cov', 'allow_singular'], {}), '(flat_x, flat_mean, cov, allow_singular)\n', (4290, 4330), False, 'from scipy.stats import norm, multivariate_normal\n'), ((7920, 7973), 'math.exp', 'math.exp', (['(-(mean1 - mean2) ** 2 / (2 * (var1 + var2)))'], {}), '(-(mean1 - mean2) ** 2 / (2 * (var1 + var2)))\n', (7928, 7973), False, 'import math\n'), ((7991, 8029), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * (var1 + var2))'], {}), '(2 * math.pi * (var1 + var2))\n', (8000, 8029), False, 'import math\n'), ((12587, 12602), 'math.sqrt', 'math.sqrt', (['s[0]'], {}), '(s[0])\n', (12596, 12602), False, 'import math\n'), ((12629, 12644), 'math.sqrt', 'math.sqrt', (['s[1]'], {}), '(s[1])\n', (12638, 12644), False, 'import math\n'), ((13962, 13978), 'numpy.isscalar', 'np.isscalar', (['std'], {}), '(std)\n', (13973, 13978), True, 'import numpy as np\n'), ((14066, 14087), 'numpy.isscalar', 'np.isscalar', (['interval'], {}), '(interval)\n', (14077, 14087), True, 'import numpy as np\n'), ((14974, 14988), 'math.sqrt', 'math.sqrt', (['var'], {}), '(var)\n', (14983, 14988), False, 'import math\n'), ((15618, 15639), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['x'], {}), '(x)\n', (15636, 15639), True, 'import numpy as np\n'), ((15973, 16007), 'random.gammavariate', 'random.gammavariate', (['(0.5 * df)', '(2.0)'], {}), '(0.5 * df, 2.0)\n', (15992, 16007), False, 'import random\n'), ((1573, 1599), 'numpy.asarray', 'np.asarray', (['u'], {'dtype': 'dtype'}), '(u, dtype=dtype)\n', (1583, 1599), True, 'import numpy as np\n'), ((3239, 3253), 'numpy.dot', 'np.dot', (['P', 'H.T'], {}), '(P, H.T)\n', (3245, 3253), True, 'import numpy as np\n'), ((4198, 4211), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (4208, 4211), True, 'import numpy as np\n'), ((10236, 10268), 'numpy.array', 'np.array', (['x'], {'copy': '(False)', 'ndmin': '(1)'}), '(x, copy=False, ndmin=1)\n', (10244, 10268), True, 'import numpy as np\n'), ((10288, 10321), 'numpy.array', 'np.array', (['mu'], {'copy': '(False)', 'ndmin': '(1)'}), '(mu, copy=False, ndmin=1)\n', (10296, 10321), True, 'import numpy as np\n'), ((10399, 10420), 'math.log', 'math.log', (['(2 * math.pi)'], {}), '(2 * math.pi)\n', (10407, 10420), False, 'import math\n'), ((10421, 10443), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['cov'], {}), '(cov)\n', (10438, 10443), True, 'import numpy as np\n'), ((11966, 11985), 'numpy.dot', 'np.dot', (['C1', 'sum_inv'], {}), '(C1, sum_inv)\n', (11972, 11985), True, 'import numpy as np\n'), ((14140, 14163), 'scipy.stats.norm.interval', 'norm.interval', (['interval'], {}), '(interval)\n', (14153, 14163), False, 'from scipy.stats import norm, multivariate_normal\n'), ((15004, 15043), 'scipy.stats.norm.cdf', 'norm.cdf', (['x_range[0]'], {'loc': 'mu', 'scale': 'std'}), '(x_range[0], loc=mu, scale=std)\n', (15012, 15043), False, 'from scipy.stats import norm, multivariate_normal\n'), ((15061, 15100), 
'scipy.stats.norm.cdf', 'norm.cdf', (['x_range[1]'], {'loc': 'mu', 'scale': 'std'}), '(x_range[1], loc=mu, scale=std)\n', (15069, 15100), False, 'from scipy.stats import norm, multivariate_normal\n'), ((15506, 15515), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (15512, 15515), True, 'import numpy as np\n'), ((16024, 16041), 'math.sqrt', 'math.sqrt', (['(y / df)'], {}), '(y / df)\n', (16033, 16041), False, 'import math\n'), ((2972, 2978), 'numpy.linalg.inv', 'inv', (['S'], {}), '(S)\n', (2975, 2978), False, 'from numpy.linalg import inv\n'), ((4122, 4138), 'numpy.asarray', 'np.asarray', (['mean'], {}), '(mean)\n', (4132, 4138), True, 'import numpy as np\n'), ((5478, 5491), 'numpy.shape', 'np.shape', (['pdf'], {}), '(pdf)\n', (5486, 5491), True, 'import numpy as np\n'), ((12005, 12024), 'numpy.dot', 'np.dot', (['C2', 'sum_inv'], {}), '(C2, sum_inv)\n', (12011, 12024), True, 'import numpy as np\n'), ((12045, 12064), 'numpy.dot', 'np.dot', (['C1', 'sum_inv'], {}), '(C1, sum_inv)\n', (12051, 12064), True, 'import numpy as np\n'), ((10510, 10532), 'scipy.sparse.linalg.spsolve', 'spln.spsolve', (['cov', 'err'], {}), '(cov, err)\n', (10522, 10532), True, 'import scipy.sparse.linalg as spln\n'), ((10574, 10599), 'numpy.linalg.solve', 'np.linalg.solve', (['cov', 'err'], {}), '(cov, err)\n', (10589, 10599), True, 'import numpy as np\n'), ((17044, 17057), 'scipy.linalg.inv', 'linalg.inv', (['p'], {}), '(p)\n', (17054, 17057), True, 'import scipy.linalg as linalg\n'), ((5424, 5437), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5434, 5437), True, 'import numpy as np\n')]
|
import tensorflow as tf
from dltk.core.activations import leaky_relu
import numpy as np
def test_leaky_relu():
test_alpha = tf.constant(0.1)
test_inp_1 = tf.constant(1.)
test_inp_2 = tf.constant(-1.)
test_relu_1 = leaky_relu(test_inp_1, test_alpha)
test_relu_2 = leaky_relu(test_inp_2, test_alpha)
with tf.Session() as s:
out_1 = s.run(test_relu_1)
assert np.isclose(out_1, 1.), \
'Got {} but expected {}'.format(out_1, 1.)
out_2 = s.run(test_relu_2)
assert np.isclose(out_2, -0.1), \
'Got {} but expected {}'.format(out_2, -0.1)
|
[
"tensorflow.Session",
"tensorflow.constant",
"numpy.isclose",
"dltk.core.activations.leaky_relu"
] |
[((130, 146), 'tensorflow.constant', 'tf.constant', (['(0.1)'], {}), '(0.1)\n', (141, 146), True, 'import tensorflow as tf\n'), ((164, 180), 'tensorflow.constant', 'tf.constant', (['(1.0)'], {}), '(1.0)\n', (175, 180), True, 'import tensorflow as tf\n'), ((197, 214), 'tensorflow.constant', 'tf.constant', (['(-1.0)'], {}), '(-1.0)\n', (208, 214), True, 'import tensorflow as tf\n'), ((233, 267), 'dltk.core.activations.leaky_relu', 'leaky_relu', (['test_inp_1', 'test_alpha'], {}), '(test_inp_1, test_alpha)\n', (243, 267), False, 'from dltk.core.activations import leaky_relu\n'), ((286, 320), 'dltk.core.activations.leaky_relu', 'leaky_relu', (['test_inp_2', 'test_alpha'], {}), '(test_inp_2, test_alpha)\n', (296, 320), False, 'from dltk.core.activations import leaky_relu\n'), ((331, 343), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (341, 343), True, 'import tensorflow as tf\n'), ((400, 422), 'numpy.isclose', 'np.isclose', (['out_1', '(1.0)'], {}), '(out_1, 1.0)\n', (410, 422), True, 'import numpy as np\n'), ((531, 554), 'numpy.isclose', 'np.isclose', (['out_2', '(-0.1)'], {}), '(out_2, -0.1)\n', (541, 554), True, 'import numpy as np\n')]
|
#
# Copyright (c) 2021 IBM Corp.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import textwrap
#
# thing_table.py
#
# Part of text_extensions_for_pandas
#
# Data structure for managing collections of immutable items that implement
# __hash__ and __eq__. Serves as a base class for StringTable
#
from abc import ABC, abstractmethod
from typing import *
import numpy as np
class ThingTable(ABC):
"""
A set of immutable things, plus integer IDs for said things.
Also implicitly maps `None` to ID -1.
Serves as a base class for collections of specific things like strings and
tokenizations.
"""
# Special integer ID for None as a thing.
NONE_ID = -1
# Special integer ID for "not an id"
NOT_AN_ID = -2
def __init__(self):
# Bidirectional map from unique thing (possibly boxed for dictionary
# compatibility) to integer ID and back
self._boxed_thing_to_id = {} # type: Dict[Any, int]
self._id_to_boxed_thing = [] # type: List[Any]
self._total_bytes = 0 # type: int
@abstractmethod
def size_of_thing(self, thing: Any) -> int:
"""
        :param thing: Thing to be inserted in this table
:return: The number of bytes that the thing occupies in memory
"""
pass
@abstractmethod
def type_of_thing(self) -> Type:
"""
:return: Expected type of things that this table will manage
"""
pass
def box(self, thing: Any) -> Any:
"""
Subclasses should override this method if they manage items that aren't
compatible with Python dictionaries.
:param thing: Thing to insert into the table
:return: a dictionary-compatible boxed version of `thing`, if such boxing
is needed to make `thing` dictionary-compatible.
"""
# Default implementation is a no-op
return thing
def unbox(self, boxed_thing: Any) -> Any:
"""
Subclasses should override this method if they manage items that aren't
compatible with Python dictionaries.
:param boxed_thing: Thing that was boxed by this class's `box` method.
:return: Original thing that was passed to `box`
"""
# Default implementation is a no-op
return boxed_thing
@classmethod
def create_single(cls, thing: Any):
"""
Factory method for building a table containing a single value at ID 0.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
"""
# For now we return a fresh table each time.
ret = cls()
ret.maybe_add_thing(thing)
return ret
@classmethod
def merge_tables_and_ids(cls, tables: Sequence["ThingTable"],
int_ids: Sequence[np.ndarray]) \
-> Tuple["ThingTable", np.ndarray]:
"""
Factory method for combining together multiple references to different
ThingTables into references to a new, combined ThingTable of the same type.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
        :param tables: A list of (possibly) different mappings from int to thing
        :param int_ids: List of lists of integer IDs that decode to things via the
corresponding elements of `tables`.
:returns: A tuple containing:
* A new, merged table containing all the unique things under `tables`
that are referenced in `int_ids` (and possibly additional things that aren't
referenced)
* Numpy arrays of integer offsets into the new table, corresponding to the
elements of `int_ids`
"""
if len(tables) != len(int_ids):
raise ValueError(f"Got {len(tables)} {cls}s "
f"and {len(int_ids)} lists of IDs.")
# TODO: Add fast-path code here to pass through the first table if
# both input tables are identical.
new_table = cls()
new_ids_list = []
for i in range(len(tables)):
old_table = tables[i]
if not isinstance(old_table, cls):
raise TypeError(f"Expected table of type {cls}, but got "
f"{type(old_table)}")
old_ids = int_ids[i]
if len(old_ids.shape) != 1:
raise ValueError(f"Invalid shape for IDs {old_ids}")
new_ids = np.empty_like(old_ids, dtype=int)
old_id_to_new_id = [
new_table.maybe_add_thing(old_table.id_to_thing(j))
for j in range(old_table.num_things)
]
for j in range(len(old_ids)):
new_ids[j] = old_id_to_new_id[old_ids[j]]
new_ids_list.append(new_ids)
return new_table, new_ids_list
@classmethod
def merge_things(cls, things: Union[Sequence[Any], np.ndarray]):
f"""
Factory method for bulk-adding multiple things to create a single
ThingTable and a list of integer IDs against that ThingTable.
Users of this class are encouraged to use this method when possible,
so that performance tuning can be localized to this method.
:param things: things to be de-duplicated and converted to a ThingTable.
:returns: Two values:
    * A ThingTable containing (at least) all the unique things in `things`
    * A Numpy array of integer IDs against the returned ThingTable, where
      each ID maps to the corresponding element of `things`
"""
new_table = cls()
str_ids = np.empty(len(things), dtype=int)
for i in range(len(things)):
str_ids[i] = new_table.maybe_add_thing(things[i])
return new_table, str_ids
@classmethod
def from_things(cls, things: Union[Sequence[Any], np.ndarray]):
"""
Factory method for creating a ThingTable from a sequence of unique things.
:param things: sequence of unique things to be added to the ThingTable.
:return: A ThingTable containing the elements of `things`.
"""
new_table = cls()
for thing in things:
new_table.add_thing(thing)
return new_table
def thing_to_id(self, thing: Any) -> int:
"""
:param thing: A thing to look up in this table
:returns: One of:
* The integer ID of the indicated thing, if present.
* `ThingTable.NONE_ID` if thing is None
* `ThingTable.NOT_AN_ID` if thing is not present in the table
"""
if thing is None:
# By convention, None maps to -1
return ThingTable.NONE_ID
elif not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
else:
# Remaining branches require boxing for dictionary lookup
boxed_thing = self.box(thing)
if boxed_thing not in self._boxed_thing_to_id:
return ThingTable.NOT_AN_ID
else:
return self._boxed_thing_to_id[boxed_thing]
def id_to_thing(self, int_id: Union[int, np.int64, np.int32]) -> Any:
"""
:param int_id: Integer ID that is potentially associated with a thing in the
table
:return: The associated thing, if present, or `None` if no thing is associated
with the indicated ID.
"""
if not isinstance(int_id, (int, np.int64, np.int32)):
raise TypeError(f"Expected integer, but received {int_id} "
f"of type {type(int_id)}")
elif int_id <= ThingTable.NOT_AN_ID:
raise ValueError(f"Invalid ID {int_id}")
elif ThingTable.NONE_ID == int_id:
return None
else:
boxed_thing = self._id_to_boxed_thing[int_id]
return self.unbox(boxed_thing)
def ids_to_things(self, int_ids: Union[Sequence[int], np.ndarray]) -> np.ndarray:
"""
    Vectorized version of :func:`id_to_thing` for translating multiple IDs
    at once.
    :param int_ids: Multiple integer IDs to be translated to things
    :returns: A numpy array of objects (the translated things).
"""
if not isinstance(int_ids, np.ndarray):
int_ids = np.array(int_ids, dtype=int)
if len(int_ids.shape) != 1:
raise TypeError(f"Invalid shape {int_ids.shape} for array of integer IDs.")
ret = np.empty(len(int_ids), dtype=object)
for i in range(len(int_ids)):
ret[i] = self.id_to_thing(int_ids[i].item())
return ret
def add_thing(self, thing: Any) -> int:
"""
Adds a thing to the table. Raises a ValueError if the thing is already
present.
:param thing: Thing to add
:return: unique ID for this thing
"""
if not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
# Box for dictionary compatibility
boxed_thing = self.box(thing)
if boxed_thing in self._boxed_thing_to_id:
raise ValueError(f"'{textwrap.shorten(str(thing), 40)}' already in table")
new_id = len(self._id_to_boxed_thing)
self._id_to_boxed_thing.append(boxed_thing)
self._boxed_thing_to_id[boxed_thing] = new_id
self._total_bytes += self.size_of_thing(thing)
return new_id
def maybe_add_thing(self, thing: Any) -> int:
"""
Adds a thing to the table if it is not already present.
:param thing: Thing to add
:return: unique ID for this thing
"""
if not isinstance(thing, self.type_of_thing()):
raise TypeError(f"Expected an object of type {self.type_of_thing()}, "
f"but received an object of type {type(thing)}")
current_id = self.thing_to_id(thing)
if current_id != ThingTable.NOT_AN_ID:
return current_id
else:
return self.add_thing(thing)
def maybe_add_things(self, s: Sequence[Any]) -> np.ndarray:
"""
Vectorized version of :func:`maybe_add_thing` for translating, and
potentially adding multiple things at once.
:param s: Multiple things to be translated and potentially added
:returns: A numpy array of the corresponding integer IDs for the things.
    Adds each thing to the table if it is not already present.
"""
result = np.empty(len(s), dtype=np.int32)
for i in range(len(result)):
result[i] = self.maybe_add_thing(s[i])
return result
def nbytes(self):
"""
Number of bytes in a (currently hypothetical) serialized version of this table.
"""
return self._total_bytes
@property
def num_things(self) -> int:
"""
:return: Number of distinct things in the table
"""
return len(self._id_to_boxed_thing)
@property
def things(self) -> Iterator[Any]:
"""
:return: Iterator over the unique things stored in this table.
"""
return (self.unbox(thing) for thing in self._id_to_boxed_thing)
@property
def ids(self) -> Iterator[int]:
"""
:return: Iterator over the IDs of things stored in this table, including the
implicit ID ThingTable.NONE_ID
"""
if ThingTable.NONE_ID != -1:
raise ValueError("Someone has changed the value of NONE_ID; need to rewrite "
"this function.")
return range(-1, len(self._id_to_boxed_thing))
def things_to_ids(self, things: Sequence[Any]) -> np.ndarray:
"""
Vectorized version of :func:`thing_to_id` for translating multiple things
at once.
:param things: Multiple things to be translated to IDs. Must be already
in the table's set of things.
:returns: A numpy array of the same integers that :func:`thing_to_id` would
return.
"""
ret = np.empty(len(things), dtype=np.int32)
for i in range(len(things)):
ret[i] = self.thing_to_id(things[i])
return ret
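# Minimal usage sketch (hypothetical names, not part of this module): a
# concrete subclass over `str` things, plus a merge of two tables' ID arrays.
class _ExampleStrTable(ThingTable):
  """Illustrative concrete subclass; only the two abstract methods are required."""
  def size_of_thing(self, thing: Any) -> int:
    return len(thing.encode("utf-8"))
  def type_of_thing(self) -> Type:
    return str
def _example_thing_table_usage():
  t1 = _ExampleStrTable()
  ids1 = t1.maybe_add_things(["cat", "dog", "cat"])  # -> array([0, 1, 0])
  t2 = _ExampleStrTable()
  ids2 = t2.maybe_add_things(["dog", "bird"])  # -> array([0, 1])
  merged, new_ids = _ExampleStrTable.merge_tables_and_ids([t1, t2], [ids1, ids2])
  return merged.ids_to_things(new_ids[1])  # -> array(['dog', 'bird'], dtype=object)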
|
[
"numpy.array",
"numpy.empty_like"
] |
[((5071, 5104), 'numpy.empty_like', 'np.empty_like', (['old_ids'], {'dtype': 'int'}), '(old_ids, dtype=int)\n', (5084, 5104), True, 'import numpy as np\n'), ((9024, 9052), 'numpy.array', 'np.array', (['int_ids'], {'dtype': 'int'}), '(int_ids, dtype=int)\n', (9032, 9052), True, 'import numpy as np\n')]
|
"""
Semantic operations.
outliers
create_or_update_out_of_the_bbox,
create_or_update_gps_deactivated_signal,
create_or_update_gps_jump,
create_or_update_short_trajectory,
create_or_update_gps_block_signal,
filter_block_signal_by_repeated_amount_of_points,
filter_block_signal_by_time,
filter_longer_time_to_stop_segment_by_id
"""
from __future__ import annotations
from typing import TYPE_CHECKING
import numpy as np
from pandas import DataFrame
from pymove.preprocessing import filters, segmentation, stay_point_detection
from pymove.utils.constants import (
BLOCK,
DEACTIVATED,
DIST_PREV_TO_NEXT,
DIST_TO_NEXT,
DIST_TO_PREV,
JUMP,
OUT_BBOX,
OUTLIER,
SEGMENT_STOP,
SHORT,
TID_PART,
TIME_TO_PREV,
TRAJ_ID,
)
from pymove.utils.log import logger, timer_decorator
if TYPE_CHECKING:
from pymove.core.dask import DaskMoveDataFrame
from pymove.core.pandas import PandasMoveDataFrame
def _end_create_operation(
move_data: DataFrame, new_label: str, inplace: bool
) -> DataFrame | None:
"""
Returns the dataframe after create operation.
Parameters
----------
move_data: dataframe
The input trajectories data.
new_label: string
        The name of the new feature created by the calling operation.
inplace : boolean
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned.
Returns
-------
DataFrame
DataFrame with the additional features or None
"""
logger.debug(move_data[new_label].value_counts())
if not inplace:
return move_data
def _process_simple_filter(
move_data: DataFrame, new_label: str, feature: str, value: float, inplace: bool
) -> DataFrame | None:
"""
Processes create operation with simple filter.
Parameters
----------
move_data: dataframe
The input trajectories data.
new_label: string
        The name of the new boolean feature to create.
feature: string
Feature column to compare
value: float
Value to compare feature
inplace : boolean
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned.
Returns
-------
DataFrame
DataFrame with the additional features or None
"""
move_data[new_label] = False
filter_ = move_data[feature] >= value
idx_start = move_data[filter_].index
idx_end = idx_start - np.full(len(idx_start), 1, dtype=np.int32)
idx = np.concatenate([idx_start, idx_end], axis=0)
move_data.at[idx, new_label] = True
return _end_create_operation(
move_data, new_label, inplace
)
@timer_decorator
def outliers(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
jump_coefficient: float = 3.0,
threshold: float = 1,
new_label: str = OUTLIER,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
Create or update a boolean feature to detect outliers.
Parameters
----------
move_data : dataframe
The input trajectory data
jump_coefficient : float, optional
by default 3
threshold : float, optional
Minimum value that the distance features must have
in order to be considered outliers, by default 1
new_label: string, optional
        The name of the new feature with detected outliers,
by default OUTLIER
inplace : bool, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned, by default False
Returns
-------
DataFrame
Returns a dataframe with the trajectories outliers or None
"""
if not inplace:
move_data = move_data.copy()
if DIST_TO_PREV not in move_data:
move_data.generate_dist_features()
if move_data.index.name is not None:
logger.debug('...Reset index for filtering\n')
move_data.reset_index(inplace=True)
if (
DIST_TO_PREV in move_data
        and DIST_TO_NEXT in move_data
and DIST_PREV_TO_NEXT in move_data
):
jump = jump_coefficient * move_data[DIST_PREV_TO_NEXT]
filter_ = (
(move_data[DIST_TO_NEXT] > threshold)
& (move_data[DIST_TO_PREV] > threshold)
& (move_data[DIST_PREV_TO_NEXT] > threshold)
& (jump < move_data[DIST_TO_NEXT])
& (jump < move_data[DIST_TO_PREV])
)
move_data[new_label] = filter_
else:
logger.warning('...Distances features were not created')
if not inplace:
return move_data
@timer_decorator
def create_or_update_out_of_the_bbox(
move_data: DataFrame,
bbox: tuple[int, int, int, int],
new_label: str = OUT_BBOX,
inplace: bool = False
) -> DataFrame | None:
"""
Create or update a boolean feature to detect points out of the bbox.
Parameters
----------
move_data: dataframe
The input trajectories data.
bbox : tuple
Tuple of 4 elements, containing the minimum and maximum values
of latitude and longitude of the bounding box.
new_label: string, optional
The name of the new feature with detected points out of the bbox,
by default OUT_BBOX
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
Returns dataframe with a boolean feature with detected
points out of the bbox, or None
Raises
------
ValueError
If feature generation fails
"""
if not inplace:
move_data = move_data.copy()
logger.debug('\nCreate or update boolean feature to detect points out of the bbox')
filtered_ = filters.by_bbox(move_data, bbox, filter_out=True)
if filtered_ is None:
raise ValueError('Filter bbox failed!')
logger.debug('...Creating a new label named as %s' % new_label)
move_data[new_label] = False
if filtered_.shape[0] > 0:
        logger.debug('...Setting %s as True\n' % new_label)
move_data.at[filtered_.index, new_label] = True
return _end_create_operation(
move_data, new_label, inplace
)
@timer_decorator
def create_or_update_gps_deactivated_signal(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
max_time_between_adj_points: float = 7200,
new_label: str = DEACTIVATED,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
    Creates a new feature that informs whether a point's signal was deactivated.
    A signal is considered deactivated if the time between adjacent points is
    equal to or greater than max_time_between_adj_points.
Parameters
----------
move_data: dataframe
The input trajectories data.
max_time_between_adj_points: float, optional
The max time between adjacent points, by default 7200
new_label: string, optional
The name of the new feature with detected deactivated signals,
by default DEACTIVATED
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
DataFrame with the additional features or None
'time_to_prev', 'time_to_next', 'time_prev_to_next', 'deactivate_signal'
"""
if not inplace:
move_data = move_data.copy()
message = 'Create or update deactivated signal if time max > %s seconds\n'
logger.debug(message % max_time_between_adj_points)
move_data.generate_time_features()
return _process_simple_filter(
move_data,
new_label,
TIME_TO_PREV,
max_time_between_adj_points,
inplace
)
@timer_decorator
def create_or_update_gps_jump(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
max_dist_between_adj_points: float = 3000,
new_label: str = JUMP,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
    Creates a new feature that informs whether a point is a GPS jump.
    A jump is defined when the distance between adjacent points
    is greater than max_dist_between_adj_points.
Parameters
----------
move_data: dataframe
The input trajectories data.
max_dist_between_adj_points: float, optional
The maximum distance between adjacent points, by default 3000
new_label: string, optional
        The name of the new feature with detected jumps, by default JUMP
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
DataFrame with the additional features or None
'dist_to_prev', 'dist_to_next', 'dist_prev_to_next', 'jump'
"""
if not inplace:
move_data = move_data.copy()
message = 'Create or update jump if dist max > %s meters\n'
logger.debug(message % max_dist_between_adj_points)
move_data.generate_dist_features()
return _process_simple_filter(
move_data,
new_label,
DIST_TO_PREV,
max_dist_between_adj_points,
inplace
)
@timer_decorator
def create_or_update_short_trajectory(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
max_dist_between_adj_points: float = 3000,
max_time_between_adj_points: float = 7200,
max_speed_between_adj_points: float = 50,
k_segment_max: int = 50,
label_tid: str = TID_PART,
new_label: str = SHORT,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
    Creates a new feature that informs whether a point belongs to a short trajectory.
Parameters
----------
move_data : dataframe
The input trajectory data
max_dist_between_adj_points : float, optional
Specify the maximum distance a point should have from
the previous point, in order not to be dropped, by default 3000
max_time_between_adj_points : float, optional
Specify the maximum travel time between two adjacent points, by default 7200
max_speed_between_adj_points : float, optional
Specify the maximum speed of travel between two adjacent points, by default 50
k_segment_max: int, optional
        Specify the maximum number of points a segment may contain to be considered a short trajectory, by default 50
label_tid: str, optional
The label of the column containing the ids of the formed segments,
by default TID_PART
new_label: str, optional
The name of the new feature with short trajectories, by default SHORT
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
        DataFrame with the additional features or None
'dist_to_prev', 'time_to_prev', 'speed_to_prev', 'tid_part', 'short_traj'
"""
if not inplace:
move_data = move_data.copy()
logger.debug('\nCreate or update short trajectories...')
segmentation.by_dist_time_speed(
move_data,
max_dist_between_adj_points=max_dist_between_adj_points,
max_time_between_adj_points=max_time_between_adj_points,
max_speed_between_adj_points=max_speed_between_adj_points,
label_new_tid=label_tid,
inplace=True
)
move_data[new_label] = False
df_count_tid = move_data.groupby(by=label_tid).size()
filter_ = df_count_tid <= k_segment_max
idx = df_count_tid[filter_].index
move_data.loc[move_data[label_tid].isin(idx), new_label] = True
return _end_create_operation(
move_data, new_label, inplace
)
@timer_decorator
def create_or_update_gps_block_signal(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
max_time_stop: float = 7200,
new_label: str = BLOCK,
label_tid: str = TID_PART,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
    Creates a new feature that flags segments with periods without movement.
Parameters
----------
move_data: dataFrame
The input trajectories data.
max_time_stop: float, optional
Maximum time allowed with speed 0, by default 7200
new_label: string, optional
The name of the new feature with detected deactivated signals, by default BLOCK
label_tid : str, optional
The label of the column containing the ids of the formed segments,
by default TID_PART
        Is the new split id.
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
DataFrame with the additional features or None
'dist_to_prev', 'time_to_prev', 'speed_to_prev',
'tid_dist', 'block_signal'
"""
if not inplace:
move_data = move_data.copy()
message = 'Create or update block_signal if max time stop > %s seconds\n'
logger.debug(message % max_time_stop)
segmentation.by_max_dist(
move_data,
max_dist_between_adj_points=0.0,
label_new_tid=label_tid,
inplace=True
)
logger.debug('Updating dist time speed values')
move_data.generate_dist_time_speed_features(label_id=label_tid)
move_data[new_label] = False
df_agg_tid = move_data.groupby(by=label_tid).agg({TIME_TO_PREV: 'sum'})
filter_ = df_agg_tid[TIME_TO_PREV] >= max_time_stop
idx = df_agg_tid[filter_].index
move_data.loc[move_data[label_tid].isin(idx), new_label] = True
return _end_create_operation(
move_data, new_label, inplace
)
@timer_decorator
def filter_block_signal_by_repeated_amount_of_points(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
amount_max_of_points_stop: float = 30.0,
max_time_stop: float = 7200,
filter_out: bool = False,
label_tid: str = TID_PART,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
Filters from dataframe points with blocked signal by amount of points.
Parameters
----------
move_data: dataFrame
The input trajectories data.
amount_max_of_points_stop: float, optional
Maximum number of stopped points, by default 30
max_time_stop: float, optional
Maximum time allowed with speed 0, by default 7200
filter_out: boolean, optional
If set to True, it will return trajectory points with blocked signal,
by default False
label_tid : str, optional
The label of the column containing the ids of the formed segments,
by default TID_PART
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
Filtered DataFrame with the additional features or None
'dist_to_prev', 'time_to_prev', 'speed_to_prev',
'tid_dist', 'block_signal'
"""
if not inplace:
move_data = move_data.copy()
if BLOCK not in move_data:
create_or_update_gps_block_signal(
move_data, max_time_stop, label_tid=label_tid, inplace=True
)
df_count_tid = move_data.groupby(by=[label_tid]).sum()
filter_ = df_count_tid[BLOCK] > amount_max_of_points_stop
if filter_out:
idx = df_count_tid[~filter_].index
else:
idx = df_count_tid[filter_].index
filter_ = move_data[move_data[label_tid].isin(idx)].index
move_data.drop(index=filter_, inplace=True)
if not inplace:
return move_data
@timer_decorator
def filter_block_signal_by_time(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
max_time_stop: float = 7200,
filter_out: bool = False,
label_tid: str = TID_PART,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
Filters from dataframe points with blocked signal by time.
Parameters
----------
move_data: dataFrame
The input trajectories data.
max_time_stop: float, optional
Maximum time allowed with speed 0, by default 7200
filter_out: boolean, optional
If set to True, it will return trajectory points with blocked signal,
by default False
label_tid : str, optional
The label of the column containing the ids of the formed segments,
by default TID_PART
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
Filtered DataFrame with the additional features or None
'dist_to_prev', 'time_to_prev', 'speed_to_prev',
'tid_dist', 'block_signal'
"""
if not inplace:
move_data = move_data.copy()
if BLOCK not in move_data:
create_or_update_gps_block_signal(
move_data, max_time_stop, label_tid=label_tid, inplace=True
)
df_agg_tid = move_data.groupby(by=label_tid).agg(
{TIME_TO_PREV: 'sum', BLOCK: 'sum'}
)
filter_ = (df_agg_tid[TIME_TO_PREV] > max_time_stop) & (df_agg_tid[BLOCK] > 0)
if filter_out:
idx = df_agg_tid[~filter_].index
else:
idx = df_agg_tid[filter_].index
filter_ = move_data[move_data[label_tid].isin(idx)].index
move_data.drop(index=filter_, inplace=True)
if not inplace:
return move_data
@timer_decorator
def filter_longer_time_to_stop_segment_by_id(
move_data: 'PandasMoveDataFrame' | 'DaskMoveDataFrame',
dist_radius: float = 30,
time_radius: float = 900,
label_id: str = TRAJ_ID,
label_segment_stop: str = SEGMENT_STOP,
filter_out: bool = False,
inplace: bool = False
) -> 'PandasMoveDataFrame' | 'DaskMoveDataFrame' | None:
"""
Filters from dataframe segment with longest stop time.
Parameters
----------
move_data: dataFrame
The input trajectories data.
dist_radius : float, optional
The dist_radius defines the distance used in the segmentation, by default 30
time_radius : float, optional
        The time_radius used to determine if a segment is a stop, by default 900.
        If the user stayed in the segment for a time
        greater than time_radius, then the segment is a stop.
    label_id : str, optional
        The label of the column containing the ids of the trajectories,
by default TRAJ_ID
label_segment_stop: str, optional
by default 'segment_stop'
filter_out: boolean, optional
        If set to True, it will return trajectory points with longer time, by default False
inplace : boolean, optional
if set to true the original dataframe will be altered to contain
the result of the filtering, otherwise a copy will be returned,
by default False
Returns
-------
DataFrame
Filtered DataFrame with the additional features or None
'dist_to_prev', 'time_to_prev', 'speed_to_prev',
'tid_dist', 'block_signal'
"""
if not inplace:
move_data = move_data.copy()
if label_segment_stop not in move_data:
stay_point_detection.create_or_update_move_stop_by_dist_time(
move_data, dist_radius, time_radius, inplace=True
)
df_agg_id_stop = move_data.groupby(
[label_id, label_segment_stop], as_index=False
).agg({TIME_TO_PREV: 'sum'})
filter_ = df_agg_id_stop.groupby(
[label_id], as_index=False
).idxmax()[TIME_TO_PREV]
if filter_out:
segments = df_agg_id_stop.loc[~df_agg_id_stop.index.isin(filter_)]
else:
segments = df_agg_id_stop.loc[df_agg_id_stop.index.isin(filter_)]
segments = segments[label_segment_stop]
filter_ = move_data[move_data[label_segment_stop].isin(segments)].index
move_data.drop(index=filter_, inplace=True)
if not inplace:
return move_data
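def _example_semantic_usage(move_df):
    """
    Minimal usage sketch, assuming `move_df` is a PandasMoveDataFrame built
    elsewhere (e.g. with pymove.MoveDataFrame); this helper is illustrative
    only and not part of the library API.
    """
    outliers(move_df, jump_coefficient=3.0, threshold=1, inplace=True)
    create_or_update_gps_jump(move_df, max_dist_between_adj_points=3000, inplace=True)
    create_or_update_short_trajectory(move_df, k_segment_max=50, inplace=True)
    return move_df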
|
[
"pymove.preprocessing.filters.by_bbox",
"pymove.preprocessing.stay_point_detection.create_or_update_move_stop_by_dist_time",
"pymove.utils.log.logger.debug",
"pymove.utils.log.logger.warning",
"pymove.preprocessing.segmentation.by_max_dist",
"numpy.concatenate",
"pymove.preprocessing.segmentation.by_dist_time_speed"
] |
[((2615, 2659), 'numpy.concatenate', 'np.concatenate', (['[idx_start, idx_end]'], {'axis': '(0)'}), '([idx_start, idx_end], axis=0)\n', (2629, 2659), True, 'import numpy as np\n'), ((5895, 5986), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""\nCreate or update boolean feature to detect points out of the bbox"""'], {}), '(\n """\nCreate or update boolean feature to detect points out of the bbox""")\n', (5907, 5986), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((5995, 6044), 'pymove.preprocessing.filters.by_bbox', 'filters.by_bbox', (['move_data', 'bbox'], {'filter_out': '(True)'}), '(move_data, bbox, filter_out=True)\n', (6010, 6044), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n'), ((6125, 6188), 'pymove.utils.log.logger.debug', 'logger.debug', (["('...Creating a new label named as %s' % new_label)"], {}), "('...Creating a new label named as %s' % new_label)\n", (6137, 6188), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((7765, 7816), 'pymove.utils.log.logger.debug', 'logger.debug', (['(message % max_time_between_adj_points)'], {}), '(message % max_time_between_adj_points)\n', (7777, 7816), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((9302, 9353), 'pymove.utils.log.logger.debug', 'logger.debug', (['(message % max_dist_between_adj_points)'], {}), '(message % max_dist_between_adj_points)\n', (9314, 9353), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((11410, 11469), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""\nCreate or update short trajectories..."""'], {}), '("""\nCreate or update short trajectories...""")\n', (11422, 11469), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((11472, 11741), 'pymove.preprocessing.segmentation.by_dist_time_speed', 'segmentation.by_dist_time_speed', (['move_data'], {'max_dist_between_adj_points': 'max_dist_between_adj_points', 'max_time_between_adj_points': 'max_time_between_adj_points', 'max_speed_between_adj_points': 'max_speed_between_adj_points', 'label_new_tid': 'label_tid', 'inplace': '(True)'}), '(move_data, max_dist_between_adj_points=\n max_dist_between_adj_points, max_time_between_adj_points=\n max_time_between_adj_points, max_speed_between_adj_points=\n max_speed_between_adj_points, label_new_tid=label_tid, inplace=True)\n', (11503, 11741), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n'), ((13487, 13524), 'pymove.utils.log.logger.debug', 'logger.debug', (['(message % max_time_stop)'], {}), '(message % max_time_stop)\n', (13499, 13524), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((13529, 13640), 'pymove.preprocessing.segmentation.by_max_dist', 'segmentation.by_max_dist', (['move_data'], {'max_dist_between_adj_points': '(0.0)', 'label_new_tid': 'label_tid', 'inplace': '(True)'}), '(move_data, max_dist_between_adj_points=0.0,\n label_new_tid=label_tid, inplace=True)\n', (13553, 13640), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n'), ((13680, 13727), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""Updating dist time speed values"""'], {}), "('Updating dist time speed values')\n", (13692, 13727), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((4040, 4086), 'pymove.utils.log.logger.debug', 'logger.debug', (['"""...Reset index for filtering\n"""'], {}), "('...Reset index for filtering\\n')\n", (4052, 4086), False, 'from pymove.utils.log import logger, 
timer_decorator\n'), ((4653, 4709), 'pymove.utils.log.logger.warning', 'logger.warning', (['"""...Distances features were not created"""'], {}), "('...Distances features were not created')\n", (4667, 4709), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((6262, 6312), 'pymove.utils.log.logger.debug', 'logger.debug', (["('...Setting % as True\\n' % new_label)"], {}), "('...Setting % as True\\n' % new_label)\n", (6274, 6312), False, 'from pymove.utils.log import logger, timer_decorator\n'), ((19774, 19889), 'pymove.preprocessing.stay_point_detection.create_or_update_move_stop_by_dist_time', 'stay_point_detection.create_or_update_move_stop_by_dist_time', (['move_data', 'dist_radius', 'time_radius'], {'inplace': '(True)'}), '(move_data,\n dist_radius, time_radius, inplace=True)\n', (19834, 19889), False, 'from pymove.preprocessing import filters, segmentation, stay_point_detection\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2018, IBM.
#
# This source code is licensed under the Apache License, Version 2.0 found in
# the LICENSE.txt file in the root directory of this source tree.
import os
from qiskit import *
import numpy as np
import time
import itertools
import math
from random import *
def inner_prod_circuit_ML(entangler_map, coupling_map, initial_layout,n, x_vec1, x_vec2, name = 'circuit',\
meas_string = None, measurement = True):
# setup the circuit
q = QuantumRegister("q", n)
c = ClassicalRegister("c", n)
trial_circuit = QuantumCircuit(q, c)
# 0: Set the qubits in superposition
#write input state from sample distribution
for r in range(len(x_vec1)):
trial_circuit.h(q[r])
trial_circuit.u1(2*x_vec1[r], q[r])
    # 1: Using entanglement, map the training data to a quantum feature map
for node in entangler_map:
for j in entangler_map[node]:
trial_circuit.cx(q[node], q[j])
trial_circuit.u1(2*(np.pi-x_vec1[node])*(np.pi-x_vec1[j]), q[j])
trial_circuit.cx(q[node], q[j])
    # 2: Inference with the quantum classifier.
for r in range(len(x_vec1)):
trial_circuit.h(q[r])
trial_circuit.u1(2*x_vec1[r], q[r])
for node in entangler_map:
for j in entangler_map[node]:
trial_circuit.cx(q[node], q[j])
trial_circuit.u1(2*(np.pi-x_vec1[node])*(np.pi-x_vec1[j]), q[j])
for node in entangler_map:
for j in entangler_map[node]:
trial_circuit.u1(-2*(np.pi-x_vec2[node])*(np.pi-x_vec2[j]), q[j])
trial_circuit.cx(q[node], q[j])
for r in range(len(x_vec2)):
trial_circuit.u1(-2*x_vec2[r], q[r])
trial_circuit.h(q[r])
for node in entangler_map:
for j in entangler_map[node]:
trial_circuit.cx(q[node], q[j])
trial_circuit.u1(-2*(np.pi-x_vec2[node])*(np.pi-x_vec2[j]), q[j])
trial_circuit.cx(q[node], q[j])
for r in range(len(x_vec2)):
trial_circuit.u1(-2*x_vec2[r], q[r])
trial_circuit.h(q[r])
trial_circuit.measure(q,c)
return name, trial_circuit
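# Illustrative input format (example values, not from the original file):
# `entangler_map` maps each control-qubit index to the target-qubit indices it
# entangles with, and `x_vec1` / `x_vec2` are the two feature vectors (one
# angle per qubit) whose kernel entry the circuit estimates, e.g. for n = 2:
#   entangler_map = {0: [1]}
#   name, circ = inner_prod_circuit_ML(entangler_map, None, None, 2,
#                                      [0.1, 0.4], [0.2, 0.3], name='AB0')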
# ***************
# ***************
# ***************
def matrify(vector, dimension):
    mat = np.eye(dimension, dimension)
for kk in range(dimension):
a = int(dimension*kk - kk*(kk+1)/2)
b = int(dimension*(kk+1)-((kk+1)*(kk+2)/2+1))
        mat[kk][kk+1:] = vector[a:b+1]
for i in range(dimension):
for j in range(i, dimension):
mat[j][i] = mat[i][j]
return mat
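# Worked example: `matrify` fills the strictly-upper-triangular entries of an
# identity matrix from `vector`, row by row, and then symmetrises it, e.g.
#   matrify([0.1, 0.2, 0.3], 3) ->
#     [[1.0, 0.1, 0.2],
#      [0.1, 1.0, 0.3],
#      [0.2, 0.3, 1.0]]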
def eval_svm_function(entangler_map, coupling_map, initial_layout,n,m,svm,test_input,class_labels, \
backend,shots):
sample_shots = 0
c1 = 1
c2 = 1.5
c3 = 2
my_zero_string = ''
for nq in range(n):
my_zero_string += '0'
correct_povm = 0
number_of_classes = len(class_labels)
cost=0
total_cost = 0
std_cost = 0
### RUN CIRCUITS
circuits = []
cp = []
cm = []
sequencesp = []
sequencesm = []
first_array = test_input[class_labels[0]]
second_array = test_input[class_labels[1]]
total_test = np.concatenate([first_array,second_array])
circuits = []
ind = 0
for a in range(len(total_test)):
for b in range(len(svm)):
cp, sequencesp = inner_prod_circuit_ML(entangler_map, coupling_map, initial_layout,n,\
svm[b],total_test[a],'AB'+str(ind),None,True)
circuits.append(sequencesp)
ind +=1
job_sim = execute(circuits, backend ,shots=shots)
sim_result = job_sim.result()
my_data = {}
for index, circuit_to_get_result in enumerate(circuits):
my_data[str(index)]=sim_result.get_counts(circuit_to_get_result)
ind = iter(my_data.items())
counts = dict(itertools.islice(ind,len(my_data)))
K_total = []
for v in range(len(counts)):
K_total.append(counts[str(v)][my_zero_string]/shots)
return K_total
|
[
"numpy.eye",
"numpy.concatenate"
] |
[((2266, 2294), 'numpy.eye', 'np.eye', (['dimension', 'dimension'], {}), '(dimension, dimension)\n', (2272, 2294), True, 'import numpy as np\n'), ((3185, 3228), 'numpy.concatenate', 'np.concatenate', (['[first_array, second_array]'], {}), '([first_array, second_array])\n', (3199, 3228), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""Copyright 2015 <NAME>.
Code supporting the book
Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
This is licensed under an MIT license. See the LICENSE.txt file
for more information.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import math
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import uniform
from numpy.random import randn
import scipy.stats
import random
if __name__ == '__main__':
N = 2000
pf = ParticleFilter(N, 100, 100)
#pf.particles[:,2] = np.random.randn(pf.N)*np.radians(10) + np.radians(45)
z = np.array([20, 20])
#pf.create_particles(mean=z, variance=40)
mu0 = np.array([0., 0.])
plt.plot(pf, weights=False)
fig = plt.gcf()
#fig.show()
#fig.canvas.draw()
#plt.ioff()
for x in range(10):
z[0] = x+1 + randn()*0.3
z[1] = x+1 + randn()*0.3
pf.predict((1,1), (0.2, 0.2))
pf.weight(z=z, var=.8)
neff = pf.neff()
print('neff', neff)
if neff < N/2 or N <= 2000:
pf.resample()
mu, var = pf.estimate()
if x == 0:
mu0 = mu
#print(mu - z)
#print(var)
plot(pf, weights=True)
#plt.plot(z[0], z[1], marker='v', c='r', ms=10)
plt.plot(x+1, x+1, marker='*', c='r', ms=10)
plt.scatter(mu[0], mu[1], c='g', s=100)#,
#s=min(500, abs((1./np.sum(var)))*20), alpha=0.5)
plt.plot([0,100], [0,100])
plt.tight_layout()
plt.pause(.002)
#fig.canvas.draw()
#pf.assign_speed_by_gaussian(1, 1.5)
#pf.move(h=[1,1], v=1.4, t=1)
#pf.control(mu-mu0)
mu0 = mu
plt.ion()
|
[
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.plot",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.pause",
"numpy.random.randn"
] |
[((747, 765), 'numpy.array', 'np.array', (['[20, 20]'], {}), '([20, 20])\n', (755, 765), True, 'import numpy as np\n'), ((826, 846), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (834, 846), True, 'import numpy as np\n'), ((850, 877), 'matplotlib.pyplot.plot', 'plt.plot', (['pf'], {'weights': '(False)'}), '(pf, weights=False)\n', (858, 877), True, 'import matplotlib.pyplot as plt\n'), ((893, 902), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (900, 902), True, 'import matplotlib.pyplot as plt\n'), ((1901, 1910), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (1908, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1474, 1522), 'matplotlib.pyplot.plot', 'plt.plot', (['(x + 1)', '(x + 1)'], {'marker': '"""*"""', 'c': '"""r"""', 'ms': '(10)'}), "(x + 1, x + 1, marker='*', c='r', ms=10)\n", (1482, 1522), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1567), 'matplotlib.pyplot.scatter', 'plt.scatter', (['mu[0]', 'mu[1]'], {'c': '"""g"""', 's': '(100)'}), "(mu[0], mu[1], c='g', s=100)\n", (1539, 1567), True, 'import matplotlib.pyplot as plt\n'), ((1650, 1678), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 100]', '[0, 100]'], {}), '([0, 100], [0, 100])\n', (1658, 1678), True, 'import matplotlib.pyplot as plt\n'), ((1686, 1704), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1702, 1704), True, 'import matplotlib.pyplot as plt\n'), ((1714, 1730), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.002)'], {}), '(0.002)\n', (1723, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1012, 1019), 'numpy.random.randn', 'randn', ([], {}), '()\n', (1017, 1019), False, 'from numpy.random import randn\n'), ((1046, 1053), 'numpy.random.randn', 'randn', ([], {}), '()\n', (1051, 1053), False, 'from numpy.random import randn\n')]
|
import os
import cv2
import numpy as np
import torch
import pickle
import argparse
from configs import paths
from utils.cam_utils import perspective_project_torch
from models.smpl_official import SMPL
def rotate_2d(pt_2d, rot_rad):
x = pt_2d[0]
y = pt_2d[1]
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
xx = x * cs - y * sn
yy = x * sn + y * cs
return np.array([xx, yy], dtype=np.float32)
def gen_trans_from_patch_cv(c_x, c_y, src_width, src_height, dst_width, dst_height, scale, rot, inv=False):
# augment size with scale
src_w = src_width * scale
src_h = src_height * scale
src_center = np.zeros(2)
src_center[0] = c_x
src_center[1] = c_y # np.array([c_x, c_y], dtype=np.float32)
# augment rotation
rot_rad = np.pi * rot / 180
src_downdir = rotate_2d(np.array([0, src_h * 0.5], dtype=np.float32), rot_rad)
src_rightdir = rotate_2d(np.array([src_w * 0.5, 0], dtype=np.float32), rot_rad)
dst_w = dst_width
dst_h = dst_height
dst_center = np.array([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)
dst_downdir = np.array([0, dst_h * 0.5], dtype=np.float32)
dst_rightdir = np.array([dst_w * 0.5, 0], dtype=np.float32)
src = np.zeros((3, 2), dtype=np.float32)
src[0, :] = src_center
src[1, :] = src_center + src_downdir
src[2, :] = src_center + src_rightdir
dst = np.zeros((3, 2), dtype=np.float32)
dst[0, :] = dst_center
dst[1, :] = dst_center + dst_downdir
dst[2, :] = dst_center + dst_rightdir
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def generate_patch_image_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height,
do_flip, scale, rot):
img = cvimg.copy()
img_height, img_width, img_channels = img.shape
if do_flip:
img = img[:, ::-1, :]
c_x = img_width - c_x - 1
trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot, inv=False)
img_patch = cv2.warpAffine(img, trans, (int(patch_width), int(patch_height)),
flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
return img_patch, trans
def get_single_image_crop(image, bbox, scale=1.2, crop_size=224):
if isinstance(image, str):
if os.path.isfile(image):
image = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2RGB)
else:
print(image)
raise BaseException(image, 'is not a valid file!')
elif not isinstance(image, np.ndarray):
raise('Unknown type for object', type(image))
crop_image, trans = generate_patch_image_cv(
cvimg=image.copy(),
c_x=bbox[0],
c_y=bbox[1],
bb_width=bbox[2],
bb_height=bbox[3],
patch_width=crop_size,
patch_height=crop_size,
do_flip=False,
scale=scale,
rot=0,
)
return crop_image
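# Minimal usage sketch (illustrative values): the bbox convention here is
# [center_x, center_y, width, height] in pixels, matching how
# pw3d_eval_extract builds `centre_wh_bbox` below.
#   image = cv2.imread('frame.jpg')  # any HxWx3 image
#   crop = get_single_image_crop(image, [320.0, 240.0, 200.0, 200.0],
#                                scale=1.2, crop_size=224)  # -> (224, 224, 3)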
def pw3d_eval_extract(dataset_path, out_path, crop_wh=512):
bbox_scale_factor = 1.2
smpl_male = SMPL(paths.SMPL, batch_size=1, gender='male').to(device)
smpl_female = SMPL(paths.SMPL, batch_size=1, gender='female').to(device)
# imgnames_, scales_, centers_, parts_ = [], [], [], []
cropped_frame_fnames_, whs_, centers_, = [], [], []
poses_, shapes_, genders_ = [], [], []
sequence_files = sorted([os.path.join(dataset_path, 'sequenceFiles', 'test', f)
for f in os.listdir(os.path.join(dataset_path, 'sequenceFiles', 'test'))
if f.endswith('.pkl')])
for filename in sequence_files:
print('\n\n\n', filename)
with open(filename, 'rb') as f:
data = pickle.load(f, encoding='latin1')
smpl_poses = data['poses'] # list of (num frames, 72) pose params for each person
smpl_betas = data['betas'] # list of (10,) or (300,) shape params for each person
poses2d = data['poses2d'] # list of (num frames, 3, 18) 2d kps for each person
cam_extrinsics = data['cam_poses'] # array of (num frames, 4, 4) cam extrinsics
cam_K = data['cam_intrinsics'] # array of (3, 3) cam intrinsics.
genders = data['genders'] # list of genders for each person
valid = data['campose_valid'] # list of (num frames,) boolean arrays for each person, indicating whether camera pose has been aligned to that person (for trans).
trans = data['trans'] # list of (num frames, 3) translations in SMPL space for each person, to align them with image data (after projection)
num_people = len(smpl_poses) # Number of people in sequence
num_frames = len(smpl_poses[0]) # Number of frames in sequence
seq_name = str(data['sequence'])
print('smpl poses', len(smpl_poses), smpl_poses[0].shape,
'smpl betas', len(smpl_betas), smpl_betas[0].shape,
'poses2d', len(poses2d), poses2d[0].shape,
'global poses', cam_extrinsics.shape,
'cam_K', cam_K.shape,
'genders', genders, type(genders),
'valid', len(valid), valid[0].shape, np.sum(valid[0]), np.sum(valid[-1]),
'trans', len(trans), trans[0].shape,
'num people', num_people, 'num frames', num_frames, 'seq name', seq_name, '\n')
cam_K = torch.from_numpy(cam_K[None, :]).float().to(device)
for person_num in range(num_people):
# Get valid frames flags, shape and gender
valid_frames = valid[person_num].astype(np.bool)
shape = smpl_betas[person_num][:10]
torch_shape = torch.from_numpy(shape[None, :]).float().to(device)
gender = genders[person_num]
for frame_num in range(num_frames):
if valid_frames[frame_num]: # Only proceed if frame has valid camera pose for person
# Get bounding box using projected vertices
pose = smpl_poses[person_num][frame_num]
cam_R = cam_extrinsics[frame_num][:3, :3]
cam_t = cam_extrinsics[frame_num][:3, 3]
frame_trans = trans[person_num][frame_num]
pose = torch.from_numpy(pose[None, :]).float().to(device)
cam_t = torch.from_numpy(cam_t[None, :]).float().to(device)
cam_R = torch.from_numpy(cam_R[None, :, :]).float().to(device)
frame_trans = torch.from_numpy(frame_trans[None, :]).float().to(device)
if gender == 'm':
smpl_out = smpl_male(body_pose=pose[:, 3:],
global_orient=pose[:, :3],
betas=torch_shape,
transl=frame_trans)
elif gender == 'f':
smpl_out = smpl_female(body_pose=pose[:, 3:],
global_orient=pose[:, :3],
betas=torch_shape,
transl=frame_trans)
vertices = smpl_out.vertices
projected_aligned_vertices = perspective_project_torch(vertices, cam_R,
cam_t, cam_K=cam_K)
projected_aligned_vertices = projected_aligned_vertices[0].cpu().detach().numpy()
bbox = [min(projected_aligned_vertices[:, 0]),
min(projected_aligned_vertices[:, 1]),
max(projected_aligned_vertices[:, 0]),
                            max(projected_aligned_vertices[:, 1])] # (x1, y1, x2, y2) where x is cols and y is rows from the top-left corner.
center = [(bbox[2] + bbox[0]) / 2, (bbox[3] + bbox[1]) / 2]
wh = max(bbox[2] - bbox[0], bbox[3] - bbox[1])
# Save cropped frame using bounding box
image_fpath = os.path.join(dataset_path, 'imageFiles', seq_name,
'image_{}.jpg'.format(str(frame_num).zfill(5)))
image = cv2.imread(image_fpath)
centre_wh_bbox = center + [wh, wh]
cropped_image = get_single_image_crop(image, centre_wh_bbox,
scale=bbox_scale_factor,
crop_size=crop_wh)
cropped_image_fname = seq_name + '_image_{}_person_{}.png'.format(str(frame_num).zfill(5),
str(person_num).zfill(3))
cropped_image_fpath = os.path.join(out_path, 'cropped_frames',
cropped_image_fname)
cv2.imwrite(cropped_image_fpath, cropped_image)
# Transform global using cam extrinsics pose before storing
pose = pose[0].cpu().detach().numpy()
cam_R = cam_R[0].cpu().detach().numpy()
pose[:3] = cv2.Rodrigues(np.dot(cam_R, cv2.Rodrigues(pose[:3])[0]))[0].T[0]
# Store everything in lists
cropped_frame_fnames_.append(cropped_image_fname)
centers_.append(center)
whs_.append(wh)
poses_.append(pose)
shapes_.append(shape)
genders_.append(gender)
# print(cropped_image_fname, shape.shape, pose.shape, center, wh, gender)
# Store all data in npz file.
out_file = os.path.join(out_path, '3dpw_test.npz')
np.savez(out_file, imgname=cropped_frame_fnames_,
center=centers_,
wh=whs_,
pose=poses_,
shape=shapes_,
gender=genders_)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_path', type=str)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print('\nDevice: {}'.format(device))
out_path = os.path.join(args.dataset_path, 'test')
if not os.path.isdir(out_path):
os.makedirs(os.path.join(out_path, 'cropped_frames'))
pw3d_eval_extract(args.dataset_path, out_path)
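# Example invocation (the script name is illustrative):
#   python pw3d_eval_extract.py --dataset_path /path/to/3DPW
# Cropped frames are written to <dataset_path>/test/cropped_frames and the
# per-frame annotations to <dataset_path>/test/3dpw_test.npz.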
|
[
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"numpy.sin",
"numpy.savez",
"argparse.ArgumentParser",
"os.path.isdir",
"pickle.load",
"os.path.isfile",
"numpy.cos",
"cv2.imread",
"cv2.imwrite",
"utils.cam_utils.perspective_project_torch",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"cv2.Rodrigues",
"models.smpl_official.SMPL",
"numpy.float32"
] |
[((377, 413), 'numpy.array', 'np.array', (['[xx, yy]'], {'dtype': 'np.float32'}), '([xx, yy], dtype=np.float32)\n', (385, 413), True, 'import numpy as np\n'), ((632, 643), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (640, 643), True, 'import numpy as np\n'), ((1018, 1072), 'numpy.array', 'np.array', (['[dst_w * 0.5, dst_h * 0.5]'], {'dtype': 'np.float32'}), '([dst_w * 0.5, dst_h * 0.5], dtype=np.float32)\n', (1026, 1072), True, 'import numpy as np\n'), ((1091, 1135), 'numpy.array', 'np.array', (['[0, dst_h * 0.5]'], {'dtype': 'np.float32'}), '([0, dst_h * 0.5], dtype=np.float32)\n', (1099, 1135), True, 'import numpy as np\n'), ((1155, 1199), 'numpy.array', 'np.array', (['[dst_w * 0.5, 0]'], {'dtype': 'np.float32'}), '([dst_w * 0.5, 0], dtype=np.float32)\n', (1163, 1199), True, 'import numpy as np\n'), ((1211, 1245), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (1219, 1245), True, 'import numpy as np\n'), ((1367, 1401), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (1375, 1401), True, 'import numpy as np\n'), ((9858, 9897), 'os.path.join', 'os.path.join', (['out_path', '"""3dpw_test.npz"""'], {}), "(out_path, '3dpw_test.npz')\n", (9870, 9897), False, 'import os\n'), ((9902, 10026), 'numpy.savez', 'np.savez', (['out_file'], {'imgname': 'cropped_frame_fnames_', 'center': 'centers_', 'wh': 'whs_', 'pose': 'poses_', 'shape': 'shapes_', 'gender': 'genders_'}), '(out_file, imgname=cropped_frame_fnames_, center=centers_, wh=whs_,\n pose=poses_, shape=shapes_, gender=genders_)\n', (9910, 10026), True, 'import numpy as np\n'), ((10130, 10155), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10153, 10155), False, 'import argparse\n'), ((10487, 10526), 'os.path.join', 'os.path.join', (['args.dataset_path', '"""test"""'], {}), "(args.dataset_path, 'test')\n", (10499, 10526), False, 'import os\n'), ((283, 298), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (289, 298), True, 'import numpy as np\n'), ((300, 315), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (306, 315), True, 'import numpy as np\n'), ((816, 860), 'numpy.array', 'np.array', (['[0, src_h * 0.5]'], {'dtype': 'np.float32'}), '([0, src_h * 0.5], dtype=np.float32)\n', (824, 860), True, 'import numpy as np\n'), ((900, 944), 'numpy.array', 'np.array', (['[src_w * 0.5, 0]'], {'dtype': 'np.float32'}), '([src_w * 0.5, 0], dtype=np.float32)\n', (908, 944), True, 'import numpy as np\n'), ((2427, 2448), 'os.path.isfile', 'os.path.isfile', (['image'], {}), '(image)\n', (2441, 2448), False, 'import os\n'), ((10538, 10561), 'os.path.isdir', 'os.path.isdir', (['out_path'], {}), '(out_path)\n', (10551, 10561), False, 'import os\n'), ((1564, 1579), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (1574, 1579), True, 'import numpy as np\n'), ((1581, 1596), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (1591, 1596), True, 'import numpy as np\n'), ((1647, 1662), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (1657, 1662), True, 'import numpy as np\n'), ((1664, 1679), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (1674, 1679), True, 'import numpy as np\n'), ((3152, 3197), 'models.smpl_official.SMPL', 'SMPL', (['paths.SMPL'], {'batch_size': '(1)', 'gender': '"""male"""'}), "(paths.SMPL, batch_size=1, gender='male')\n", (3156, 3197), False, 'from models.smpl_official import SMPL\n'), ((3227, 3274), 'models.smpl_official.SMPL', 'SMPL', (['paths.SMPL'], {'batch_size': 
'(1)', 'gender': '"""female"""'}), "(paths.SMPL, batch_size=1, gender='female')\n", (3231, 3274), False, 'from models.smpl_official import SMPL\n'), ((3476, 3530), 'os.path.join', 'os.path.join', (['dataset_path', '"""sequenceFiles"""', '"""test"""', 'f'], {}), "(dataset_path, 'sequenceFiles', 'test', f)\n", (3488, 3530), False, 'import os\n'), ((3816, 3849), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""latin1"""'}), "(f, encoding='latin1')\n", (3827, 3849), False, 'import pickle\n'), ((5231, 5247), 'numpy.sum', 'np.sum', (['valid[0]'], {}), '(valid[0])\n', (5237, 5247), True, 'import numpy as np\n'), ((5249, 5266), 'numpy.sum', 'np.sum', (['valid[-1]'], {}), '(valid[-1])\n', (5255, 5266), True, 'import numpy as np\n'), ((10392, 10417), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10415, 10417), False, 'import torch\n'), ((10583, 10623), 'os.path.join', 'os.path.join', (['out_path', '"""cropped_frames"""'], {}), "(out_path, 'cropped_frames')\n", (10595, 10623), False, 'import os\n'), ((2483, 2500), 'cv2.imread', 'cv2.imread', (['image'], {}), '(image)\n', (2493, 2500), False, 'import cv2\n'), ((3580, 3631), 'os.path.join', 'os.path.join', (['dataset_path', '"""sequenceFiles"""', '"""test"""'], {}), "(dataset_path, 'sequenceFiles', 'test')\n", (3592, 3631), False, 'import os\n'), ((7327, 7389), 'utils.cam_utils.perspective_project_torch', 'perspective_project_torch', (['vertices', 'cam_R', 'cam_t'], {'cam_K': 'cam_K'}), '(vertices, cam_R, cam_t, cam_K=cam_K)\n', (7352, 7389), False, 'from utils.cam_utils import perspective_project_torch\n'), ((8324, 8347), 'cv2.imread', 'cv2.imread', (['image_fpath'], {}), '(image_fpath)\n', (8334, 8347), False, 'import cv2\n'), ((8909, 8970), 'os.path.join', 'os.path.join', (['out_path', '"""cropped_frames"""', 'cropped_image_fname'], {}), "(out_path, 'cropped_frames', cropped_image_fname)\n", (8921, 8970), False, 'import os\n'), ((9046, 9093), 'cv2.imwrite', 'cv2.imwrite', (['cropped_image_fpath', 'cropped_image'], {}), '(cropped_image_fpath, cropped_image)\n', (9057, 9093), False, 'import cv2\n'), ((5430, 5462), 'torch.from_numpy', 'torch.from_numpy', (['cam_K[None, :]'], {}), '(cam_K[None, :])\n', (5446, 5462), False, 'import torch\n'), ((5717, 5749), 'torch.from_numpy', 'torch.from_numpy', (['shape[None, :]'], {}), '(shape[None, :])\n', (5733, 5749), False, 'import torch\n'), ((6299, 6330), 'torch.from_numpy', 'torch.from_numpy', (['pose[None, :]'], {}), '(pose[None, :])\n', (6315, 6330), False, 'import torch\n'), ((6378, 6410), 'torch.from_numpy', 'torch.from_numpy', (['cam_t[None, :]'], {}), '(cam_t[None, :])\n', (6394, 6410), False, 'import torch\n'), ((6458, 6493), 'torch.from_numpy', 'torch.from_numpy', (['cam_R[None, :, :]'], {}), '(cam_R[None, :, :])\n', (6474, 6493), False, 'import torch\n'), ((6547, 6585), 'torch.from_numpy', 'torch.from_numpy', (['frame_trans[None, :]'], {}), '(frame_trans[None, :])\n', (6563, 6585), False, 'import torch\n'), ((9352, 9375), 'cv2.Rodrigues', 'cv2.Rodrigues', (['pose[:3]'], {}), '(pose[:3])\n', (9365, 9375), False, 'import cv2\n')]
|
import numpy as np
import torch
import torch.nn.functional as F
import skimage.measure as sk
import time
import pyrender
import pymesh
import trimesh
from pyemd import emd_samples
import chamfer_python
import binvox_rw
from glob import glob
D2R = np.pi/180.0
voxsize = 32
sample_size = 2048
def RotatePhi(phi):
return np.array([[1, 0, 0, 0],
[0, np.cos(D2R*phi), np.sin(D2R*phi), 0],
[0, -np.sin(D2R*phi), np.cos(D2R*phi), 0],
[0, 0, 0, 1]])
def RotateAzimuth(phi):
return np.array([[np.cos(D2R*phi), np.sin(D2R*phi), 0, 0],
[-np.sin(D2R*phi), np.cos(D2R*phi), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]])
def RotateAlongAxis(theta, a, b, c):
return np.array([[a**2*(1-np.cos(D2R*theta)) + np.cos(D2R*theta), a*b*(1-np.cos(D2R*theta)) - c*np.sin(D2R*theta), a*c*(1-np.cos(D2R*theta)) + b*np.sin(D2R*theta), 0],
[a*b*(1-np.cos(D2R*theta)) + c*np.sin(D2R*theta), b**2*(1-np.cos(D2R*theta)) + np.cos(D2R*theta), b*c*(1-np.cos(D2R*theta)) - a*np.sin(D2R*theta), 0],
[a*c*(1-np.cos(D2R*theta)) - b*np.sin(D2R*theta), b*c*(1-np.cos(D2R*theta)) + a*np.sin(D2R*theta), c**2*(1-np.cos(D2R*theta)) + np.cos(D2R*theta), 0],
[0, 0, 0, 1]])
# generate meshgrid
# [depth, height, width]
def get_meshgrid(depth = voxsize, height = voxsize, width = voxsize, ratio = 1.0):
x_mesh = np.repeat(np.repeat(np.linspace(-ratio, ratio, width)[np.newaxis, :], height, axis=0)[np.newaxis, :, :], depth, axis=0)
y_mesh = np.repeat(np.repeat(np.linspace(-ratio, ratio, height)[:, np.newaxis], width, axis=-1)[np.newaxis, :, :], depth, axis=0)
z_mesh = np.repeat(np.repeat(np.linspace(-ratio, ratio, depth)[:, np.newaxis], height, axis= -1)[:,:, np.newaxis], width, axis=-1)
x_expand = np.expand_dims(x_mesh, axis = -1)
y_expand = np.expand_dims(y_mesh, axis = -1)
z_expand = np.expand_dims(z_mesh, axis = -1)
meshgrid = np.concatenate((x_expand, np.concatenate((y_expand, z_expand), axis = -1)), axis = -1)
return meshgrid
# transform meshgrid given transformation matrix
def get_transformed_meshgrid(meshgrid, transform_matrix, depth = voxsize, height = voxsize, width = voxsize):
meshgrid_flat = meshgrid.transpose(3, 0, 1, 2).reshape(3,-1)
one = np.ones((1, meshgrid_flat.shape[1]))
meshgrid_expand = np.vstack((meshgrid_flat, one))
transformed_meshgrid = (transform_matrix @ meshgrid_expand)
transformed_meshgrid = (transformed_meshgrid[0:3, :]/transformed_meshgrid[3, :]).reshape(3, depth, height, width).transpose(1, 2, 3, 0)
return torch.tensor(transformed_meshgrid, dtype=torch.float)
######################
# single transform #
######################
# compute transformation matrix
def get_transform_matrix(azimuth, elevation, scale = np.sqrt(3)):
rot_base = RotateAlongAxis(90, 0, 0, 1) @ RotateAlongAxis(-90, 1, 0, 0)
rot_m = RotateAlongAxis(azimuth, 0, 1, 0) @ RotateAlongAxis(-elevation, 1, 0, 0) @ rot_base
sca_m = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
return rot_m @ sca_m
# group function for transform voxel in pytorch tensor
def get_transformed_vox(vox_torch, azimuth, elevation, scale = np.sqrt(3)):
meshgird = get_transformed_meshgrid(get_meshgrid(voxsize, voxsize, voxsize), get_transform_matrix(azimuth, elevation, scale), voxsize, voxsize, voxsize)
transformedVox = F.grid_sample(vox_torch, meshgird.unsqueeze(0), mode='bilinear', padding_mode='zeros', align_corners=False)
return transformedVox[0]
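# Minimal usage sketch (illustrative shapes): F.grid_sample expects a batched
# (N, C, D, H, W) volume, so a single 32^3 occupancy grid is passed in as
# (1, 1, 32, 32, 32); get_transformed_vox then returns a (1, 32, 32, 32) tensor.
#   vox = torch.rand(1, 1, 32, 32, 32)
#   rotated = get_transformed_vox(vox, azimuth=90, elevation=30)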
########################
# Relative transform #
########################
def get_relative_transform_matrix(azimuth_1, elevation_1, azimuth_2, elevation_2):
rot_m = RotateAlongAxis(elevation_1, 0, 0, 1) @ RotateAlongAxis(azimuth_1, 1, 0, 0) @ RotateAlongAxis(azimuth_2, 1, 0, 0) @ RotateAlongAxis(elevation_2, 0, 0, 1)
scale = 1
#scale = 1/np.sqrt(3)
sca_m = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
return rot_m @ sca_m
def get_relative_transformed_vox(vox_torch, azimuth_1, elevation_1, azimuth_2, elevation_2, device, voxsize = 32, align_mode = 'zeros'):
meshgird = get_transformed_meshgrid(get_meshgrid(voxsize, voxsize, voxsize),
get_relative_transform_matrix(azimuth_1, elevation_1, azimuth_2, elevation_2),
voxsize, voxsize, voxsize).to(device)
transformedVox = F.grid_sample(vox_torch, meshgird.unsqueeze(0), mode='bilinear', padding_mode=align_mode, align_corners=False)
return transformedVox
#########
# SDF #
#########
# transformation function to rotate sdf indice
def get_transform_matrix_sdf(azimuth, elevation, scale = 1.0):
rot_base = RotateAlongAxis(90, 0, 1, 0) @ RotateAlongAxis(-90, 1, 0, 0)
rot_m = RotateAlongAxis(azimuth, 0, 1, 0) @ RotateAlongAxis(-elevation, 0, 0, 1) @ rot_base
sca_m = np.array([[scale, 0, 0, 0],
[0, scale, 0, 0],
[0, 0, scale, 0],
[0, 0, 0, 1]])
return rot_m @ sca_m
# group function to get transformed sdf indice
def get_transformed_indices(indices, azimuth, elevation, scale = 1/np.sqrt(3)):
transform_matrix = get_transform_matrix_sdf(-azimuth, -elevation, scale)[0:3, 0:3]
transformed_indices = indices @ transform_matrix
return transformed_indices
# convert sdf to voxel
def sdf2Voxel(sample_pt, sample_sdf_val, fill = 0):
sample_pt = ((sample_pt + np.array([0.5, 0.5, 0.5]))* voxsize).astype(int)
sample_pt = np.clip(sample_pt, 0, voxsize-1)
v = fill * np.ones((voxsize, voxsize, voxsize))
v[sample_pt[:,0], sample_pt[:,1], sample_pt[:,2]] = sample_sdf_val
return v
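# Minimal usage sketch (illustrative values): sample points are expected in
# [-0.5, 0.5]^3 and are quantised onto the 32^3 grid; cells with no sample
# keep the `fill` value.
#   pts = np.random.uniform(-0.5, 0.5, size=(2048, 3))
#   vals = np.random.uniform(-0.1, 0.1, size=2048)
#   v = sdf2Voxel(pts, vals, fill=1.0)  # -> (32, 32, 32) array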
# advanced indexing 2x2x2 context from voxel
def getContext(sample_pt_query, vox):
    # sample_pt_query: b x m x 3
    # vox: b x c x dim x dim x dim
channel_size = vox.shape[1]
batch_size, sample_size, _ = sample_pt_query.shape
meshgrid_base = torch.Tensor(np.meshgrid(np.arange(0, batch_size), np.arange(0, channel_size), np.arange(0, 2), np.arange(0, 2), np.arange(0, 2))).int()
context = torch.empty((batch_size, sample_size, channel_size, 2, 2, 2))
for j in range(context.shape[1]):
context[:, j, :, :, :, :] = vox[
meshgrid_base[0].long(),
meshgrid_base[1].long(),
(meshgrid_base[2] + sample_pt_query[:, j, 0].reshape(1, -1, 1, 1, 1)).long(),
(meshgrid_base[3] + sample_pt_query[:, j, 1].reshape(1, -1, 1, 1, 1)).long(),
(meshgrid_base[4] + sample_pt_query[:, j, 2].reshape(1, -1, 1, 1, 1)).long()
].transpose(0, 1)
# b x c x m x 2 x 2 x 2
return context.transpose(1, 2)
def trilinearInterpolation(context, dx, dy, dz):
v0 = context[:, :, :, 0, 0, 0]*(1-dx)*(1-dy)*(1-dz)
v1 = context[:, :, :, 1, 0, 0]*dx*(1-dy)*(1-dz)
v2 = context[:, :, :, 0, 1, 0]*(1-dx)*dy*(1-dz)
v3 = context[:, :, :, 1, 1, 0]*dx*dy*(1-dz)
v4 = context[:, :, :, 0, 0, 1]*(1-dx)*(1-dy)*dz
v5 = context[:, :, :, 1, 0, 1]*dx*(1-dy)*dz
v6 = context[:, :, :, 0, 1, 1]*(1-dx)*dy*dz
v7 = context[:, :, :, 1, 1, 1]*dx*dy*dz
    # b x c x m
return v0 + v1 + v2 + v3 + v4 + v5 + v6 + v7
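# The eight terms above implement standard trilinear interpolation: for corner
# offsets (i, j, k) in {0, 1}^3 the weight is
#   w_ijk = (dx if i else 1 - dx) * (dy if j else 1 - dy) * (dz if k else 1 - dz)
# and the result is sum_ijk w_ijk * context[..., i, j, k], i.e. a convex
# combination of the 2x2x2 neighbouring feature vectors gathered by getContext.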
# generate mesh from continuous model
def generate_mesh(continuous, unet, out_vox, z, device, vox_res = 32, grid_res = 64, batch_size = 32, azimuth = 0, elevation = 0, isosurface = 0.0, conditional=True):
start_time = time.time()
vox = np.zeros((grid_res, grid_res, grid_res))
idx = np.array(np.where(vox == 0))
# normalize
sample_pt = (torch.t(torch.tensor(idx/grid_res, dtype=torch.float)) - 0.5)
sample_pt = sample_pt.reshape(-1, sample_size, 3)
sample_pt_normalized = sample_pt + torch.tensor([0.5, 0.5, 0.5])
# (0, 63)
sample_pt_scale = torch.clamp(sample_pt_normalized* (vox_res-1), 0, (vox_res-1)-1e-5)
# (0, 62]
sample_pt_query = torch.clamp((sample_pt_scale).int(), 0, (vox_res-2))
sample_pt_distance = sample_pt_scale - sample_pt_query
vox_feature = unet(out_vox, z).repeat(batch_size, 1, 1, 1, 1).detach().cpu() if conditional else unet(out_vox).repeat(batch_size, 1, 1, 1, 1).detach().cpu()
#print("--- %s seconds ---" % (time.time() - start_time))
#print("Data generation")
pre_sdf_list = []
for i in range(int(sample_pt.shape[0]/batch_size)):
start = i*batch_size
end = (i + 1)*batch_size
context = getContext(sample_pt_query[start:end, :, :], vox_feature)
dx = sample_pt_distance[start:end, :, 0].unsqueeze(1)
dy = sample_pt_distance[start:end, :, 1].unsqueeze(1)
dz = sample_pt_distance[start:end, :, 2].unsqueeze(1)
# local feature
con = trilinearInterpolation(context, dx, dy, dz).to(device)
# global feature
latent = z.squeeze(-1).squeeze(-1).repeat(batch_size, 1, sample_size)
# point
sample_pt_batch = sample_pt[start:end, :, :].transpose(-1, -2).to(device)
sample_pt_batch = sample_pt_batch.transpose(-1, -2).reshape(-1, 3)
con_batch = con.transpose(-1, -2).reshape(-1, 32)
z_batch = latent.transpose(-1, -2).reshape(-1, 256)
# avoid occupying gpu memory
pred_sdf_batch = continuous(sample_pt_batch,
con_batch,
z_batch,
).squeeze(1).detach().cpu()
pre_sdf_list.append(pred_sdf_batch)
pred_sdf = torch.cat(pre_sdf_list).reshape(-1,)
vox[tuple([idx[0], idx[1], idx[2]])] = pred_sdf[:].numpy()
#print(vox.shape)
#print("--- %s seconds ---" % (time.time() - start_time))
#print("Success generation")
try:
verts, faces, _, _ = sk.marching_cubes_lewiner(vox, level=isosurface)
#mesh = pymesh.form_mesh(verts, faces)
#transform_matrix = get_relative_transform_matrix(azimuth, elevation, 0, 0)[0:3, 0:3]
#transformed_vertices = mesh.vertices @ transform_matrix
mesh = trimesh.Trimesh(verts, faces)
#trimesh.repair.fix_inversion(mesh)
trimesh.repair.fill_holes(mesh)
mesh_py = pymesh.form_mesh(mesh.vertices, mesh.faces)
return mesh_py
    except Exception:
print("Failed generation")
return None
# generate mesh from voxel
def mesh_from_voxel(vox_torch):
verts, faces, _, _ = sk.marching_cubes_lewiner(vox_torch.detach().cpu().numpy(), level=0.5)
mesh_py = pymesh.form_mesh(2*verts, faces)
mesh = trimesh.Trimesh(mesh_py.vertices,mesh_py.faces)
trimesh.repair.fix_inversion(mesh)
trimesh.repair.fill_holes(mesh)
return mesh
# render a mesh with pyrender render
def render(mesh):
model = trimesh.Trimesh(mesh.vertices,mesh.faces)
mesh_py = pyrender.Mesh.from_trimesh(model)
scene = pyrender.Scene()
scene.add(mesh_py)
viewer = pyrender.Viewer(scene, use_raymond_lighting=True, point_size=2)
def mesh_test(mesh_py, dim = 64, count = 16384):
mesh = trimesh.Trimesh(mesh_py.vertices, mesh_py.faces)
samples, _ = trimesh.sample.sample_surface(mesh, count)
samples_batch = torch.tensor(samples.reshape(64, -1, 3), dtype = torch.float)
grid = pymesh.VoxelGrid(2./dim)
grid.insert_mesh(mesh_py)
grid.create_grid()
    idx = ((grid.mesh.vertices + 1.1) / 2.4 * dim).astype(int)
v = np.zeros([dim, dim, dim])
v[idx[:,0], idx[:,1], idx[:,2]] = 1
return samples_batch, samples, v
# compute chamfer distance, earth mover's distance and intersection over union between two meshes
def get_test_results(mesh_py_1, mesh_py_2):
samples_batch_1, samples_1, v1 = mesh_test(mesh_py_1)
samples_batch_2, samples_2, v2 = mesh_test(mesh_py_2)
dist1, dist2, _, _ = chamfer_python.distChamfer(samples_batch_1, samples_batch_2)
chamfer_dist = torch.mean(dist1) + torch.mean(dist2)
emd = emd_samples(samples_1, samples_2)
intersection = np.sum(np.logical_and(v1, v2))
union = np.sum(np.logical_or(v1, v2))
iou = intersection/union
return chamfer_dist, emd, iou
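# Illustrative usage sketch (hypothetical variable names): compare a generated
# mesh against a ground-truth mesh. Both arguments are pymesh meshes, e.g. as
# returned by generate_mesh() or mesh_from_voxel().
#
#   cd, emd, iou = get_test_results(pred_mesh_py, gt_mesh_py)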
|
[
"numpy.clip",
"numpy.sqrt",
"pyemd.emd_samples",
"trimesh.sample.sample_surface",
"skimage.measure.marching_cubes_lewiner",
"numpy.array",
"numpy.sin",
"numpy.arange",
"numpy.where",
"torch.mean",
"numpy.linspace",
"pymesh.form_mesh",
"numpy.vstack",
"numpy.concatenate",
"pyrender.Mesh.from_trimesh",
"numpy.ones",
"trimesh.repair.fill_holes",
"pyrender.Viewer",
"trimesh.Trimesh",
"numpy.cos",
"pyrender.Scene",
"time.time",
"torch.empty",
"torch.clamp",
"torch.cat",
"trimesh.repair.fix_inversion",
"numpy.logical_and",
"numpy.logical_or",
"torch.tensor",
"numpy.zeros",
"chamfer_python.distChamfer",
"numpy.expand_dims",
"pymesh.VoxelGrid"
] |
[((2036, 2067), 'numpy.expand_dims', 'np.expand_dims', (['x_mesh'], {'axis': '(-1)'}), '(x_mesh, axis=-1)\n', (2050, 2067), True, 'import numpy as np\n'), ((2085, 2116), 'numpy.expand_dims', 'np.expand_dims', (['y_mesh'], {'axis': '(-1)'}), '(y_mesh, axis=-1)\n', (2099, 2116), True, 'import numpy as np\n'), ((2134, 2165), 'numpy.expand_dims', 'np.expand_dims', (['z_mesh'], {'axis': '(-1)'}), '(z_mesh, axis=-1)\n', (2148, 2165), True, 'import numpy as np\n'), ((2535, 2571), 'numpy.ones', 'np.ones', (['(1, meshgrid_flat.shape[1])'], {}), '((1, meshgrid_flat.shape[1]))\n', (2542, 2571), True, 'import numpy as np\n'), ((2594, 2625), 'numpy.vstack', 'np.vstack', (['(meshgrid_flat, one)'], {}), '((meshgrid_flat, one))\n', (2603, 2625), True, 'import numpy as np\n'), ((2851, 2904), 'torch.tensor', 'torch.tensor', (['transformed_meshgrid'], {'dtype': 'torch.float'}), '(transformed_meshgrid, dtype=torch.float)\n', (2863, 2904), False, 'import torch\n'), ((3062, 3072), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3069, 3072), True, 'import numpy as np\n'), ((3274, 3352), 'numpy.array', 'np.array', (['[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]]'], {}), '([[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]])\n', (3282, 3352), True, 'import numpy as np\n'), ((3573, 3583), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (3580, 3583), True, 'import numpy as np\n'), ((4297, 4375), 'numpy.array', 'np.array', (['[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]]'], {}), '([[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]])\n', (4305, 4375), True, 'import numpy as np\n'), ((5395, 5473), 'numpy.array', 'np.array', (['[[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]]'], {}), '([[scale, 0, 0, 0], [0, scale, 0, 0], [0, 0, scale, 0], [0, 0, 0, 1]])\n', (5403, 5473), True, 'import numpy as np\n'), ((6062, 6096), 'numpy.clip', 'np.clip', (['sample_pt', '(0)', '(voxsize - 1)'], {}), '(sample_pt, 0, voxsize - 1)\n', (6069, 6096), True, 'import numpy as np\n'), ((6650, 6711), 'torch.empty', 'torch.empty', (['(batch_size, sample_size, channel_size, 2, 2, 2)'], {}), '((batch_size, sample_size, channel_size, 2, 2, 2))\n', (6661, 6711), False, 'import torch\n'), ((8042, 8053), 'time.time', 'time.time', ([], {}), '()\n', (8051, 8053), False, 'import time\n'), ((8069, 8109), 'numpy.zeros', 'np.zeros', (['(grid_res, grid_res, grid_res)'], {}), '((grid_res, grid_res, grid_res))\n', (8077, 8109), True, 'import numpy as np\n'), ((8413, 8486), 'torch.clamp', 'torch.clamp', (['(sample_pt_normalized * (vox_res - 1))', '(0)', '(vox_res - 1 - 1e-05)'], {}), '(sample_pt_normalized * (vox_res - 1), 0, vox_res - 1 - 1e-05)\n', (8424, 8486), False, 'import torch\n'), ((11192, 11226), 'pymesh.form_mesh', 'pymesh.form_mesh', (['(2 * verts)', 'faces'], {}), '(2 * verts, faces)\n', (11208, 11226), False, 'import pymesh\n'), ((11236, 11284), 'trimesh.Trimesh', 'trimesh.Trimesh', (['mesh_py.vertices', 'mesh_py.faces'], {}), '(mesh_py.vertices, mesh_py.faces)\n', (11251, 11284), False, 'import trimesh\n'), ((11288, 11322), 'trimesh.repair.fix_inversion', 'trimesh.repair.fix_inversion', (['mesh'], {}), '(mesh)\n', (11316, 11322), False, 'import trimesh\n'), ((11327, 11358), 'trimesh.repair.fill_holes', 'trimesh.repair.fill_holes', (['mesh'], {}), '(mesh)\n', (11352, 11358), False, 'import trimesh\n'), ((11452, 11494), 'trimesh.Trimesh', 'trimesh.Trimesh', (['mesh.vertices', 'mesh.faces'], {}), '(mesh.vertices, mesh.faces)\n', (11467, 11494), 
False, 'import trimesh\n'), ((11508, 11541), 'pyrender.Mesh.from_trimesh', 'pyrender.Mesh.from_trimesh', (['model'], {}), '(model)\n', (11534, 11541), False, 'import pyrender\n'), ((11554, 11570), 'pyrender.Scene', 'pyrender.Scene', ([], {}), '()\n', (11568, 11570), False, 'import pyrender\n'), ((11607, 11670), 'pyrender.Viewer', 'pyrender.Viewer', (['scene'], {'use_raymond_lighting': '(True)', 'point_size': '(2)'}), '(scene, use_raymond_lighting=True, point_size=2)\n', (11622, 11670), False, 'import pyrender\n'), ((11746, 11794), 'trimesh.Trimesh', 'trimesh.Trimesh', (['mesh_py.vertices', 'mesh_py.faces'], {}), '(mesh_py.vertices, mesh_py.faces)\n', (11761, 11794), False, 'import trimesh\n'), ((11812, 11854), 'trimesh.sample.sample_surface', 'trimesh.sample.sample_surface', (['mesh', 'count'], {}), '(mesh, count)\n', (11841, 11854), False, 'import trimesh\n'), ((11958, 11985), 'pymesh.VoxelGrid', 'pymesh.VoxelGrid', (['(2.0 / dim)'], {}), '(2.0 / dim)\n', (11974, 11985), False, 'import pymesh\n'), ((12115, 12140), 'numpy.zeros', 'np.zeros', (['[dim, dim, dim]'], {}), '([dim, dim, dim])\n', (12123, 12140), True, 'import numpy as np\n'), ((12518, 12578), 'chamfer_python.distChamfer', 'chamfer_python.distChamfer', (['samples_batch_1', 'samples_batch_2'], {}), '(samples_batch_1, samples_batch_2)\n', (12544, 12578), False, 'import chamfer_python\n'), ((12651, 12684), 'pyemd.emd_samples', 'emd_samples', (['samples_1', 'samples_2'], {}), '(samples_1, samples_2)\n', (12662, 12684), False, 'from pyemd import emd_samples\n'), ((5686, 5696), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (5693, 5696), True, 'import numpy as np\n'), ((6115, 6151), 'numpy.ones', 'np.ones', (['(voxsize, voxsize, voxsize)'], {}), '((voxsize, voxsize, voxsize))\n', (6122, 6151), True, 'import numpy as np\n'), ((8129, 8147), 'numpy.where', 'np.where', (['(vox == 0)'], {}), '(vox == 0)\n', (8137, 8147), True, 'import numpy as np\n'), ((8347, 8376), 'torch.tensor', 'torch.tensor', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (8359, 8376), False, 'import torch\n'), ((10461, 10509), 'skimage.measure.marching_cubes_lewiner', 'sk.marching_cubes_lewiner', (['vox'], {'level': 'isosurface'}), '(vox, level=isosurface)\n', (10486, 10509), True, 'import skimage.measure as sk\n'), ((10731, 10760), 'trimesh.Trimesh', 'trimesh.Trimesh', (['verts', 'faces'], {}), '(verts, faces)\n', (10746, 10760), False, 'import trimesh\n'), ((10813, 10844), 'trimesh.repair.fill_holes', 'trimesh.repair.fill_holes', (['mesh'], {}), '(mesh)\n', (10838, 10844), False, 'import trimesh\n'), ((10863, 10906), 'pymesh.form_mesh', 'pymesh.form_mesh', (['mesh.vertices', 'mesh.faces'], {}), '(mesh.vertices, mesh.faces)\n', (10879, 10906), False, 'import pymesh\n'), ((12598, 12615), 'torch.mean', 'torch.mean', (['dist1'], {}), '(dist1)\n', (12608, 12615), False, 'import torch\n'), ((12618, 12635), 'torch.mean', 'torch.mean', (['dist2'], {}), '(dist2)\n', (12628, 12635), False, 'import torch\n'), ((12716, 12738), 'numpy.logical_and', 'np.logical_and', (['v1', 'v2'], {}), '(v1, v2)\n', (12730, 12738), True, 'import numpy as np\n'), ((12759, 12780), 'numpy.logical_or', 'np.logical_or', (['v1', 'v2'], {}), '(v1, v2)\n', (12772, 12780), True, 'import numpy as np\n'), ((2214, 2259), 'numpy.concatenate', 'np.concatenate', (['(y_expand, z_expand)'], {'axis': '(-1)'}), '((y_expand, z_expand), axis=-1)\n', (2228, 2259), True, 'import numpy as np\n'), ((8195, 8242), 'torch.tensor', 'torch.tensor', (['(idx / grid_res)'], {'dtype': 'torch.float'}), '(idx / grid_res, 
dtype=torch.float)\n', (8207, 8242), False, 'import torch\n'), ((10185, 10208), 'torch.cat', 'torch.cat', (['pre_sdf_list'], {}), '(pre_sdf_list)\n', (10194, 10208), False, 'import torch\n'), ((382, 399), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (388, 399), True, 'import numpy as np\n'), ((399, 416), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (405, 416), True, 'import numpy as np\n'), ((464, 481), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (470, 481), True, 'import numpy as np\n'), ((617, 634), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (623, 634), True, 'import numpy as np\n'), ((634, 651), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (640, 651), True, 'import numpy as np\n'), ((699, 716), 'numpy.cos', 'np.cos', (['(D2R * phi)'], {}), '(D2R * phi)\n', (705, 716), True, 'import numpy as np\n'), ((447, 464), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (453, 464), True, 'import numpy as np\n'), ((682, 699), 'numpy.sin', 'np.sin', (['(D2R * phi)'], {}), '(D2R * phi)\n', (688, 699), True, 'import numpy as np\n'), ((933, 952), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (939, 952), True, 'import numpy as np\n'), ((1155, 1174), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1161, 1174), True, 'import numpy as np\n'), ((1377, 1396), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1383, 1396), True, 'import numpy as np\n'), ((1647, 1680), 'numpy.linspace', 'np.linspace', (['(-ratio)', 'ratio', 'width'], {}), '(-ratio, ratio, width)\n', (1658, 1680), True, 'import numpy as np\n'), ((1780, 1814), 'numpy.linspace', 'np.linspace', (['(-ratio)', 'ratio', 'height'], {}), '(-ratio, ratio, height)\n', (1791, 1814), True, 'import numpy as np\n'), ((1914, 1947), 'numpy.linspace', 'np.linspace', (['(-ratio)', 'ratio', 'depth'], {}), '(-ratio, ratio, depth)\n', (1925, 1947), True, 'import numpy as np\n'), ((5997, 6022), 'numpy.array', 'np.array', (['[0.5, 0.5, 0.5]'], {}), '([0.5, 0.5, 0.5])\n', (6005, 6022), True, 'import numpy as np\n'), ((6524, 6548), 'numpy.arange', 'np.arange', (['(0)', 'batch_size'], {}), '(0, batch_size)\n', (6533, 6548), True, 'import numpy as np\n'), ((6550, 6576), 'numpy.arange', 'np.arange', (['(0)', 'channel_size'], {}), '(0, channel_size)\n', (6559, 6576), True, 'import numpy as np\n'), ((6578, 6593), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (6587, 6593), True, 'import numpy as np\n'), ((6595, 6610), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (6604, 6610), True, 'import numpy as np\n'), ((6612, 6627), 'numpy.arange', 'np.arange', (['(0)', '(2)'], {}), '(0, 2)\n', (6621, 6627), True, 'import numpy as np\n'), ((982, 1001), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (988, 1001), True, 'import numpy as np\n'), ((1031, 1050), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1037, 1050), True, 'import numpy as np\n'), ((1107, 1126), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1113, 1126), True, 'import numpy as np\n'), ((1204, 1223), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1210, 1223), True, 'import numpy as np\n'), ((1280, 1299), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1286, 1299), True, 'import numpy as np\n'), ((1329, 1348), 'numpy.sin', 'np.sin', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1335, 1348), True, 'import numpy as np\n'), ((912, 
931), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (918, 931), True, 'import numpy as np\n'), ((959, 978), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (965, 978), True, 'import numpy as np\n'), ((1008, 1027), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1014, 1027), True, 'import numpy as np\n'), ((1084, 1103), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1090, 1103), True, 'import numpy as np\n'), ((1134, 1153), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1140, 1153), True, 'import numpy as np\n'), ((1181, 1200), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1187, 1200), True, 'import numpy as np\n'), ((1257, 1276), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1263, 1276), True, 'import numpy as np\n'), ((1306, 1325), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1312, 1325), True, 'import numpy as np\n'), ((1356, 1375), 'numpy.cos', 'np.cos', (['(D2R * theta)'], {}), '(D2R * theta)\n', (1362, 1375), True, 'import numpy as np\n')]
|
import os
import math
import gzip
import csv
import time
import torch
import torch.optim as optim
import torch.utils.data as data_utils
from sklearn.model_selection import train_test_split
from tqdm import tqdm
# import matplotlib.pyplot as plt
import numpy as np
from crf import CRF
# import Data Loader
from data_loader import get_dataset
if __name__ == '__main__':
# hyperparameters, dimensions and model parameters
dim = 128
epochs = 1
labels = 26
max_iter = 500
embed_dim = 128
batch_size = 64
conv_shapes = [[1, 64, 128]]
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model and optimizer
model = CRF(dim, embed_dim, conv_shapes, labels, batch_size).to(device)
opt = optim.LBFGS(model.parameters(), lr=0.01)
dataset = get_dataset()
print(dataset.target.shape, dataset.data.shape)
# X_train, X_test, y_train, y_test = train_test_split(dataset.data, dataset.target, test_size=0.3, stratify=dataset.target)
split = int(0.7 * len(dataset.data))
X_train, X_test = dataset.data[:split], dataset.data[split:]
y_train, y_test = dataset.target[:split], dataset.target[split:]
# train_data = train_data.to(device)
# test_data = test_data.to(device)
# train_target = train_target.to(device)
# test_target = test_target.to(device)
train = data_utils.TensorDataset(torch.tensor(X_train).float(), torch.tensor(y_train).float())
test = data_utils.TensorDataset(torch.tensor(X_test).float(), torch.tensor(y_test).float())
# train = train.to(device)
# test = test.to(device)
# print(len(train[0][1][0]))
train_letter, test_letter, train_word, test_word = [], [], [], []
# Clear all log files
dir_name = "Q4"
files = os.listdir(dir_name)
for file in files:
if file.endswith(".txt"):
with open(os.path.join(dir_name, file), "r+") as f:
f.truncate(0)
f.close()
for i in range(epochs):
step = 1
print("\nEpoch {}".format(i + 1))
start_epoch = time.time()
train_data = data_utils.DataLoader(train, batch_size=batch_size, shuffle=True, sampler=None, num_workers=5,
pin_memory=True)
test_data = data_utils.DataLoader(test, batch_size=batch_size, shuffle=True, sampler=None, num_workers=5,
pin_memory=True)
train_mean_word_accuracy, test_mean_word_accuracy, train_mean_letter_accuracy, test_mean_letter_accuracy = 0, 0, 0, 0
for batch, sample in tqdm(enumerate(train_data)):
print("\nEpoch-{} Mini-Batch-{}".format(i + 1, batch))
start_t = time.time()
train_X = sample[0].to(device)
train_Y = sample[1].to(device)
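            # torch.optim.LBFGS re-evaluates the objective several times per step,
            # so the loss computation and backward pass are wrapped in a closure
            # that is handed to opt.step() below.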
def compute_loss():
opt.zero_grad()
_loss = model.loss(train_X, train_Y)
_loss.backward()
return _loss
start_step = time.time()
opt.step(compute_loss)
print("Epoch-{} Batch-{} Step-{} TIME ELAPSED = {}".format(i + 1, batch, step, time.time() - start_step))
for name, values in model.named_parameters():
if values.requires_grad:
print("Parameters", name, values.data)
random_index = np.random.choice(X_test.shape[0], batch_size, replace=False)
test_X = X_test[random_index, :]
test_Y = y_test[random_index, :]
test_X = torch.from_numpy(test_X).float().to(device)
test_Y = torch.from_numpy(test_Y).long().to(device)
total_train_words = len(train_Y)
total_test_words = len(test_Y)
total_train_letters = torch.sum(train_Y).item()
total_test_letters = torch.sum(test_Y).item()
print("Getting Accuracy")
with torch.no_grad():
print("Training predictions-->")
train_predictions = model(train_X)
print("Test predictions-->")
test_predictions = model(test_X)
word_acc_train = 0
letter_acc_train = 0
for y, y_preds in zip(train_Y, train_predictions):
letters = int(torch.sum(y).item())
if torch.all(torch.eq(y[:letters], y_preds[:letters])):
word_acc_train = word_acc_train + 1
letter_acc_train = letter_acc_train + letters - (
((~torch.eq(y[:letters], y_preds[:letters])).sum()) / 2).item()
word_accuracy_test = 0
letter_accuracy_test = 0
for y, y_preds in zip(test_Y, test_predictions):
letters = int(torch.sum(y).item())
if torch.all(torch.eq(y[:letters], y_preds[:letters])):
word_accuracy_test = word_accuracy_test + 1
letter_accuracy_test = letter_accuracy_test + letters - (
((~torch.eq(y[:letters], y_preds[:letters])).sum()) / 2).item()
letter_acc_train /= total_train_letters
letter_accuracy_test /= total_test_letters
word_acc_train /= total_train_words
word_accuracy_test /= total_test_words
## collect accuracies for 100 steps
train_letter.append(letter_acc_train)
test_letter.append(letter_accuracy_test)
train_word.append(word_acc_train)
test_word.append(word_accuracy_test)
f_trainingepoc = open("Q4/wordwise_training.txt", "a")
f_trainingepoc.write(str(word_acc_train) + "\n")
f_trainingepoc.close()
f_trainingepoc = open("Q4/letterwise_training.txt", "a")
f_trainingepoc.write(str(letter_acc_train) + "\n")
f_trainingepoc.close()
f_wtestingepoc = open("Q4/wordwise_testing.txt", "a")
f_wtestingepoc.write(str(word_accuracy_test) + "\n")
f_wtestingepoc.close()
f_testingepoc = open("Q4/letterwise_testing.txt", "a")
f_testingepoc.write(str(letter_accuracy_test) + "\n")
f_testingepoc.close()
print("\nTraining Accuracy ")
print("\tWord Acc = ", train_word)
print("\tLetter Acc = ", train_letter)
print(" Test Accuracy : ")
print("\tWord accuracy = ", test_word)
print("\tLetter accuracy = ", test_letter)
train_mean_word_accuracy = sum(train_word) / len(train_word)
test_mean_word_accuracy = sum(test_word) / len(test_word)
train_mean_letter_accuracy = sum(train_letter) / len(train_letter)
test_mean_letter_accuracy = sum(test_letter) / len(test_letter)
print(
"\n Train mean word accuracy = {}\n Test mean word accuracy = {}\n Train mean letter accuracy = {}\n Test mean letter accuracy = {}\n".format(
train_mean_word_accuracy, test_mean_word_accuracy, train_mean_letter_accuracy,
test_mean_letter_accuracy))
print("Epoch-{} Batch-{} Step-{} TIME TAKEN = {}".format(i, batch, step, time.time() - start_t))
step += 1
if step > max_iter: break
print("Epoch completed Epoch-{} Batch-{} Step-{} TIME ELAPSED = {}".format(i + 1, batch, step - 1,
time.time() - start_epoch))
|
[
"os.listdir",
"numpy.random.choice",
"os.path.join",
"torch.from_numpy",
"torch.eq",
"torch.tensor",
"torch.cuda.is_available",
"data_loader.get_dataset",
"torch.sum",
"torch.utils.data.DataLoader",
"torch.no_grad",
"crf.CRF",
"time.time"
] |
[((841, 854), 'data_loader.get_dataset', 'get_dataset', ([], {}), '()\n', (852, 854), False, 'from data_loader import get_dataset\n'), ((1818, 1838), 'os.listdir', 'os.listdir', (['dir_name'], {}), '(dir_name)\n', (1828, 1838), False, 'import os\n'), ((2138, 2149), 'time.time', 'time.time', ([], {}), '()\n', (2147, 2149), False, 'import time\n'), ((2174, 2290), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['train'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'sampler': 'None', 'num_workers': '(5)', 'pin_memory': '(True)'}), '(train, batch_size=batch_size, shuffle=True, sampler=\n None, num_workers=5, pin_memory=True)\n', (2195, 2290), True, 'import torch.utils.data as data_utils\n'), ((2351, 2466), 'torch.utils.data.DataLoader', 'data_utils.DataLoader', (['test'], {'batch_size': 'batch_size', 'shuffle': '(True)', 'sampler': 'None', 'num_workers': '(5)', 'pin_memory': '(True)'}), '(test, batch_size=batch_size, shuffle=True, sampler=\n None, num_workers=5, pin_memory=True)\n', (2372, 2466), True, 'import torch.utils.data as data_utils\n'), ((628, 653), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (651, 653), False, 'import torch\n'), ((708, 760), 'crf.CRF', 'CRF', (['dim', 'embed_dim', 'conv_shapes', 'labels', 'batch_size'], {}), '(dim, embed_dim, conv_shapes, labels, batch_size)\n', (711, 760), False, 'from crf import CRF\n'), ((2784, 2795), 'time.time', 'time.time', ([], {}), '()\n', (2793, 2795), False, 'import time\n'), ((3102, 3113), 'time.time', 'time.time', ([], {}), '()\n', (3111, 3113), False, 'import time\n'), ((3460, 3520), 'numpy.random.choice', 'np.random.choice', (['X_test.shape[0]', 'batch_size'], {'replace': '(False)'}), '(X_test.shape[0], batch_size, replace=False)\n', (3476, 3520), True, 'import numpy as np\n'), ((1429, 1450), 'torch.tensor', 'torch.tensor', (['X_train'], {}), '(X_train)\n', (1441, 1450), False, 'import torch\n'), ((1460, 1481), 'torch.tensor', 'torch.tensor', (['y_train'], {}), '(y_train)\n', (1472, 1481), False, 'import torch\n'), ((1528, 1548), 'torch.tensor', 'torch.tensor', (['X_test'], {}), '(X_test)\n', (1540, 1548), False, 'import torch\n'), ((1558, 1578), 'torch.tensor', 'torch.tensor', (['y_test'], {}), '(y_test)\n', (1570, 1578), False, 'import torch\n'), ((4015, 4030), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4028, 4030), False, 'import torch\n'), ((1923, 1951), 'os.path.join', 'os.path.join', (['dir_name', 'file'], {}), '(dir_name, file)\n', (1935, 1951), False, 'import os\n'), ((3871, 3889), 'torch.sum', 'torch.sum', (['train_Y'], {}), '(train_Y)\n', (3880, 3889), False, 'import torch\n'), ((3931, 3948), 'torch.sum', 'torch.sum', (['test_Y'], {}), '(test_Y)\n', (3940, 3948), False, 'import torch\n'), ((4446, 4486), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (4454, 4486), False, 'import torch\n'), ((4928, 4968), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (4936, 4968), False, 'import torch\n'), ((7646, 7657), 'time.time', 'time.time', ([], {}), '()\n', (7655, 7657), False, 'import time\n'), ((3242, 3253), 'time.time', 'time.time', ([], {}), '()\n', (3251, 3253), False, 'import time\n'), ((7364, 7375), 'time.time', 'time.time', ([], {}), '()\n', (7373, 7375), False, 'import time\n'), ((3635, 3659), 'torch.from_numpy', 'torch.from_numpy', (['test_X'], {}), '(test_X)\n', (3651, 3659), False, 'import torch\n'), ((3701, 3725), 'torch.from_numpy', 'torch.from_numpy', 
(['test_Y'], {}), '(test_Y)\n', (3717, 3725), False, 'import torch\n'), ((4395, 4407), 'torch.sum', 'torch.sum', (['y'], {}), '(y)\n', (4404, 4407), False, 'import torch\n'), ((4877, 4889), 'torch.sum', 'torch.sum', (['y'], {}), '(y)\n', (4886, 4889), False, 'import torch\n'), ((4645, 4685), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (4653, 4685), False, 'import torch\n'), ((5143, 5183), 'torch.eq', 'torch.eq', (['y[:letters]', 'y_preds[:letters]'], {}), '(y[:letters], y_preds[:letters])\n', (5151, 5183), False, 'import torch\n')]
|
import torch
import numpy as np
import argparse
import os
from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState,\
shuffle
from model import FewShotClassifier
from dataset import get_dataset_reader
from tf_dataset_reader import TfDatasetReader
from image_folder_reader import ImageFolderReader
NUM_VALIDATION_TASKS = 200
NUM_TEST_TASKS = 600
PRINT_FREQUENCY = 1000
def main():
learner = Learner()
learner.run()
class Learner:
def __init__(self):
self.args = self.parse_command_line()
self.log_files = LogFiles(self.args.checkpoint_dir, self.args.resume_from_checkpoint,
(self.args.mode == 'test') or (self.args.mode == 'test_vtab'))
self.logger = Logger(self.args.checkpoint_dir, "log.txt")
self.logger.print_and_log("Options: %s\n" % self.args)
self.logger.print_and_log("Checkpoint Directory: %s\n" % self.log_files.checkpoint_dir)
self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
self.model = self.init_model()
self.train_set, self.validation_set, self.test_set = self.init_data()
if self.args.mode == "train" or self.args.mode == "test" or self.args.mode == 'train_test':
self.dataset = get_dataset_reader(
args=self.args,
train_set=self.train_set,
validation_set=self.validation_set,
test_set=self.test_set)
if self.args.train_method == 'lite':
self.train_fn = self.train_lite
else:
self.train_fn = self.train_task
self.use_batches = False if self.args.train_method == 'no_lite' else True
self.loss = cross_entropy_loss
self.accuracy_fn = compute_accuracy
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
self.validation_accuracies = ValidationAccuracies(self.validation_set)
self.start_iteration = 0
if self.args.resume_from_checkpoint:
self.load_checkpoint()
self.optimizer.zero_grad()
self.feature_cache = None
def init_model(self):
model = FewShotClassifier(args=self.args, logger=self.logger, device=self.device).to(self.device)
model.count_parameters(model)
# set encoder is always in train mode (it only sees context data).
# Feature extractor gets switched in model.
model.train()
return model
def init_data(self):
train_set = ['ilsvrc_2012', 'omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mnist']
validation_set = ['omniglot', 'aircraft', 'cu_birds', 'dtd', 'quickdraw', 'fungi', 'mscoco']
test_set = self.args.test_datasets
return train_set, validation_set, test_set
"""
Command line parser
"""
def parse_command_line(self):
parser = argparse.ArgumentParser()
# operational parameters
parser.add_argument("--mode", choices=["train", "test", "train_test", "test_vtab"], default="train_test",
help="Whether to run meta-training only, meta-testing only,"
"both meta-training and meta-testing, or testing on vtab.")
parser.add_argument("--checkpoint_dir", "-c", default='../checkpoints', help="Directory to save checkpoint to.")
parser.add_argument("--resume_from_checkpoint", "-r", dest="resume_from_checkpoint", default=False,
action="store_true", help="Restart from latest checkpoint.")
# data parameters
parser.add_argument('--test_datasets', nargs='+', help='Datasets to use for testing',
default=["omniglot", "aircraft", "cu_birds", "dtd", "quickdraw", "fungi", "traffic_sign",
"mscoco"])
parser.add_argument("--data_path", default="../datasets", help="Path to Meta-Dataset records.")
parser.add_argument("--download_path_for_tensorflow_datasets", default=None,
help="Path to download the tensorflow datasets.")
parser.add_argument("--download_path_for_sun397_dataset", default=None,
help="Path to download the sun397 dataset.")
# training parameters
parser.add_argument("--train_method", choices=["lite", "small_task", "no_lite"], default="lite",
help="Whether to use lite, small tasks, or not lite.")
parser.add_argument("--pretrained_model_path", default="../models/efficientnet-b0_84.pt",
help="Path to dataset records.")
parser.add_argument("--learning_rate", "-lr", type=float, default=0.001, help="Learning rate.")
parser.add_argument("--tasks_per_step", type=int, default=16,
help="Number of tasks between parameter optimizations.")
parser.add_argument("--training_iterations", "-i", type=int, default=10000,
help="Number of meta-training iterations.")
parser.add_argument("--max_way_train", type=int, default=50, help="Maximum way of meta-train task.")
parser.add_argument("--max_support_train", type=int, default=500,
help="Maximum support set size of meta-train task.")
parser.add_argument("--image_size", type=int, default=224, help="Image height and width.")
parser.add_argument("--batch_size", type=int, default=40, help="Size of batch.")
parser.add_argument("--h", type=int, default=40,
help="Number of support set samples to back-propagate when training with LITE.")
# testing parameters
parser.add_argument("--test_model_path", "-m", default=None, help="Path to model to load and test.")
parser.add_argument("--val_freq", type=int, default=5000, help="Number of iterations between validations.")
args = parser.parse_args()
return args
def run(self):
if self.args.mode == 'train' or self.args.mode == 'train_test':
train_accuracies = []
losses = []
total_iterations = self.args.training_iterations
for iteration in range(self.start_iteration, total_iterations):
task_dict = self.dataset.get_train_task()
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
if self.use_batches:
self.model.clear_caches()
self.feature_cache = None
target_set_size = len(target_labels)
num_batches = self._get_number_of_batches(target_set_size)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, target_set_size)
batch_loss, batch_accuracy = self.train_fn(
context_images,
target_images[batch_start_index : batch_end_index],
context_labels,
target_labels[batch_start_index : batch_end_index]
)
train_accuracies.append(batch_accuracy)
losses.append(batch_loss)
else:
task_loss, task_accuracy = self.train_fn(context_images, target_images, context_labels,
target_labels)
train_accuracies.append(task_accuracy)
losses.append(task_loss)
# optimize
if ((iteration + 1) % self.args.tasks_per_step == 0) or (iteration == (total_iterations - 1)):
self.optimizer.step()
self.optimizer.zero_grad()
if (iteration + 1) % PRINT_FREQUENCY == 0:
# print training stats
self.save_checkpoint(iteration + 1)
torch.save(self.model.state_dict(), os.path.join(self.log_files.checkpoint_dir,
"model_{}.pt".format(iteration + 1)))
self.logger.print_and_log('Task [{}/{}], Train Loss: {:.7f},'
'Train Accuracy: {:.7f}, Learning Rate: {:.7f}'
.format(iteration + 1, total_iterations,
torch.Tensor(losses).mean().item(),
torch.Tensor(train_accuracies).mean().item(),
self.optimizer.param_groups[0]['lr']))
train_accuracies = []
losses = []
if ((iteration + 1) % self.args.val_freq == 0) and (iteration + 1) != total_iterations:
# validate
accuracy_dict = self.validate()
self.validation_accuracies.print(self.logger, accuracy_dict)
# save the model if validation is the best so far
if self.validation_accuracies.is_better(accuracy_dict):
self.validation_accuracies.replace(accuracy_dict)
torch.save(self.model.state_dict(), self.log_files.best_validation_model_path)
self.logger.print_and_log('Best validation model was updated.')
self.logger.print_and_log('')
# save the final model
torch.save(self.model.state_dict(), self.log_files.fully_trained_model_path)
if self.args.mode == 'train_test':
self.test(self.log_files.fully_trained_model_path)
self.test(self.log_files.best_validation_model_path)
if self.args.mode == 'test':
self.test(self.args.test_model_path)
if self.args.mode == 'test_vtab':
self._test_transfer_learning(self.args.test_model_path)
def train_task(self, context_images, target_images, context_labels, target_labels):
target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TRAIN)
task_loss = self.loss(target_logits, target_labels) / self.args.tasks_per_step
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
task_loss += regularizer_scaling * regularization_term
task_accuracy = self.accuracy_fn(target_logits, target_labels)
task_loss.backward(retain_graph=False)
return task_loss, task_accuracy
def train_lite(self, context_images, target_images, context_labels, target_labels):
# We'll split the context set into two: the first part will be of size batch_size and we'll use gradients
# for that. The second part will be everything else and we'll use no gradients for that, so we only need to
# compute that once per task.
context_size = context_images.size(0)
indices = np.random.permutation(context_size)
        h = min(self.args.h, context_size) # number of examples to back-propagate through
grad_indices = indices[0: h]
no_grad_indices = indices[h:]
self.model.build_task_representation_with_split_batch(context_images, grad_indices, no_grad_indices)
context_features = self._compute_features_with_split_batch(context_images, grad_indices, no_grad_indices,
MetaLearningState.META_TRAIN)
self.model.configure_classifier(context_features, context_labels[indices])
# now the target set
torch.set_grad_enabled(True)
batch_logits = self.model.predict(target_images, MetaLearningState.META_TRAIN)
# compute the loss
batch_loss = self.loss(batch_logits, target_labels) / self.args.tasks_per_step
regularization_term = (self.model.feature_adaptation_network.regularization_term())
regularizer_scaling = 0.001
batch_loss += regularizer_scaling * regularization_term
# compute accuracy
batch_accuracy = self.accuracy_fn(batch_logits, target_labels)
batch_loss.backward(retain_graph=False)
return batch_loss, batch_accuracy
def _get_number_of_batches(self, task_size):
num_batches = int(np.ceil(float(task_size) / float(self.args.batch_size)))
if num_batches > 1 and (task_size % self.args.batch_size == 1):
num_batches -= 1
return num_batches
def _get_batch_indices(self, index, last_element):
batch_start_index = index * self.args.batch_size
batch_end_index = batch_start_index + self.args.batch_size
if batch_end_index == (last_element - 1): # avoid batch size of 1
batch_end_index = last_element
if batch_end_index > last_element:
batch_end_index = last_element
return batch_start_index, batch_end_index
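    # Worked example for the two batching helpers above (illustrative numbers):
    # with batch_size = 40 and a target set of 81 examples,
    # _get_number_of_batches returns 2 (the "% batch_size == 1" rule merges the
    # trailing singleton), and _get_batch_indices yields the slices [0, 40) and
    # [40, 81), so no batch ends up with size 1.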
def validate(self):
with torch.no_grad():
            accuracy_dict = {}
for item in self.validation_set:
accuracies = []
for _ in range(NUM_VALIDATION_TASKS):
task_dict = self.dataset.get_validation_task(item)
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
if self.use_batches:
self.model.build_task_representation_by_batch(context_images)
context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
self.model.configure_classifier(context_features, context_labels)
test_set_size = len(target_labels)
num_batches = self._get_number_of_batches(test_set_size)
target_logits = []
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
MetaLearningState.META_TEST)
target_logits.append(batch_logits)
target_logits = torch.vstack(target_logits)
target_accuracy = self.accuracy_fn(target_logits, target_labels)
del target_logits
accuracies.append(target_accuracy.item())
else:
target_logits = self.model(context_images, context_labels, target_images, MetaLearningState.META_TEST)
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
accuracy_dict[item] = {"accuracy": accuracy, "confidence": confidence}
return accuracy_dict
def test(self, path):
self.logger.print_and_log("") # add a blank line
self.logger.print_and_log('Testing model {0:}: '.format(path))
self.model = self.init_model()
if path != 'None':
self.model.load_state_dict(torch.load(path))
with torch.no_grad():
for item in self.test_set:
accuracies = []
for _ in range(NUM_TEST_TASKS):
task_dict = self.dataset.get_test_task(item)
context_images, target_images, context_labels, target_labels = self.prepare_task(task_dict)
if self.use_batches:
self.model.build_task_representation_by_batch(context_images)
context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
self.model.configure_classifier(context_features, context_labels)
test_set_size = len(target_labels)
num_batches = self._get_number_of_batches(test_set_size)
target_logits = []
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, test_set_size)
batch_logits = self.model.predict(target_images[batch_start_index: batch_end_index],
MetaLearningState.META_TEST)
target_logits.append(batch_logits)
target_logits = torch.vstack(target_logits)
target_accuracy = self.accuracy_fn(target_logits, target_labels)
del target_logits
accuracies.append(target_accuracy.item())
else:
target_logits = self.model(context_images, context_labels, target_images,
MetaLearningState.META_TEST)
accuracy = self.accuracy_fn(target_logits, target_labels)
accuracies.append(accuracy.item())
del target_logits
accuracy = np.array(accuracies).mean() * 100.0
accuracy_confidence = (196.0 * np.array(accuracies).std()) / np.sqrt(len(accuracies))
self.logger.print_and_log('{0:}: {1:3.1f}+/-{2:2.1f}'.format(item, accuracy, accuracy_confidence))
def _test_transfer_learning(self, path):
self.logger.print_and_log("") # add a blank line
self.logger.print_and_log('Testing model {0:}: '.format(path))
self.model = self.init_model()
if path != 'None':
self.model.load_state_dict(torch.load(path))
context_set_size = 1000
datasets = [
{'name': "caltech101", 'task': None, 'enabled': True},
{'name': "cifar100", 'task': None, 'enabled': True},
{'name': "oxford_flowers102", 'task': None, 'enabled': True},
{'name': "oxford_iiit_pet", 'task': None, 'enabled': True},
{'name': "sun397", 'task': None, 'enabled': True},
{'name': "svhn_cropped", 'task': None, 'enabled': True},
{'name': "eurosat", 'task': None, 'enabled': True},
{'name': "resisc45", 'task': None, 'enabled': True},
{'name': "patch_camelyon", 'task': None, 'enabled': True},
{'name': "diabetic_retinopathy_detection", 'task': None, 'enabled': True},
{'name': "clevr", 'task': "count", 'enabled': True},
{'name': "clevr", 'task': "distance", 'enabled': True},
{'name': "dsprites", 'task': "location", 'enabled': True},
{'name': "dsprites", 'task': "orientation", 'enabled': True},
{'name': "smallnorb", 'task': "azimuth", 'enabled': True},
{'name': "smallnorb", 'task': "elevation", 'enabled': True},
{'name': "dmlab", 'task': None, 'enabled': True},
{'name': "kitti", 'task': None, 'enabled': True},
]
with torch.no_grad():
for dataset in datasets:
if dataset['enabled'] is False:
continue
if dataset['name'] == "sun397": # use the image folder reader as the tf reader is broken for sun397
dataset_reader = ImageFolderReader(
path_to_images=self.args.download_path_for_sun397_dataset,
context_batch_size=context_set_size,
target_batch_size=self.args.batch_size,
image_size=self.args.image_size,
device=self.device)
else: # use the tensorflow dataset reader
dataset_reader = TfDatasetReader(
dataset=dataset['name'],
task=dataset['task'],
context_batch_size=context_set_size,
target_batch_size=self.args.batch_size,
path_to_datasets=self.args.download_path_for_tensorflow_datasets,
image_size=self.args.image_size,
device=self.device
)
context_images, context_labels = dataset_reader.get_context_batch()
self.model.build_task_representation_by_batch(context_images)
context_features = self._compute_features_by_batch(context_images, MetaLearningState.META_TEST)
self.model.configure_classifier(context_features, context_labels)
test_set_size = dataset_reader.get_target_dataset_length()
num_batches = self._get_number_of_batches(test_set_size)
target_logits = []
target_labels = []
for batch in range(num_batches):
batch_target_images, batch_target_labels = dataset_reader.get_target_batch()
batch_logits = self.model.predict(batch_target_images, MetaLearningState.META_TEST)
target_logits.append(batch_logits)
target_labels.append(batch_target_labels)
target_logits = torch.vstack(target_logits)
target_labels = torch.hstack(target_labels)
target_accuracy = self.accuracy_fn(target_logits, target_labels)
del target_logits
accuracy = target_accuracy * 100.0
if dataset['task'] is None:
self.logger.print_and_log('{0:}: {1:3.1f}'.format(dataset['name'], accuracy))
else:
self.logger.print_and_log('{0:} {1:}: {2:3.1f}'.format(dataset['name'], dataset['task'], accuracy))
def _compute_features_by_batch(self, images, meta_learning_state):
features = []
num_images = images.size(0)
num_batches = self._get_number_of_batches(num_images)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
meta_learning_state))
return torch.vstack(features)
def _compute_features_with_split_batch(self, images, grad_indices, no_grad_indices, meta_learning_state):
num_images = images.size(0)
if self.feature_cache is None: # cache the part with no gradients
features = []
num_batches = self._get_number_of_batches(num_images)
for batch in range(num_batches):
batch_start_index, batch_end_index = self._get_batch_indices(batch, num_images)
torch.set_grad_enabled(False)
features.append(self.model.get_context_features(images[batch_start_index: batch_end_index],
meta_learning_state))
self.feature_cache = torch.vstack(features).to(self.device)
        # now select the random subset of images that will have gradients and process those
embeddings = []
if len(grad_indices) > 0:
torch.set_grad_enabled(True)
embeddings.append(self.model.get_context_features(images[grad_indices], meta_learning_state))
# now add in the no_grad images
embeddings.extend(self.feature_cache[no_grad_indices])
return torch.vstack(embeddings)
def prepare_task(self, task_dict):
context_images_np, context_labels_np = task_dict['context_images'], task_dict['context_labels']
target_images_np, target_labels_np = task_dict['target_images'], task_dict['target_labels']
context_images_np = context_images_np.transpose([0, 3, 1, 2])
context_images_np, context_labels_np = shuffle(context_images_np, context_labels_np)
context_images = torch.from_numpy(context_images_np)
context_labels = torch.from_numpy(context_labels_np)
target_images_np = target_images_np.transpose([0, 3, 1, 2])
target_images_np, target_labels_np = shuffle(target_images_np, target_labels_np)
target_images = torch.from_numpy(target_images_np)
target_labels = torch.from_numpy(target_labels_np)
context_images = context_images.to(self.device)
target_images = target_images.to(self.device)
context_labels = context_labels.to(self.device)
target_labels = target_labels.type(torch.LongTensor).to(self.device)
return context_images, target_images, context_labels, target_labels
def save_checkpoint(self, iteration):
torch.save({
'iteration': iteration,
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'best_accuracy': self.validation_accuracies.get_current_best_accuracy_dict(),
}, os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
def load_checkpoint(self):
checkpoint = torch.load(os.path.join(self.log_files.checkpoint_dir, 'checkpoint.pt'))
self.start_iteration = checkpoint['iteration']
self.model.load_state_dict(checkpoint['model_state_dict'])
self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.validation_accuracies.replace(checkpoint['best_accuracy'])
if __name__ == "__main__":
main()
|
[
"torch.from_numpy",
"numpy.array",
"torch.cuda.is_available",
"utils.ValidationAccuracies",
"torch.set_grad_enabled",
"argparse.ArgumentParser",
"utils.LogFiles",
"dataset.get_dataset_reader",
"tf_dataset_reader.TfDatasetReader",
"utils.shuffle",
"numpy.random.permutation",
"torch.Tensor",
"model.FewShotClassifier",
"torch.hstack",
"torch.vstack",
"torch.load",
"os.path.join",
"utils.Logger",
"image_folder_reader.ImageFolderReader",
"torch.no_grad"
] |
[((597, 729), 'utils.LogFiles', 'LogFiles', (['self.args.checkpoint_dir', 'self.args.resume_from_checkpoint', "(self.args.mode == 'test' or self.args.mode == 'test_vtab')"], {}), "(self.args.checkpoint_dir, self.args.resume_from_checkpoint, self.\n args.mode == 'test' or self.args.mode == 'test_vtab')\n", (605, 729), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((785, 828), 'utils.Logger', 'Logger', (['self.args.checkpoint_dir', '"""log.txt"""'], {}), "(self.args.checkpoint_dir, 'log.txt')\n", (791, 828), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((1951, 1992), 'utils.ValidationAccuracies', 'ValidationAccuracies', (['self.validation_set'], {}), '(self.validation_set)\n', (1971, 1992), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((2941, 2966), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2964, 2966), False, 'import argparse\n'), ((11198, 11233), 'numpy.random.permutation', 'np.random.permutation', (['context_size'], {}), '(context_size)\n', (11219, 11233), True, 'import numpy as np\n'), ((11833, 11861), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (11855, 11861), False, 'import torch\n'), ((22682, 22704), 'torch.vstack', 'torch.vstack', (['features'], {}), '(features)\n', (22694, 22704), False, 'import torch\n'), ((23887, 23911), 'torch.vstack', 'torch.vstack', (['embeddings'], {}), '(embeddings)\n', (23899, 23911), False, 'import torch\n'), ((24274, 24319), 'utils.shuffle', 'shuffle', (['context_images_np', 'context_labels_np'], {}), '(context_images_np, context_labels_np)\n', (24281, 24319), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((24345, 24380), 'torch.from_numpy', 'torch.from_numpy', (['context_images_np'], {}), '(context_images_np)\n', (24361, 24380), False, 'import torch\n'), ((24406, 24441), 'torch.from_numpy', 'torch.from_numpy', (['context_labels_np'], {}), '(context_labels_np)\n', (24422, 24441), False, 'import torch\n'), ((24556, 24599), 'utils.shuffle', 'shuffle', (['target_images_np', 'target_labels_np'], {}), '(target_images_np, target_labels_np)\n', (24563, 24599), False, 'from utils import Logger, LogFiles, ValidationAccuracies, cross_entropy_loss, compute_accuracy, MetaLearningState, shuffle\n'), ((24624, 24658), 'torch.from_numpy', 'torch.from_numpy', (['target_images_np'], {}), '(target_images_np)\n', (24640, 24658), False, 'import torch\n'), ((24683, 24717), 'torch.from_numpy', 'torch.from_numpy', (['target_labels_np'], {}), '(target_labels_np)\n', (24699, 24717), False, 'import torch\n'), ((1318, 1443), 'dataset.get_dataset_reader', 'get_dataset_reader', ([], {'args': 'self.args', 'train_set': 'self.train_set', 'validation_set': 'self.validation_set', 'test_set': 'self.test_set'}), '(args=self.args, train_set=self.train_set, validation_set\n =self.validation_set, test_set=self.test_set)\n', (1336, 1443), False, 'from dataset import get_dataset_reader\n'), ((13181, 13196), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13194, 13196), False, 'import torch\n'), ((15664, 15679), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (15677, 15679), False, 'import torch\n'), ((19485, 19500), 'torch.no_grad', 'torch.no_grad', ([], {}), 
'()\n', (19498, 19500), False, 'import torch\n'), ((23632, 23660), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(True)'], {}), '(True)\n', (23654, 23660), False, 'import torch\n'), ((25362, 25422), 'os.path.join', 'os.path.join', (['self.log_files.checkpoint_dir', '"""checkpoint.pt"""'], {}), "(self.log_files.checkpoint_dir, 'checkpoint.pt')\n", (25374, 25422), False, 'import os\n'), ((25488, 25548), 'os.path.join', 'os.path.join', (['self.log_files.checkpoint_dir', '"""checkpoint.pt"""'], {}), "(self.log_files.checkpoint_dir, 'checkpoint.pt')\n", (25500, 25548), False, 'import os\n'), ((1036, 1061), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1059, 1061), False, 'import torch\n'), ((2218, 2291), 'model.FewShotClassifier', 'FewShotClassifier', ([], {'args': 'self.args', 'logger': 'self.logger', 'device': 'self.device'}), '(args=self.args, logger=self.logger, device=self.device)\n', (2235, 2291), False, 'from model import FewShotClassifier\n'), ((15632, 15648), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (15642, 15648), False, 'import torch\n'), ((18146, 18162), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (18156, 18162), False, 'import torch\n'), ((21618, 21645), 'torch.vstack', 'torch.vstack', (['target_logits'], {}), '(target_logits)\n', (21630, 21645), False, 'import torch\n'), ((21678, 21705), 'torch.hstack', 'torch.hstack', (['target_labels'], {}), '(target_labels)\n', (21690, 21705), False, 'import torch\n'), ((23178, 23207), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (23200, 23207), False, 'import torch\n'), ((19771, 19986), 'image_folder_reader.ImageFolderReader', 'ImageFolderReader', ([], {'path_to_images': 'self.args.download_path_for_sun397_dataset', 'context_batch_size': 'context_set_size', 'target_batch_size': 'self.args.batch_size', 'image_size': 'self.args.image_size', 'device': 'self.device'}), '(path_to_images=self.args.download_path_for_sun397_dataset,\n context_batch_size=context_set_size, target_batch_size=self.args.\n batch_size, image_size=self.args.image_size, device=self.device)\n', (19788, 19986), False, 'from image_folder_reader import ImageFolderReader\n'), ((20195, 20471), 'tf_dataset_reader.TfDatasetReader', 'TfDatasetReader', ([], {'dataset': "dataset['name']", 'task': "dataset['task']", 'context_batch_size': 'context_set_size', 'target_batch_size': 'self.args.batch_size', 'path_to_datasets': 'self.args.download_path_for_tensorflow_datasets', 'image_size': 'self.args.image_size', 'device': 'self.device'}), "(dataset=dataset['name'], task=dataset['task'],\n context_batch_size=context_set_size, target_batch_size=self.args.\n batch_size, path_to_datasets=self.args.\n download_path_for_tensorflow_datasets, image_size=self.args.image_size,\n device=self.device)\n", (20210, 20471), False, 'from tf_dataset_reader import TfDatasetReader\n'), ((23435, 23457), 'torch.vstack', 'torch.vstack', (['features'], {}), '(features)\n', (23447, 23457), False, 'import torch\n'), ((14537, 14564), 'torch.vstack', 'torch.vstack', (['target_logits'], {}), '(target_logits)\n', (14549, 14564), False, 'import torch\n'), ((16972, 16999), 'torch.vstack', 'torch.vstack', (['target_logits'], {}), '(target_logits)\n', (16984, 16999), False, 'import torch\n'), ((15126, 15146), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (15134, 15146), True, 'import numpy as np\n'), ((17612, 17632), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (17620, 17632), True, 
'import numpy as np\n'), ((15200, 15220), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (15208, 15220), True, 'import numpy as np\n'), ((17695, 17715), 'numpy.array', 'np.array', (['accuracies'], {}), '(accuracies)\n', (17703, 17715), True, 'import numpy as np\n'), ((8606, 8626), 'torch.Tensor', 'torch.Tensor', (['losses'], {}), '(losses)\n', (8618, 8626), False, 'import torch\n'), ((8696, 8726), 'torch.Tensor', 'torch.Tensor', (['train_accuracies'], {}), '(train_accuracies)\n', (8708, 8726), False, 'import torch\n')]
|
#!/usr/bin/env python3
import os
import numpy as np
import sys
try:
import torch
except ImportError:
pass
from easypbr import *
from dataloaders import *
config_file="lnn_check_lattice_size.cfg"
config_path=os.path.join( os.path.dirname( os.path.realpath(__file__) ) , '../../config', config_file)
view = Viewer.create(config_path)  # first because it needs to init context
loader=DataLoaderScanNet(config_path)
loader.start()
nr_points_in_radius=[]
while True:
if(loader.has_data()):
cloud=loader.get_cloud()
Scene.show(cloud, "cloud")
random_point=cloud.V[1,:]
# print("random point is ", random_point)
nr_points=cloud.radius_search(random_point, 0.05)
nr_points_in_radius.append(nr_points)
print("mean_nr_points: ", np.mean(nr_points_in_radius))
view.update()
|
[
"os.path.realpath",
"numpy.mean"
] |
[((249, 275), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (265, 275), False, 'import os\n'), ((792, 820), 'numpy.mean', 'np.mean', (['nr_points_in_radius'], {}), '(nr_points_in_radius)\n', (799, 820), True, 'import numpy as np\n')]
|
import arff
import argparse
import json
import logging
import numpy as np
import openmlcontrib
import openmldefaults
import os
import pandas as pd
# SSHFS NEMO FREIBURG:
# sshfs <EMAIL>:/rigel/home/jv2657/experiments ~/habanero_experiments
#
# SSHFS GRACE LEIDEN:
# ssh -f -N -L 1233:grace.liacs.nl:22 <EMAIL>
# sshfs -p 1233 vanrijn@localhost:/home/vanrijn/experiments ~/grace_experiments
def parse_args():
metadata_file_text_classification = os.path.expanduser('../../data/text_classification.arff')
parser = argparse.ArgumentParser(description='Creates an ARFF file')
parser.add_argument('--output_directory', type=str, help='directory to store output',
default=os.path.expanduser('~/experiments/openml-defaults/at_vs_ar/'))
parser.add_argument('--task_idx', type=int)
parser.add_argument('--metadata_files', type=str, nargs='+', default=[metadata_file_text_classification])
parser.add_argument('--scoring', type=str, default='missclassification_rate')
parser.add_argument('--search_space_identifier', type=str, default='ferreira')
parser.add_argument('--minimize', action='store_true', default=True)
parser.add_argument('--normalize_base', type=str, default=None)
parser.add_argument('--normalize_a3r', type=str, default=None)
parser.add_argument('--a3r_r', type=int, default=2)
parser.add_argument('--aggregate', type=str, choices=openmldefaults.experiments.AGGREGATES, default='sum')
parser.add_argument('--n_defaults', type=int, default=384)
parser.add_argument('--n_estimators', type=int, default=64)
parser.add_argument('--minimum_evals', type=int, default=128)
parser.add_argument('--random_iterations', type=int, default=1)
parser.add_argument('--run_on_surrogates', action='store_true', default=True)
parser.add_argument('--task_limit', type=int, default=None, help='For speed')
parser.add_argument('--task_id_column', default='dataset', type=str)
parser.add_argument('--override_parameters', type=str)
args_ = parser.parse_args()
return args_
def run(args):
root = logging.getLogger()
root.setLevel(logging.INFO)
task_ids = None
for arff_file in args.metadata_files:
with open(arff_file, 'r') as fp:
df = openmlcontrib.meta.arff_to_dataframe(arff.load(fp), None)
if task_ids is None:
task_ids = np.sort(np.unique(df[args.task_id_column].values))
else:
task_ids = np.sort(np.unique(np.append(task_ids, df[args.task_id_column].values)))
logging.info('Task ids: %s' % task_ids)
if args.task_idx is None:
task_ids_to_process = task_ids
else:
task_ids_to_process = [task_ids[args.task_idx]]
# run random search
for random_seed in range(args.random_iterations):
for task_id in task_ids_to_process:
openmldefaults.experiments.run_vanilla_surrogates_on_task(
task_id=task_id,
models=[openmldefaults.models.AverageRankDefaults(), openmldefaults.models.ActiveTestingDefaults()],
use_surrogates=False,
random_seed=random_seed,
search_space_identifier=args.search_space_identifier,
metadata_files=args.metadata_files,
scoring=args.scoring,
minimize_measure=args.minimize,
n_defaults=args.n_defaults,
aggregate=args.aggregate,
a3r_r=args.a3r_r,
normalize_base=args.normalize_base,
normalize_a3r=args.normalize_a3r,
surrogate_n_estimators=args.n_estimators,
surrogate_minimum_evals=args.minimum_evals,
runtime_column='runtime',
consider_a3r=True,
evaluate_on_surrogate=args.run_on_surrogates,
task_limit=args.task_limit,
output_directory=args.output_directory,
task_id_column=args.task_id_column,
skip_row_check=True,
override_parameters=json.loads(args.override_parameters) if args.override_parameters else None
)
if __name__ == '__main__':
pd.options.mode.chained_assignment = 'raise'
run(parse_args())
|
[
"logging.getLogger",
"json.loads",
"numpy.unique",
"argparse.ArgumentParser",
"openmldefaults.models.AverageRankDefaults",
"arff.load",
"numpy.append",
"logging.info",
"os.path.expanduser",
"openmldefaults.models.ActiveTestingDefaults"
] |
[((450, 507), 'os.path.expanduser', 'os.path.expanduser', (['"""../../data/text_classification.arff"""'], {}), "('../../data/text_classification.arff')\n", (468, 507), False, 'import os\n'), ((521, 580), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Creates an ARFF file"""'}), "(description='Creates an ARFF file')\n", (544, 580), False, 'import argparse\n'), ((2098, 2117), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (2115, 2117), False, 'import logging\n'), ((2561, 2600), 'logging.info', 'logging.info', (["('Task ids: %s' % task_ids)"], {}), "('Task ids: %s' % task_ids)\n", (2573, 2600), False, 'import logging\n'), ((703, 764), 'os.path.expanduser', 'os.path.expanduser', (['"""~/experiments/openml-defaults/at_vs_ar/"""'], {}), "('~/experiments/openml-defaults/at_vs_ar/')\n", (721, 764), False, 'import os\n'), ((2308, 2321), 'arff.load', 'arff.load', (['fp'], {}), '(fp)\n', (2317, 2321), False, 'import arff\n'), ((2397, 2438), 'numpy.unique', 'np.unique', (['df[args.task_id_column].values'], {}), '(df[args.task_id_column].values)\n', (2406, 2438), True, 'import numpy as np\n'), ((2503, 2554), 'numpy.append', 'np.append', (['task_ids', 'df[args.task_id_column].values'], {}), '(task_ids, df[args.task_id_column].values)\n', (2512, 2554), True, 'import numpy as np\n'), ((2988, 3031), 'openmldefaults.models.AverageRankDefaults', 'openmldefaults.models.AverageRankDefaults', ([], {}), '()\n', (3029, 3031), False, 'import openmldefaults\n'), ((3033, 3078), 'openmldefaults.models.ActiveTestingDefaults', 'openmldefaults.models.ActiveTestingDefaults', ([], {}), '()\n', (3076, 3078), False, 'import openmldefaults\n'), ((4072, 4108), 'json.loads', 'json.loads', (['args.override_parameters'], {}), '(args.override_parameters)\n', (4082, 4108), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
"""
This module contains a method for determining the highest concentration recorded
by passed dataframes within the testing period (including sensor and/or
reference data).
================================================================================
@Author:
| <NAME>, NSSC Contractor (ORAU)
| U.S. EPA / ORD / CEMM / AMCD / SFSB
Created:
Wed Sep 8 12:11:43 2021
Last Updated:
Wed Sep 8 12:11:43 2021
"""
import numpy as np
def get_max_conc(param, df_list=None, ref_df=None, bdate=None, edate=None):
"""Determine maximum concentration measured across passed dataframes.
If both sensor dataframes are passed to ``df_list`` and a reference
dataframe is passed to ``ref_df``, the maximum will be computed across
both sensor and reference concentrations.
Args:
param (str): The name of the evaluation parameter.
df_list (list of pandas dataframes, optional): A list of sensor
dataframes. Defaults to None.
ref_df (pandas dataframe, optional): Reference dataframe. Defaults to
None. If dataframe passed, will be considered in calculation of
maximum concentration.
bdate (str, optional): The starting timestamp to begin search. Defaults
to None, will use the earliest timestamp recorded in datasets.
edate (str, optional): The ending timestamp to end search. Defaults
to None, will use the latest timestamp recorded in datasets.
Returns:
max_conc (float):
The maximum concentration indicated by the dataframes passed to the
function for the specified parameter.
Raises:
TypeError: If `df_list` and `ref_df` are both ``None`` (i.e., no
dataframes passed to function).
"""
if df_list is None and ref_df is None:
        raise TypeError('get_max_conc() missing required dataframe objects: '
'"df_list" and/or "ref_df"')
max_list = [df.loc[bdate:edate, param + '_Value'].max() for df in df_list]
if ref_df is not None:
ref_max = ref_df.loc[bdate:edate, param + '_Value'].max()
max_list.append(ref_max)
# Remove nans
max_list = [i for i in max_list if not np.isnan(i)]
max_conc = max(max_list)
return max_conc
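
if __name__ == '__main__':
    # Illustrative usage sketch (added for clarity; not part of the original
    # module). Builds two hypothetical sensor dataframes and a reference
    # dataframe with a 'PM25_Value' column and a timestamp index, then asks for
    # the overall maximum across all three.
    import pandas as pd

    idx = pd.date_range('2021-01-01', periods=3, freq='h')
    sensor_a = pd.DataFrame({'PM25_Value': [5.0, 7.2, np.nan]}, index=idx)
    sensor_b = pd.DataFrame({'PM25_Value': [4.1, 9.8, 6.3]}, index=idx)
    reference = pd.DataFrame({'PM25_Value': [5.5, 8.0, 7.1]}, index=idx)

    # Expected to print 9.8, the largest value observed in any dataframe.
    print(get_max_conc('PM25', df_list=[sensor_a, sensor_b], ref_df=reference))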
|
[
"numpy.isnan"
] |
[((2235, 2246), 'numpy.isnan', 'np.isnan', (['i'], {}), '(i)\n', (2243, 2246), True, 'import numpy as np\n')]
|
# This script takes as an argument the path to the folder which
# contains folders of images.
# It is assumed that the name of each folder with images is
# the label for the images, that is, all the images in each folder belong
# to the same class, and the name of that class is the name of the folder.
# Images are assumed to be in the .png format. It is also assumed that
# each folder has the same number of images. It is NOT assumed that all images
# have the same dimensionality, but all the images will be rescaled to 32x32
# before being saved into the dataset file.
# The total number of images is assumed to be divisible by 10.
# The script will produce a file named "characters_dataset" which will
# contain the train/validation/test datasets and labels in numpy arrays.
# The file will also contain the names of all the
# image folders in alphabetic order.
import os
import sys
from scipy import misc
import numpy as np
# The path that you have your image folders in
path = sys.argv[1]
# We rescale each image to be of size "SHAPE"
SHAPE = (32, 32)
# folder_names is a sorted list containing names of all the folders with images
folder_names = []
for name in sorted(os.listdir(path)):
if os.path.isdir(os.path.join(path, name)):
folder_names.append(name)
# Each element of folder_files is a sorted list of file names
# that are contained within a folder from folder_names
folder_files = []
for folder_name in folder_names:
folder_files.append(sorted(os.listdir(os.path.join(path, folder_name))))
number_of_classes = len(folder_names)
# we assume that all classes have the same number of elements
number_of_examples_per_class = len(folder_files[0])
# the data samples X and the labels y
X = []
y = []
# Load the images and labels into numpy arrays
for i in range(number_of_classes):
for j in range(number_of_examples_per_class):
image_location = os.path.join(
path, folder_names[i], folder_files[i][j])
image = misc.imread(image_location)
image = misc.imresize(image, size=SHAPE, interp='bilinear', mode=None)
X.append(image)
y.append(i)
# Turn the samples into proper numpy array of type
# float32 (for use with GPU) rescaled in [0,1] interval.
X = np.float32(np.array(X)/255.0)
y = np.int32(np.array(y))
hex_codes = np.array(folder_names)
# Make so that each batch of size "number_of_classes" samples is
# balanced with respect to classes.
# That is, each batch of size "number_of_classes" samples
# will contain exactly one sample of each class.
# In this way, when we split the data into train, validation, and test
# datasets, all of them will be balanced with respect to classes
# as long as the sizes of all of them are divisible by "number_of_classes".
X = np.concatenate(
[X[i::number_of_examples_per_class]
for i in range(number_of_examples_per_class)])
y = np.concatenate(
[y[i::number_of_examples_per_class]
for i in range(number_of_examples_per_class)])
dataset_size = number_of_classes * number_of_examples_per_class
# train - validation - test split is 80% - 10% - 10%
# We also assume that the dataset_size is divisible by 10.
X_train = X[:(dataset_size*8)//10]
y_train = y[:(dataset_size*8)//10]
X_val = X[(dataset_size*8)//10:(dataset_size*9)//10]
y_val = y[(dataset_size*8)//10:(dataset_size*9)//10]
X_test = X[(dataset_size*9)//10:]
y_test = y[(dataset_size*9)//10:]
f = open("characters_dataset", "wb")
np.save(f, X_train)
np.save(f, y_train)
np.save(f, X_val)
np.save(f, y_val)
np.save(f, X_test)
np.save(f, y_test)
np.save(f, hex_codes) # hex codes of each class (same as folder names)
f.close()
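
# Illustrative load-back sketch (added for clarity; not part of the original
# script). The seven arrays must be read with np.load in exactly the order they
# were saved above; the file name defaults to the one written by this script.
def load_characters_dataset(filename="characters_dataset"):
    with open(filename, "rb") as f:
        X_train = np.load(f)
        y_train = np.load(f)
        X_val = np.load(f)
        y_val = np.load(f)
        X_test = np.load(f)
        y_test = np.load(f)
        class_names = np.load(f)  # folder names / hex codes
    return X_train, y_train, X_val, y_val, X_test, y_test, class_names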
|
[
"os.listdir",
"os.path.join",
"numpy.array",
"scipy.misc.imread",
"scipy.misc.imresize",
"numpy.save"
] |
[((2311, 2333), 'numpy.array', 'np.array', (['folder_names'], {}), '(folder_names)\n', (2319, 2333), True, 'import numpy as np\n'), ((3448, 3467), 'numpy.save', 'np.save', (['f', 'X_train'], {}), '(f, X_train)\n', (3455, 3467), True, 'import numpy as np\n'), ((3468, 3487), 'numpy.save', 'np.save', (['f', 'y_train'], {}), '(f, y_train)\n', (3475, 3487), True, 'import numpy as np\n'), ((3488, 3505), 'numpy.save', 'np.save', (['f', 'X_val'], {}), '(f, X_val)\n', (3495, 3505), True, 'import numpy as np\n'), ((3506, 3523), 'numpy.save', 'np.save', (['f', 'y_val'], {}), '(f, y_val)\n', (3513, 3523), True, 'import numpy as np\n'), ((3524, 3542), 'numpy.save', 'np.save', (['f', 'X_test'], {}), '(f, X_test)\n', (3531, 3542), True, 'import numpy as np\n'), ((3543, 3561), 'numpy.save', 'np.save', (['f', 'y_test'], {}), '(f, y_test)\n', (3550, 3561), True, 'import numpy as np\n'), ((3562, 3583), 'numpy.save', 'np.save', (['f', 'hex_codes'], {}), '(f, hex_codes)\n', (3569, 3583), True, 'import numpy as np\n'), ((1181, 1197), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1191, 1197), False, 'import os\n'), ((2286, 2297), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (2294, 2297), True, 'import numpy as np\n'), ((1221, 1245), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (1233, 1245), False, 'import os\n'), ((1894, 1949), 'os.path.join', 'os.path.join', (['path', 'folder_names[i]', 'folder_files[i][j]'], {}), '(path, folder_names[i], folder_files[i][j])\n', (1906, 1949), False, 'import os\n'), ((1979, 2006), 'scipy.misc.imread', 'misc.imread', (['image_location'], {}), '(image_location)\n', (1990, 2006), False, 'from scipy import misc\n'), ((2023, 2085), 'scipy.misc.imresize', 'misc.imresize', (['image'], {'size': 'SHAPE', 'interp': '"""bilinear"""', 'mode': 'None'}), "(image, size=SHAPE, interp='bilinear', mode=None)\n", (2036, 2085), False, 'from scipy import misc\n'), ((2254, 2265), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (2262, 2265), True, 'import numpy as np\n'), ((1493, 1524), 'os.path.join', 'os.path.join', (['path', 'folder_name'], {}), '(path, folder_name)\n', (1505, 1524), False, 'import os\n')]
|
''' Helper class and functions for loading SUN RGB-D objects
Author: <NAME>
Date: October 2017
Modified by <NAME>
'''
import os
import sys
import numpy as np
import pickle
import argparse
from PIL import Image
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
import sunrgbd_utils as utils
from sunrgbd_object import sunrgbd_object
from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d
def ravel_hash(coord):
assert coord.ndim == 2
coord -= coord.min(0)
coord_max = coord.max(0) + 1
keys = np.zeros(coord.shape[0], dtype=np.int64)
for i in range(coord.shape[1] - 1):
keys += coord[:, i]
keys *= coord_max[i + 1]
keys += coord[:, -1]
return keys
def down_sample(x, voxel_size=(0.05, )):
if isinstance(voxel_size, float):
voxel_size = (voxel_size, )
if len(voxel_size) == 1:
voxel_size = voxel_size * 3
voxel_size = np.array(voxel_size, dtype=np.float32)
voxel_index = np.floor(x / voxel_size).astype(np.int64, copy=False)
hash_keys = ravel_hash(voxel_index)
_, idx = np.unique(hash_keys, return_index=True)
return idx
def get_box3d_dim_statistics(my_sunrgbd_dir, idx_filename, type_whitelist):
dataset = sunrgbd_object(my_sunrgbd_dir)
dimension_list = []
type_list = []
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
for data_idx in data_idx_list:
print('------------- ', data_idx)
objects = dataset.get_label_objects(data_idx)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist:
continue
dimension_list.append(np.array([obj.l, obj.w, obj.h]))
type_list.append(obj.classname)
print("number of objects: {} ".format(len(type_list)))
print("categories:", list(sorted(type_whitelist)))
# Get average box size for different categories
for class_type in sorted(set(type_list)):
cnt = 0
box3d_list = []
for i in range(len(dimension_list)):
if type_list[i] == class_type:
cnt += 1
box3d_list.append(dimension_list[i])
median_box3d = np.median(box3d_list, 0)
print("\'%s\': np.array([%f,%f,%f])," %
(class_type, median_box3d[0] * 2, median_box3d[1] * 2, median_box3d[2] * 2))
def read_det_file(det_file):
id_list = []
type_list = []
prob_list = []
box2d_list = []
# data_idx, type_list, prob, box2d
with open(det_file, 'rt') as f:
for line in f:
t = line.rstrip().split(" ")
id_list.append(int(t[0]))
type_list.append(t[1])
prob_list.append(float(t[2]))
box2d_list.append(np.array([float(t[i]) for i in range(3, 7)]))
return id_list, type_list, box2d_list, prob_list
def read_det_pkl_file(det_file):
classes = [
'__background__', 'bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser',
'garbage_bin', 'lamp', 'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet'
]
with open(det_file, 'rb') as f:
dets = pickle.load(f)
num_classes = len(dets)
num_images = len(dets[0])
id_list = []
type_list = []
prob_list = []
box2d_list = []
for i in range(num_images):
for c in range(1, num_classes):
det = dets[c][i]
for j in range(len(det)):
id_list.append((i + 1))
type_list.append(classes[c])
prob_list.append(det[j][4])
box2d_list.append(det[j][:4])
return id_list, type_list, box2d_list, prob_list
def extract_frustum_data(sunrgbd_dir,
idx_filename,
split,
output_filename,
type_whitelist,
perturb_box2d=False,
augmentX=1,
with_down_sample=False):
dataset = sunrgbd_object(sunrgbd_dir, split)
data_idx_list = [int(line.rstrip()) for line in open(idx_filename)]
id_list = [] # int number
box2d_list = [] # [xmin,ymin,xmax,ymax]
box3d_list = [] # (8,3) array in upright depth coord
input_list = [] # channel number = 6, xyz,rgb in upright depth coord
label_list = [] # 1 for roi object, 0 for clutter
type_list = [] # string e.g. bed
    heading_list = [] # face of object angle, radians of clockwise angle from positive x axis in upright camera coord
box3d_size_list = [] # array of l,w,h
frustum_angle_list = [] # angle of 2d box center from pos x-axis (clockwise)
img_coord_list = []
calib_K_list = []
calib_R_list = []
pos_cnt = 0
all_cnt = 0
for data_idx in data_idx_list:
print('------------- ', data_idx)
calib = dataset.get_calibration(data_idx)
objects = dataset.get_label_objects(data_idx)
pc_upright_depth = dataset.get_pointcloud(data_idx)
pc_upright_camera = np.zeros_like(pc_upright_depth)
pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])
pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
if with_down_sample:
idx = down_sample(pc_upright_camera[:, :3], 0.01)
# print(len(idx), len(pc_upright_camera))
pc_upright_camera = pc_upright_camera[idx]
pc_upright_depth = pc_upright_depth[idx]
# img = dataset.get_image(data_idx)
# img_height, img_width, img_channel = img.shape
pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)
for obj_idx in range(len(objects)):
obj = objects[obj_idx]
if obj.classname not in type_whitelist:
continue
# 2D BOX: Get pts rect backprojected
box2d = obj.box2d
for _ in range(augmentX):
if perturb_box2d:
xmin, ymin, xmax, ymax = random_shift_box2d(box2d)
# print(xmin,ymin,xmax,ymax)
else:
xmin, ymin, xmax, ymax = box2d
box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & (
pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin)
coord_in_box_fov = pc_image_coord[box_fov_inds, :]
pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
# Get frustum angle (according to center pixel in 2D BOX)
box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
uvdepth = np.zeros((1, 3))
uvdepth[0, 0:2] = box2d_center
uvdepth[0, 2] = 20 # some random depth
box2d_center_upright_camera = calib.project_image_to_upright_camera(uvdepth)
# print('UVdepth, center in upright camera: ', uvdepth, box2d_center_upright_camera)
frustum_angle = -1 * np.arctan2(
box2d_center_upright_camera[0, 2],
box2d_center_upright_camera[0, 0]) # angle as to positive x-axis as in the Zoox paper
# print('Frustum angle: ', frustum_angle)
# 3D BOX: Get pts velo in 3d box
box3d_pts_2d, box3d_pts_3d = utils.compute_box_3d(obj, calib)
box3d_pts_3d = calib.project_upright_depth_to_upright_camera(box3d_pts_3d)
try:
_, inds = extract_pc_in_box3d(pc_in_box_fov, box3d_pts_3d)
except Exception as e:
print(e)
continue
label = np.zeros((pc_in_box_fov.shape[0]))
label[inds] = 1
box3d_size = np.array([2 * obj.l, 2 * obj.w, 2 * obj.h])
# Subsample points..
num_point = pc_in_box_fov.shape[0]
if num_point > 2048:
choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
coord_in_box_fov = coord_in_box_fov[choice, :]
pc_in_box_fov = pc_in_box_fov[choice, :]
label = label[choice]
# Reject object with too few points
if np.sum(label) < 5:
continue
id_list.append(data_idx)
box2d_list.append(np.array([xmin, ymin, xmax, ymax], dtype=np.float32))
box3d_list.append(box3d_pts_3d)
input_list.append(pc_in_box_fov.astype(np.float32))
label_list.append(label.astype(np.bool))
type_list.append(obj.classname)
heading_list.append(obj.heading_angle)
box3d_size_list.append(box3d_size)
frustum_angle_list.append(frustum_angle)
img_coord_list.append(coord_in_box_fov.astype(np.float32))
calib_K_list.append(calib.K)
calib_R_list.append(calib.Rtilt)
# collect statistics
pos_cnt += np.sum(label)
all_cnt += pc_in_box_fov.shape[0]
print('Average pos ratio: ', pos_cnt / float(all_cnt))
print('Average npoints: ', float(all_cnt) / len(id_list))
data_dict = {
'id': id_list,
'box2d': box2d_list,
'box3d': box3d_list,
'box3d_size': box3d_size_list,
'box3d_heading': heading_list,
'type': type_list,
'input': input_list,
'frustum_angle': frustum_angle_list,
'label': label_list,
'calib_K': calib_K_list,
'calib_R': calib_R_list,
# 'image_coord': img_coord_list,
}
with open(output_filename, 'wb') as f:
pickle.dump(data_dict, f, -1)
print("save in {}".format(output_filename))
def extract_frustum_data_from_rgb_detection(sunrgbd_dir,
det_file,
split,
output_filename,
type_whitelist,
valid_id_list=None,
with_down_sample=False):
dataset = sunrgbd_object(sunrgbd_dir, split)
if det_file.split('.')[-1] == 'txt':
det_id_list, det_type_list, det_box2d_list, det_prob_list = read_det_file(det_file)
else:
det_id_list, det_type_list, det_box2d_list, det_prob_list = read_det_pkl_file(det_file)
cache_id = -1
cache = None
id_list = []
type_list = []
box2d_list = []
prob_list = []
input_list = [] # channel number = 4, xyz,intensity in rect camera coord
frustum_angle_list = [] # angle of 2d box center from pos x-axis
img_coord_list = []
calib_K_list = []
calib_R_list = []
for det_idx in range(len(det_id_list)):
data_idx = det_id_list[det_idx]
if valid_id_list is not None and data_idx not in valid_id_list:
continue
if det_type_list[det_idx] not in type_whitelist:
continue
print('det idx: %d/%d, data idx: %d' % (det_idx, len(det_id_list), data_idx))
if cache_id != data_idx:
calib = dataset.get_calibration(data_idx)
pc_upright_depth = dataset.get_pointcloud(data_idx)
pc_upright_camera = np.zeros_like(pc_upright_depth)
pc_upright_camera[:, 0:3] = calib.project_upright_depth_to_upright_camera(pc_upright_depth[:, 0:3])
pc_upright_camera[:, 3:] = pc_upright_depth[:, 3:]
if with_down_sample:
idx = down_sample(pc_upright_camera[:, :3], 0.01)
# print(len(idx), len(pc_upright_camera))
pc_upright_camera = pc_upright_camera[idx]
pc_upright_depth = pc_upright_depth[idx]
# img = dataset.get_image(data_idx)
# img_height, img_width, img_channel = img.shape
pc_image_coord, _ = calib.project_upright_depth_to_image(pc_upright_depth)
cache = [calib, pc_upright_camera, pc_image_coord]
cache_id = data_idx
else:
calib, pc_upright_camera, pc_image_coord = cache
# 2D BOX: Get pts rect backprojected
xmin, ymin, xmax, ymax = det_box2d_list[det_idx]
box_fov_inds = (pc_image_coord[:, 0] < xmax) & (pc_image_coord[:, 0] >= xmin) & (
pc_image_coord[:, 1] < ymax) & (pc_image_coord[:, 1] >= ymin)
coord_in_box_fov = pc_image_coord[box_fov_inds, :]
pc_in_box_fov = pc_upright_camera[box_fov_inds, :]
# Get frustum angle (according to center pixel in 2D BOX)
box2d_center = np.array([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])
uvdepth = np.zeros((1, 3))
uvdepth[0, 0:2] = box2d_center
uvdepth[0, 2] = 20 # some random depth
box2d_center_upright_camera = calib.project_image_to_upright_camera(uvdepth)
frustum_angle = -1 * np.arctan2(
box2d_center_upright_camera[0, 2],
box2d_center_upright_camera[0, 0]) # angle as to positive x-axis as in the Zoox paper
# Subsample points..
num_point = pc_in_box_fov.shape[0]
if num_point > 2048:
choice = np.random.choice(pc_in_box_fov.shape[0], 2048, replace=False)
coord_in_box_fov = coord_in_box_fov[choice, :]
pc_in_box_fov = pc_in_box_fov[choice, :]
# Pass objects that are too small
if len(pc_in_box_fov) < 5:
continue
id_list.append(data_idx)
type_list.append(det_type_list[det_idx])
box2d_list.append(det_box2d_list[det_idx])
prob_list.append(det_prob_list[det_idx])
input_list.append(pc_in_box_fov.astype(np.float32))
frustum_angle_list.append(frustum_angle)
img_coord_list.append(coord_in_box_fov.astype(np.float32))
calib_K_list.append(calib.K)
calib_R_list.append(calib.Rtilt)
data_dict = {
'id': id_list,
'type': type_list,
'box2d': box2d_list,
'box2d_prob': prob_list,
'input': input_list,
'frustum_angle': frustum_angle_list,
'calib_K': calib_K_list,
'calib_R': calib_R_list,
# 'image_coord': img_coord_list,
}
with open(output_filename, 'wb') as f:
pickle.dump(data_dict, f, -1)
print("save in {}".format(output_filename))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--gen_train',
action='store_true',
help='Generate train split frustum data with perturbed GT 2D boxes')
parser.add_argument('--gen_val', action='store_true', help='Generate val split frustum data with GT 2D boxes')
parser.add_argument('--gen_val_rgb_detection',
action='store_true',
help='Generate val split frustum data with RGB detection 2D boxes')
parser.add_argument('--num_classes', default=10, type=int, help='19 or 10 categories, default 10')
parser.add_argument('--save_dir',
default='sunrgbd/data/pickle_data',
type=str,
help='directory to save data, default[sunrgbd/data/pickle_data]')
parser.add_argument('--gen_avg_dim', action='store_true', help='get average dimension of each class')
args = parser.parse_args()
my_sunrgbd_dir = 'sunrgbd/mysunrgbd' # change if you do not set default path
if args.num_classes == 10:
type_whitelist = [
'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser', 'night_stand', 'bookshelf', 'bathtub'
]
elif args.num_classes == 19:
type_whitelist = [
'bathtub', 'bed', 'bookshelf', 'box', 'chair', 'counter', 'desk', 'door', 'dresser', 'garbage_bin', 'lamp',
'monitor', 'night_stand', 'pillow', 'sink', 'sofa', 'table', 'tv', 'toilet'
]
else:
assert False, 'please set correct num_classes'
type_whitelist = set(type_whitelist)
if args.gen_avg_dim:
get_box3d_dim_statistics(my_sunrgbd_dir, 'sunrgbd/image_sets/train.txt', type_whitelist)
save_dir = args.save_dir
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if args.gen_train:
extract_frustum_data(my_sunrgbd_dir,
'sunrgbd/image_sets/train.txt',
'training',
output_filename=os.path.join(save_dir, 'sunrgbd_train_aug5x.pickle'),
type_whitelist=type_whitelist,
perturb_box2d=True,
augmentX=5,
with_down_sample=False)
if args.gen_val:
extract_frustum_data(my_sunrgbd_dir,
'sunrgbd/image_sets/val.txt',
'training',
output_filename=os.path.join(save_dir, 'sunrgbd_val.pickle'),
type_whitelist=type_whitelist,
perturb_box2d=False,
augmentX=1,
with_down_sample=False)
if args.gen_val_rgb_detection:
extract_frustum_data_from_rgb_detection(my_sunrgbd_dir,
'./sunrgbd/rgb_detections/sunrgbd_rgb_det_val_classes19_mAP50.2.txt',
'training',
os.path.join(save_dir,'sunrgbd_rgb_det_val.pickle'),
type_whitelist=type_whitelist)
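
# Illustrative sketch (added for clarity; not part of the original pipeline):
# demonstrates the voxel-grid down-sampling helpers defined above on a synthetic
# point cloud. The kept-point count depends on the random draw and voxel size.
def _demo_down_sample(n_points=10000, voxel=0.05):
    pts = np.random.rand(n_points, 3)          # random points in a unit cube
    keep = down_sample(pts, voxel_size=voxel)   # one representative index per occupied voxel
    print('kept %d of %d points' % (len(keep), n_points))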
|
[
"numpy.array",
"numpy.arctan2",
"sys.path.append",
"sunrgbd_utils.random_shift_box2d",
"os.path.exists",
"argparse.ArgumentParser",
"sunrgbd_utils.compute_box_3d",
"sunrgbd_object.sunrgbd_object",
"numpy.random.choice",
"pickle.load",
"numpy.floor",
"numpy.median",
"pickle.dump",
"numpy.unique",
"os.makedirs",
"os.path.join",
"numpy.sum",
"numpy.zeros",
"sunrgbd_utils.extract_pc_in_box3d",
"os.path.abspath",
"numpy.zeros_like"
] |
[((268, 293), 'sys.path.append', 'sys.path.append', (['BASE_DIR'], {}), '(BASE_DIR)\n', (283, 293), False, 'import sys\n'), ((241, 266), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (256, 266), False, 'import os\n'), ((557, 597), 'numpy.zeros', 'np.zeros', (['coord.shape[0]'], {'dtype': 'np.int64'}), '(coord.shape[0], dtype=np.int64)\n', (565, 597), True, 'import numpy as np\n'), ((944, 982), 'numpy.array', 'np.array', (['voxel_size'], {'dtype': 'np.float32'}), '(voxel_size, dtype=np.float32)\n', (952, 982), True, 'import numpy as np\n'), ((1108, 1147), 'numpy.unique', 'np.unique', (['hash_keys'], {'return_index': '(True)'}), '(hash_keys, return_index=True)\n', (1117, 1147), True, 'import numpy as np\n'), ((1256, 1286), 'sunrgbd_object.sunrgbd_object', 'sunrgbd_object', (['my_sunrgbd_dir'], {}), '(my_sunrgbd_dir)\n', (1270, 1286), False, 'from sunrgbd_object import sunrgbd_object\n'), ((4083, 4117), 'sunrgbd_object.sunrgbd_object', 'sunrgbd_object', (['sunrgbd_dir', 'split'], {}), '(sunrgbd_dir, split)\n', (4097, 4117), False, 'from sunrgbd_object import sunrgbd_object\n'), ((10331, 10365), 'sunrgbd_object.sunrgbd_object', 'sunrgbd_object', (['sunrgbd_dir', 'split'], {}), '(sunrgbd_dir, split)\n', (10345, 10365), False, 'from sunrgbd_object import sunrgbd_object\n'), ((14556, 14581), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14579, 14581), False, 'import argparse\n'), ((2244, 2268), 'numpy.median', 'np.median', (['box3d_list', '(0)'], {}), '(box3d_list, 0)\n', (2253, 2268), True, 'import numpy as np\n'), ((3224, 3238), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3235, 3238), False, 'import pickle\n'), ((5107, 5138), 'numpy.zeros_like', 'np.zeros_like', (['pc_upright_depth'], {}), '(pc_upright_depth)\n', (5120, 5138), True, 'import numpy as np\n'), ((9819, 9848), 'pickle.dump', 'pickle.dump', (['data_dict', 'f', '(-1)'], {}), '(data_dict, f, -1)\n', (9830, 9848), False, 'import pickle\n'), ((12784, 12836), 'numpy.array', 'np.array', (['[(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]'], {}), '([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])\n', (12792, 12836), True, 'import numpy as np\n'), ((12855, 12871), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (12863, 12871), True, 'import numpy as np\n'), ((14434, 14463), 'pickle.dump', 'pickle.dump', (['data_dict', 'f', '(-1)'], {}), '(data_dict, f, -1)\n', (14445, 14463), False, 'import pickle\n'), ((16339, 16363), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (16353, 16363), False, 'import os\n'), ((16373, 16394), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (16384, 16394), False, 'import os\n'), ((1001, 1025), 'numpy.floor', 'np.floor', (['(x / voxel_size)'], {}), '(x / voxel_size)\n', (1009, 1025), True, 'import numpy as np\n'), ((11461, 11492), 'numpy.zeros_like', 'np.zeros_like', (['pc_upright_depth'], {}), '(pc_upright_depth)\n', (11474, 11492), True, 'import numpy as np\n'), ((13073, 13158), 'numpy.arctan2', 'np.arctan2', (['box2d_center_upright_camera[0, 2]', 'box2d_center_upright_camera[0, 0]'], {}), '(box2d_center_upright_camera[0, 2], box2d_center_upright_camera[0, 0]\n )\n', (13083, 13158), True, 'import numpy as np\n'), ((13353, 13414), 'numpy.random.choice', 'np.random.choice', (['pc_in_box_fov.shape[0]', '(2048)'], {'replace': '(False)'}), '(pc_in_box_fov.shape[0], 2048, replace=False)\n', (13369, 13414), True, 'import numpy as np\n'), ((17656, 17708), 'os.path.join', 'os.path.join', (['save_dir', 
'"""sunrgbd_rgb_det_val.pickle"""'], {}), "(save_dir, 'sunrgbd_rgb_det_val.pickle')\n", (17668, 17708), False, 'import os\n'), ((1723, 1754), 'numpy.array', 'np.array', (['[obj.l, obj.w, obj.h]'], {}), '([obj.l, obj.w, obj.h])\n', (1731, 1754), True, 'import numpy as np\n'), ((6665, 6717), 'numpy.array', 'np.array', (['[(xmin + xmax) / 2.0, (ymin + ymax) / 2.0]'], {}), '([(xmin + xmax) / 2.0, (ymin + ymax) / 2.0])\n', (6673, 6717), True, 'import numpy as np\n'), ((6744, 6760), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (6752, 6760), True, 'import numpy as np\n'), ((7421, 7453), 'sunrgbd_utils.compute_box_3d', 'utils.compute_box_3d', (['obj', 'calib'], {}), '(obj, calib)\n', (7441, 7453), True, 'import sunrgbd_utils as utils\n'), ((7767, 7799), 'numpy.zeros', 'np.zeros', (['pc_in_box_fov.shape[0]'], {}), '(pc_in_box_fov.shape[0])\n', (7775, 7799), True, 'import numpy as np\n'), ((7863, 7906), 'numpy.array', 'np.array', (['[2 * obj.l, 2 * obj.w, 2 * obj.h]'], {}), '([2 * obj.l, 2 * obj.w, 2 * obj.h])\n', (7871, 7906), True, 'import numpy as np\n'), ((9160, 9173), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (9166, 9173), True, 'import numpy as np\n'), ((16611, 16663), 'os.path.join', 'os.path.join', (['save_dir', '"""sunrgbd_train_aug5x.pickle"""'], {}), "(save_dir, 'sunrgbd_train_aug5x.pickle')\n", (16623, 16663), False, 'import os\n'), ((17080, 17124), 'os.path.join', 'os.path.join', (['save_dir', '"""sunrgbd_val.pickle"""'], {}), "(save_dir, 'sunrgbd_val.pickle')\n", (17092, 17124), False, 'import os\n'), ((6098, 6123), 'sunrgbd_utils.random_shift_box2d', 'random_shift_box2d', (['box2d'], {}), '(box2d)\n', (6116, 6123), False, 'from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d\n'), ((7095, 7180), 'numpy.arctan2', 'np.arctan2', (['box2d_center_upright_camera[0, 2]', 'box2d_center_upright_camera[0, 0]'], {}), '(box2d_center_upright_camera[0, 2], box2d_center_upright_camera[0, 0]\n )\n', (7105, 7180), True, 'import numpy as np\n'), ((7596, 7644), 'sunrgbd_utils.extract_pc_in_box3d', 'extract_pc_in_box3d', (['pc_in_box_fov', 'box3d_pts_3d'], {}), '(pc_in_box_fov, box3d_pts_3d)\n', (7615, 7644), False, 'from sunrgbd_utils import random_shift_box2d, extract_pc_in_box3d\n'), ((8061, 8122), 'numpy.random.choice', 'np.random.choice', (['pc_in_box_fov.shape[0]', '(2048)'], {'replace': '(False)'}), '(pc_in_box_fov.shape[0], 2048, replace=False)\n', (8077, 8122), True, 'import numpy as np\n'), ((8364, 8377), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (8370, 8377), True, 'import numpy as np\n'), ((8488, 8540), 'numpy.array', 'np.array', (['[xmin, ymin, xmax, ymax]'], {'dtype': 'np.float32'}), '([xmin, ymin, xmax, ymax], dtype=np.float32)\n', (8496, 8540), True, 'import numpy as np\n')]
|
import copy
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
class DataFrame(object):
"""Minimal pd.DataFrame analog for handling n-dimensional numpy matrices with additional
support for shuffling, batching, and train/test splitting.
Args:
columns: List of names corresponding to the matrices in data.
data: List of n-dimensional data matrices ordered in correspondence with columns.
All matrices must have the same leading dimension. Data can also be fed a list of
instances of np.memmap, in which case RAM usage can be limited to the size of a
single batch.
"""
def __init__(self, columns, data):
assert len(columns) == len(data), 'columns length does not match data length'
lengths = [mat.shape[0] for mat in data]
assert len(set(lengths)) == 1, 'all matrices in data must have same first dimension'
self.length = lengths[0]
self.columns = columns
self.data = data
self.dict = dict(zip(self.columns, self.data))
self.idx = np.arange(self.length)
def shapes(self):
return pd.Series(dict(zip(self.columns, [mat.shape for mat in self.data])))
def dtypes(self):
return pd.Series(dict(zip(self.columns, [mat.dtype for mat in self.data])))
def shuffle(self):
np.random.shuffle(self.idx)
def train_test_split(self, train_size, random_state=np.random.randint(1000), stratify=None):
train_idx, test_idx = train_test_split(
self.idx,
train_size=train_size,
random_state=random_state,
stratify=stratify
)
train_df = DataFrame(copy.copy(self.columns), [mat[train_idx] for mat in self.data])
test_df = DataFrame(copy.copy(self.columns), [mat[test_idx] for mat in self.data])
return train_df, test_df
def batch_generator(self, batch_size, shuffle=True, num_epochs=10000, allow_smaller_final_batch=False):
epoch_num = 0
while epoch_num < num_epochs:
if shuffle:
self.shuffle()
for i in range(0, self.length + 1, batch_size):
batch_idx = self.idx[i: i + batch_size]
if not allow_smaller_final_batch and len(batch_idx) != batch_size:
break
yield DataFrame(
columns=copy.copy(self.columns),
data=[mat[batch_idx].copy() for mat in self.data]
)
epoch_num += 1
def iterrows(self):
for i in self.idx:
yield self[i]
def mask(self, mask):
return DataFrame(copy.copy(self.columns), [mat[mask] for mat in self.data])
def concat(self, other_df):
mats = []
for column in self.columns:
mats.append(np.concatenate([self[column], other_df[column]], axis=0))
return DataFrame(copy.copy(self.columns), mats)
def items(self):
return self.dict.items()
def __iter__(self):
return self.dict.items().__iter__()
def __len__(self):
return self.length
def __getitem__(self, key):
if isinstance(key, str):
return self.dict[key]
elif isinstance(key, int):
return pd.Series(dict(zip(self.columns, [mat[self.idx[key]] for mat in self.data])))
def __setitem__(self, key, value):
assert value.shape[0] == len(self), 'matrix first dimension does not match'
if key not in self.columns:
self.columns.append(key)
self.data.append(value)
self.dict[key] = value
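
if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; not part of the original class).
    # Two aligned matrices, a stratified train/test split, and one pass over
    # mini-batches of the training part.
    np.random.seed(0)
    features = np.random.rand(100, 8).astype(np.float32)
    labels = np.random.randint(0, 2, size=100)
    df = DataFrame(columns=['features', 'labels'], data=[features, labels])
    train_df, test_df = df.train_test_split(train_size=0.8, stratify=labels)
    for batch in train_df.batch_generator(batch_size=16, num_epochs=1):
        print(batch['features'].shape, batch['labels'].shape)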
|
[
"sklearn.model_selection.train_test_split",
"numpy.random.randint",
"numpy.concatenate",
"copy.copy",
"numpy.arange",
"numpy.random.shuffle"
] |
[((1114, 1136), 'numpy.arange', 'np.arange', (['self.length'], {}), '(self.length)\n', (1123, 1136), True, 'import numpy as np\n'), ((1383, 1410), 'numpy.random.shuffle', 'np.random.shuffle', (['self.idx'], {}), '(self.idx)\n', (1400, 1410), True, 'import numpy as np\n'), ((1468, 1491), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (1485, 1491), True, 'import numpy as np\n'), ((1539, 1638), 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.idx'], {'train_size': 'train_size', 'random_state': 'random_state', 'stratify': 'stratify'}), '(self.idx, train_size=train_size, random_state=random_state,\n stratify=stratify)\n', (1555, 1638), False, 'from sklearn.model_selection import train_test_split\n'), ((1722, 1745), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (1731, 1745), False, 'import copy\n'), ((1814, 1837), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (1823, 1837), False, 'import copy\n'), ((2692, 2715), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (2701, 2715), False, 'import copy\n'), ((2945, 2968), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (2954, 2968), False, 'import copy\n'), ((2862, 2918), 'numpy.concatenate', 'np.concatenate', (['[self[column], other_df[column]]'], {'axis': '(0)'}), '([self[column], other_df[column]], axis=0)\n', (2876, 2918), True, 'import numpy as np\n'), ((2421, 2444), 'copy.copy', 'copy.copy', (['self.columns'], {}), '(self.columns)\n', (2430, 2444), False, 'import copy\n')]
|
#! /usr/bin/env python3
# coding=utf-8
'''
Loads a saved pytorch model checkpoint and an image and prints the most likely
image class and its associated probability. If provided, uses a category to
name json file to map categories to names and print the names as well.
SPECS:
- Allows users to print out the top K classes along with associated
probabilities.
- Allows users to use the GPU to calculate the predictions.
- Allows users to load a JSON file that maps the class values to other category
names.
TODO:
- args validation,
- complete docstrings,
- write unit tests
'''
import os
import argparse
import json
from PIL import Image
import numpy as np
import torch
from torch.autograd import Variable
from torchvision import models
def main():
''''''
args = get_input_args()
# Load model from checkpoint
model = load_checkpoint(args)
# Predict and print top K classes along with their probabilities
predict(model, args)
def get_input_args():
''''''
parser = argparse.ArgumentParser(description='')
parser.add_argument('checkpoint_path', metavar='CHKPT_PATH',
help='path to chekpoint')
parser.add_argument('image_path', metavar='IMG_PATH',
help='path to image')
parser.add_argument('--gpu', dest='gpu', default=False,
action='store_true', help='use gpu for the prediction')
parser.add_argument('-k', '--topk', dest='topk', default=1,
type=int,
help='number of top K classes to print (default: 1)')
parser.add_argument('-ctn', '--cat_to_name', dest='cat_to_name',
default=None,
type=str,
help="""
The path to an alternative JSON file that maps the class
values to category names (default:None)
""")
return parser.parse_args()
def load_checkpoint(args):
''''''
checkpoint_path = os.path.relpath(args.checkpoint_path)
checkpoint = torch.load(checkpoint_path)
model = models.__dict__[checkpoint['architecture']](pretrained=True)
model.classifier = checkpoint['classifier']
model.class_to_idx = checkpoint['class_to_idx']
model.load_state_dict(checkpoint['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
    returns a Numpy array'''
image_size = image.size
# Resize the image where the shortest side is 256 pixels,
# keeping the aspect ratio
shorter_side_idx = image_size.index(min(image_size))
bigger_side_idx = image_size.index(max(image_size))
aspect_ratio = image_size[bigger_side_idx] / image_size[shorter_side_idx]
new_size = [None, None]
new_size[shorter_side_idx] = 256
new_size[bigger_side_idx] = int(256 * aspect_ratio)
image = image.resize(new_size)
# Crop out the center 224x224 portion of the image
width, height = new_size
new_width, new_height = (224, 224)
left = (width - new_width) / 2
top = (height - new_height) / 2
right = (width + new_width) / 2
bottom = (height + new_height) / 2
image = image.crop((left, top, right, bottom))
# Convert image color channels from 0-255 to floats 0-1.
np_image = np.array(image)
np_image = np_image / 255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
# PyTorch expects the color channel to be the first dimension but it's the
    # third dimension in the PIL image and Numpy array. Transpose the numpy array
np_image = np_image.transpose((2, 0, 1))
return np_image
def predict(model, args):
''' Predict the class (or classes) of an image using a trained deep learning
model. If available, uses a category to name json file to map categories to
names and print the names as well'''
print("=> Predicting probabilities..\n")
model.eval()
# Create class to name dictionary
idx_to_class = {i: k for k, i in model.class_to_idx.items()}
# Load and process image
image_path = os.path.relpath(args.image_path)
image = process_image(Image.open(image_path))
image = torch.FloatTensor([image])
# Configure use of gpu
if args.gpu:
print(' Using GPU..\n')
model = model.cuda()
image = image.cuda()
# map model indexes to image classes
idx_to_class = {i: k for k, i in model.class_to_idx.items()}
# get top K predictions and indexes
output = model.forward(Variable(image))
ps = torch.exp(output).data[0]
cl_index = ps.topk(args.topk)
# Map to classes and names
classes = [idx_to_class[idx]
for idx in cl_index[1].cpu().numpy()]
probs = cl_index[0].cpu().numpy()
print(' Probabilities: ', probs)
if args.cat_to_name:
ctn_path = os.path.relpath(args.cat_to_name)
with open(ctn_path, 'r') as f:
cat_to_name = json.load(f)
names = [cat_to_name[cl] for cl in classes]
print(' Classes: ', [(cl, nm) for cl, nm in
zip(classes, names)])
else:
print(' Classes: ', classes)
if __name__ == '__main__':
main()
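
# Illustrative sketch (added for clarity; not part of the original CLI): the core
# of predict() reduced to a toy example. The 5-class mapping and the random
# "model output" below are hypothetical stand-ins for a real checkpoint.
def _demo_topk_mapping(k=3):
    idx_to_class = {0: '21', 1: '3', 2: '45', 3: '7', 4: '12'}
    output = torch.log_softmax(torch.randn(1, 5), dim=1)  # stand-in for model.forward()
    ps = torch.exp(output).data[0]
    probs, indices = ps.topk(k)
    classes = [idx_to_class[idx] for idx in indices.cpu().numpy()]
    return probs.cpu().numpy(), classes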
|
[
"PIL.Image.open",
"argparse.ArgumentParser",
"torch.load",
"torch.exp",
"numpy.array",
"json.load",
"torch.autograd.Variable",
"torch.FloatTensor",
"os.path.relpath"
] |
[((1022, 1061), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (1045, 1061), False, 'import argparse\n'), ((2041, 2078), 'os.path.relpath', 'os.path.relpath', (['args.checkpoint_path'], {}), '(args.checkpoint_path)\n', (2056, 2078), False, 'import os\n'), ((2096, 2123), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (2106, 2123), False, 'import torch\n'), ((3373, 3388), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (3381, 3388), True, 'import numpy as np\n'), ((3431, 3462), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (3439, 3462), True, 'import numpy as np\n'), ((3473, 3504), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (3481, 3504), True, 'import numpy as np\n'), ((4217, 4249), 'os.path.relpath', 'os.path.relpath', (['args.image_path'], {}), '(args.image_path)\n', (4232, 4249), False, 'import os\n'), ((4312, 4338), 'torch.FloatTensor', 'torch.FloatTensor', (['[image]'], {}), '([image])\n', (4329, 4338), False, 'import torch\n'), ((4276, 4298), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (4286, 4298), False, 'from PIL import Image\n'), ((4651, 4666), 'torch.autograd.Variable', 'Variable', (['image'], {}), '(image)\n', (4659, 4666), False, 'from torch.autograd import Variable\n'), ((4978, 5011), 'os.path.relpath', 'os.path.relpath', (['args.cat_to_name'], {}), '(args.cat_to_name)\n', (4993, 5011), False, 'import os\n'), ((4677, 4694), 'torch.exp', 'torch.exp', (['output'], {}), '(output)\n', (4686, 4694), False, 'import torch\n'), ((5077, 5089), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5086, 5089), False, 'import json\n')]
|
import os
import torch
import numpy as np
class IoUAverager:
def __init__(self, nCls, eps=1e-5):
self.nCls = nCls
self.eps = eps
self.shape_ious = [[] for _ in range(self.nCls)]
def clear(self):
self.shape_ious = [[] for _ in range(self.nCls)]
def update(self, outputs, truths):
preds = outputs.max(dim=1)[1]
preds_np = preds.detach().cpu().numpy()
pids_np = truths.detach().cpu().numpy()
batch_size = pids_np.shape[0]
for batch in range(batch_size):
for part in range(self.nCls):
I = np.sum(np.logical_and(preds_np[batch] == part, pids_np[batch] == part))
U = np.sum(np.logical_or(preds_np[batch] == part, pids_np[batch] == part))
if U == 0: continue
else: self.shape_ious[part].append(I/U)
def measure(self):
res = []
for part in range(self.nCls):
if self.shape_ious[part] != []:
res.append(np.mean(self.shape_ious[part]))
return np.mean(res)
def better(self, A, B):
return A > B
def write(self, writer, global_step, prefix=""):
writer.add_scalar(os.path.join(prefix, "mIoU"), self.measure(), global_step)
def report(self):
text = f"mIoU = {self.measure():.4f}\n"
for part in range(self.nCls):
if self.shape_ious[part] != []:
text += f"\t Class {part}: {np.mean(self.shape_ious[part]):.4f}\n"
else:
text += f"\t Class {part}: None\n"
return text
class ClassificationAverager:
""" statistics for classification """
def __init__(self, nCls, eps=1e-5, names=None):
self.nCls = nCls
self.names = names
self.eps = eps
self.N = 0
self.table = np.zeros((self.nCls, 4), dtype=np.int32)
self.hist_preds = []
self.hist_truths = []
def clear(self):
self.N = 0
self.table = np.zeros((self.nCls, 4), dtype=np.int32)
self.hist_preds = []
self.hist_truths = []
def update(self, outputs, truths):
preds = torch.argmax(outputs, dim=1).detach().cpu().numpy() # [B, ]
labels = truths.detach().cpu().numpy() # [B, ]
self.hist_preds.extend(preds.tolist())
self.hist_truths.extend(labels.tolist())
self.N += np.prod(labels.shape)
for Cls in range(self.nCls):
true_positive = np.count_nonzero(np.bitwise_and(preds == Cls, labels == Cls))
true_negative = np.count_nonzero(np.bitwise_and(preds != Cls, labels != Cls))
false_positive = np.count_nonzero(np.bitwise_and(preds == Cls, labels != Cls))
false_negative = np.count_nonzero(np.bitwise_and(preds != Cls, labels == Cls))
self.table[Cls] += [true_positive, true_negative, false_positive, false_negative]
def measure(self):
"""Overall Accuracy"""
total_TP = np.sum(self.table[:, 0]) # all true positives
accuracy = total_TP/self.N
return accuracy
def better(self, A, B):
return A > B
def write(self, writer, global_step, prefix=""):
writer.add_scalar(os.path.join(prefix, "Accuracy"), self.measure(), global_step)
def plot_conf_mat(self):
#mat = confusion_matrix(self.hist_truths, self.hist_preds)
from .vision import plot_confusion_matrix
plot_confusion_matrix(self.hist_truths, self.hist_preds)
def report(self, each_class=False, conf_mat=False):
precisions = []
recalls = []
for Cls in range(self.nCls):
precision = self.table[Cls,0] / (self.table[Cls,0] + self.table[Cls,3] + self.eps) # TP / (TP + FN)
recall = self.table[Cls,0] / (self.table[Cls,0] + self.table[Cls,2] + self.eps) # TP / (TP + FP)
precisions.append(precision)
recalls.append(recall)
total_TP = np.sum(self.table[:, 0]) # all true positives
accuracy = total_TP/self.N
accuracy_mean_class = np.mean(precisions)
text = f"Overall Accuracy = {accuracy:.4f}({total_TP}/{self.N})\n"
text += f"\tMean-class Accuracy = {accuracy_mean_class:.4f}\n"
if each_class:
for Cls in range(self.nCls):
if precisions[Cls] != 0 or recalls[Cls] != 0:
text += f"\tClass {str(Cls)+'('+self.names[Cls]+')' if self.names is not None else Cls}: precision = {precisions[Cls]:.3f} recall = {recalls[Cls]:.3f}\n"
if conf_mat:
self.plot_conf_mat()
return text
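
if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; not part of the original module):
    # random logits/labels for a hypothetical 4-class problem fed through
    # ClassificationAverager, then the textual report is printed.
    torch.manual_seed(0)
    meter = ClassificationAverager(nCls=4, names=['a', 'b', 'c', 'd'])
    logits = torch.randn(32, 4)
    labels = torch.randint(0, 4, (32,))
    meter.update(logits, labels)
    print(meter.report(each_class=True))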
|
[
"numpy.mean",
"numpy.prod",
"numpy.logical_and",
"os.path.join",
"numpy.logical_or",
"numpy.bitwise_and",
"numpy.sum",
"numpy.zeros",
"torch.argmax"
] |
[((1053, 1065), 'numpy.mean', 'np.mean', (['res'], {}), '(res)\n', (1060, 1065), True, 'import numpy as np\n'), ((1821, 1861), 'numpy.zeros', 'np.zeros', (['(self.nCls, 4)'], {'dtype': 'np.int32'}), '((self.nCls, 4), dtype=np.int32)\n', (1829, 1861), True, 'import numpy as np\n'), ((1983, 2023), 'numpy.zeros', 'np.zeros', (['(self.nCls, 4)'], {'dtype': 'np.int32'}), '((self.nCls, 4), dtype=np.int32)\n', (1991, 2023), True, 'import numpy as np\n'), ((2370, 2391), 'numpy.prod', 'np.prod', (['labels.shape'], {}), '(labels.shape)\n', (2377, 2391), True, 'import numpy as np\n'), ((2959, 2983), 'numpy.sum', 'np.sum', (['self.table[:, 0]'], {}), '(self.table[:, 0])\n', (2965, 2983), True, 'import numpy as np\n'), ((3929, 3953), 'numpy.sum', 'np.sum', (['self.table[:, 0]'], {}), '(self.table[:, 0])\n', (3935, 3953), True, 'import numpy as np\n'), ((4041, 4060), 'numpy.mean', 'np.mean', (['precisions'], {}), '(precisions)\n', (4048, 4060), True, 'import numpy as np\n'), ((1196, 1224), 'os.path.join', 'os.path.join', (['prefix', '"""mIoU"""'], {}), "(prefix, 'mIoU')\n", (1208, 1224), False, 'import os\n'), ((3195, 3227), 'os.path.join', 'os.path.join', (['prefix', '"""Accuracy"""'], {}), "(prefix, 'Accuracy')\n", (3207, 3227), False, 'import os\n'), ((2474, 2517), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds == Cls)', '(labels == Cls)'], {}), '(preds == Cls, labels == Cls)\n', (2488, 2517), True, 'import numpy as np\n'), ((2564, 2607), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds != Cls)', '(labels != Cls)'], {}), '(preds != Cls, labels != Cls)\n', (2578, 2607), True, 'import numpy as np\n'), ((2655, 2698), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds == Cls)', '(labels != Cls)'], {}), '(preds == Cls, labels != Cls)\n', (2669, 2698), True, 'import numpy as np\n'), ((2746, 2789), 'numpy.bitwise_and', 'np.bitwise_and', (['(preds != Cls)', '(labels == Cls)'], {}), '(preds != Cls, labels == Cls)\n', (2760, 2789), True, 'import numpy as np\n'), ((608, 671), 'numpy.logical_and', 'np.logical_and', (['(preds_np[batch] == part)', '(pids_np[batch] == part)'], {}), '(preds_np[batch] == part, pids_np[batch] == part)\n', (622, 671), True, 'import numpy as np\n'), ((700, 762), 'numpy.logical_or', 'np.logical_or', (['(preds_np[batch] == part)', '(pids_np[batch] == part)'], {}), '(preds_np[batch] == part, pids_np[batch] == part)\n', (713, 762), True, 'import numpy as np\n'), ((1006, 1036), 'numpy.mean', 'np.mean', (['self.shape_ious[part]'], {}), '(self.shape_ious[part])\n', (1013, 1036), True, 'import numpy as np\n'), ((1452, 1482), 'numpy.mean', 'np.mean', (['self.shape_ious[part]'], {}), '(self.shape_ious[part])\n', (1459, 1482), True, 'import numpy as np\n'), ((2139, 2167), 'torch.argmax', 'torch.argmax', (['outputs'], {'dim': '(1)'}), '(outputs, dim=1)\n', (2151, 2167), False, 'import torch\n')]
|
# Author: <NAME>
import numpy as np
import pickle
class evolutionary_strategies_model(object):
def __init__(
self, n_population, n_params, n_survival,
n_crossover = 2, sigma_init = 1, mu_init = 0, tau = None):
"""
Evolutionary strategies model loosely based on
Beyer and Schwefel, 2002, Evolution strategies - A Comprehensive Introduction
Model type (in the notation from the paper): (mu/ro, lambda) where
mu = n_survival
ro = n_crossover
lambda = n_population
Parameters
----------
n_population : integer
number of instances that are created each generation
n_params : integer
dimension of the parameter space to optimize
n_survival : integer
number of instances to be selected each generation
n_crossover : integer
number of parent instances for each new child usually 2
sigma_init : integer
standard deviation for the normal distribution the
mutation term is sampled from at the start
mu_init : integer
starting value for parameters
tau : float
learning rate like parameter
default (if None): tau = 1/sqrt(2*n_population)
"""
assert sigma_init > 0
assert n_population > n_survival
assert n_population % n_crossover == 0
assert n_population % n_survival == 0
self.n_population = n_population
self.n_survival = n_survival
self.sigma_init = sigma_init
self.n_crossover = n_crossover
if tau == None:
self.tau = 1/((2*n_population)**0.5)
else: self.tau = tau
self.n_params = n_params
self.params = np.random.normal(mu_init, sigma_init, (n_population, n_params))
self.sigmas = np.full((n_population, n_params), sigma_init, dtype = 'float64')
self.fitness = np.zeros(n_population)
self.indices_fittest = None
def mutate(self):
"""
mutate parameters : x = N(x,sigma)
mutate standard deviations : sigma = sigma * exp(N(0,tau))
"""
self.params = np.random.multivariate_normal(
self.params.reshape(self.n_population * self.n_params),
np.diag(self.sigmas.reshape(self.n_population * self.n_params)))\
.reshape((self.n_population, self.n_params))
self.sigmas *= np.exp(np.random.multivariate_normal(
np.zeros(self.n_population * self.n_params),
self.tau * np.eye(self.n_population * self.n_params)))\
.reshape((self.n_population, self.n_params))
def select(self):
"""
        retrieve the indices of the n_survival best instances
"""
self.indices_fittest = np.argsort(self.fitness)[-self.n_survival:]
def procreate(self):
"""
Create n_population new instances from the fittest instances of
the current generation.
Parent groups are selected randomly.
Parameters and sigmas of n_crossover parents are shuffled to create
n_crossover children per parent group.
"""
n_children = self.n_population // self.n_survival
parent_list = np.tile(self.indices_fittest, n_children)
np.random.shuffle(parent_list)
next_generation_params = self.params[parent_list,:]
next_generation_sigmas = self.sigmas[parent_list,:]
n_groups = self.n_population // self.n_crossover
for group in range(n_groups):
for i in range(self.n_params):
np.random.shuffle(
next_generation_params[
group * self.n_crossover : (group + 1) * self.n_crossover,i])
np.random.shuffle(
next_generation_sigmas[
group * self.n_crossover : (group + 1) * self.n_crossover,i])
self.params = next_generation_params
self.sigmas = next_generation_sigmas
def save(self):
"""
create/replace an object file to store the current model.
"""
filehandler = open("evolutionary_strategies_model", 'wb')
pickle.dump(self, filehandler)
filehandler.close()
print("### saved ###")
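
if __name__ == '__main__':
    # Minimal usage sketch (illustrative only; not part of the original class):
    # maximise -||x||^2 in 5 dimensions, i.e. push the parameters towards the
    # origin. The caller supplies the fitness each generation; larger is better
    # because `select` keeps the instances with the highest fitness.
    np.random.seed(0)
    es = evolutionary_strategies_model(
        n_population=20, n_params=5, n_survival=5, n_crossover=2)
    for generation in range(50):
        es.mutate()
        es.fitness = -np.sum(es.params ** 2, axis=1)
        es.select()
        es.procreate()
        if generation % 10 == 0:
            print(generation, es.fitness.max())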
|
[
"numpy.random.normal",
"numpy.tile",
"numpy.eye",
"pickle.dump",
"numpy.argsort",
"numpy.zeros",
"numpy.full",
"numpy.random.shuffle"
] |
[((1928, 1991), 'numpy.random.normal', 'np.random.normal', (['mu_init', 'sigma_init', '(n_population, n_params)'], {}), '(mu_init, sigma_init, (n_population, n_params))\n', (1944, 1991), True, 'import numpy as np\n'), ((2014, 2076), 'numpy.full', 'np.full', (['(n_population, n_params)', 'sigma_init'], {'dtype': '"""float64"""'}), "((n_population, n_params), sigma_init, dtype='float64')\n", (2021, 2076), True, 'import numpy as np\n'), ((2102, 2124), 'numpy.zeros', 'np.zeros', (['n_population'], {}), '(n_population)\n', (2110, 2124), True, 'import numpy as np\n'), ((3446, 3487), 'numpy.tile', 'np.tile', (['self.indices_fittest', 'n_children'], {}), '(self.indices_fittest, n_children)\n', (3453, 3487), True, 'import numpy as np\n'), ((3496, 3526), 'numpy.random.shuffle', 'np.random.shuffle', (['parent_list'], {}), '(parent_list)\n', (3513, 3526), True, 'import numpy as np\n'), ((4383, 4413), 'pickle.dump', 'pickle.dump', (['self', 'filehandler'], {}), '(self, filehandler)\n', (4394, 4413), False, 'import pickle\n'), ((2987, 3011), 'numpy.argsort', 'np.argsort', (['self.fitness'], {}), '(self.fitness)\n', (2997, 3011), True, 'import numpy as np\n'), ((3811, 3917), 'numpy.random.shuffle', 'np.random.shuffle', (['next_generation_params[group * self.n_crossover:(group + 1) * self.\n n_crossover, i]'], {}), '(next_generation_params[group * self.n_crossover:(group + \n 1) * self.n_crossover, i])\n', (3828, 3917), True, 'import numpy as np\n'), ((3968, 4074), 'numpy.random.shuffle', 'np.random.shuffle', (['next_generation_sigmas[group * self.n_crossover:(group + 1) * self.\n n_crossover, i]'], {}), '(next_generation_sigmas[group * self.n_crossover:(group + \n 1) * self.n_crossover, i])\n', (3985, 4074), True, 'import numpy as np\n'), ((2665, 2708), 'numpy.zeros', 'np.zeros', (['(self.n_population * self.n_params)'], {}), '(self.n_population * self.n_params)\n', (2673, 2708), True, 'import numpy as np\n'), ((2733, 2774), 'numpy.eye', 'np.eye', (['(self.n_population * self.n_params)'], {}), '(self.n_population * self.n_params)\n', (2739, 2774), True, 'import numpy as np\n')]
|
################################################################################
## Imports and configurations
import sys
import os
PROJ_PATH = '.'
#PROJ_PATH = os.path.realpath(os.path.join(os.path.dirname(__file__), '../../'))
#sys.path.append(PROJ_PATH)
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
# feature selection
from sklearn.feature_selection import SelectFromModel
from rfpimp import importances as permutation_importances, plot_importances
# classifiers
from sklearn.ensemble import RandomForestClassifier
# reporting
from src.reporting.reports import reports
## configs
DATA_PATH = PROJ_PATH+'/data/DGT/central_pt/'
RAW_PATH = DATA_PATH+'raw/'
PROCESSED_PATH = DATA_PATH+'processed/'
TRAIN_DATA = RAW_PATH+'training.csv'
TEST_DATA = RAW_PATH+'testing.csv'
LABELS_PATH = RAW_PATH+'Class_legend.txt'
random_state = 0
################################################################################
## read data and preprocess
# read
df_train = pd.read_csv(TRAIN_DATA).drop(columns='Unnamed: 0')
X = df_train.drop(columns='CLASS')
y = df_train['CLASS'].astype(int)
# get feature names and labels
feat_labels = list(X.columns)
class_labels = pd.read_csv(LABELS_PATH, sep='\t', header=None,
index_col=0)[1].to_dict()
# standardize
scaler = StandardScaler()
scaler.fit(X)
X = scaler.transform(X)
################################################################################
## feature selection
# Split data into 40% test and 60% training
_X_tr, _X_te, _y_tr, _y_te = train_test_split(X, y, test_size=0.4,
random_state=random_state)
# Create and train a random forest classifier
clf = RandomForestClassifier(n_estimators=100, n_jobs=-1,
random_state=random_state)
clf.fit(_X_tr, _y_tr)
# Gini Index Importance Feature Selection Method
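# (SelectFromModel with threshold='.8*mean' keeps features whose Gini
#  importance is at least 0.8 times the mean importance)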
gini_imp_feat_sel = SelectFromModel(clf, prefit=True, threshold='.8*mean')
gini_accepted = gini_imp_feat_sel.get_support()
# Permutation
imp = permutation_importances(
clf,
pd.DataFrame(_X_te, columns=feat_labels),
pd.Series(_y_te, name='CLASS')
)
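# keep features whose permutation importance on the 40% hold-out is strictly positive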
permutation_accepted = (imp['Importance']>0).loc[feat_labels].values
# Keep the ones accepted with both methods
accepted_feats = (gini_accepted.astype(int)+permutation_accepted.astype(int))==2
# save feature selection results
feat_sel_results = pd.DataFrame(
np.array([gini_accepted, permutation_accepted, accepted_feats]).T,
index=feat_labels,
columns=['Gini', 'Permutation', 'Selected']
)
feat_sel_results.to_csv(PROCESSED_PATH+'feature_selection_results.csv')
################################################################################
## test different methods using test set
df_train = pd.read_csv(TRAIN_DATA).drop(columns='Unnamed: 0')
X_train = df_train.drop(columns='CLASS')
y_train = df_train['CLASS'].astype(int)
df_test = pd.read_csv(TEST_DATA).drop(columns='Unnamed: 0')
X_test = df_test.drop(columns='CLASS')
y_test = df_test['CLASS'].astype(int)
features_selected = pd.read_csv(PROCESSED_PATH+'feature_selection_results.csv')\
.rename(columns={'Unnamed: 0': 'features'}).set_index('features')
features_selected['Original'] = True
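# the extra 'Original' column marks every feature as selected (all-features baseline)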
#pd.DataFrame(features_selected[features_selected].count(),
# columns=['# features used'])\
# .sort_values('# features used', ascending=False)\
# .to_csv('feature_selection_count.csv')
# get feature names and labels
feat_labels = list(X_train.columns)
class_labels = pd.read_csv(LABELS_PATH, sep='\t', header=None,
index_col=0)[1].to_dict()
# standardize
scaler = StandardScaler()
scaler.fit(X_train)
# assign the scaled values back explicitly; transform(..., copy=False) on
# .values is fragile and may silently leave the DataFrames unscaled
X_train = pd.DataFrame(scaler.transform(X_train),
                       index=X_train.index, columns=X_train.columns)
X_test = pd.DataFrame(scaler.transform(X_test),
                      index=X_test.index, columns=X_test.columns)
scores = []
for method in features_selected.columns:
rfc = RandomForestClassifier(100, random_state=0)
features = features_selected[method]
_X_tr = X_train[features[features].index]
_y_tr = y_train.copy()
rfc.fit(_X_tr, _y_tr)
_X_te = X_test[features[features].index]
_y_te = y_test.copy()
_y_pred = rfc.predict(_X_te)
scores.append(reports(_y_te, _y_pred)[-1].rename({'Score': method}))
pd.DataFrame(features_selected[features_selected].count(),
columns=['# features used'])\
    .join(pd.concat(scores, axis=1).T)\
.sort_values('# features used', ascending=False)\
.rename(index={'Selected':'Intersect'})\
.to_csv('feature_selection_results.csv')
################################################################################
## define noise introduction procedure (an illustrative sketch is given below)
## define filters
## define classifiers
## setup and run experiment
## save results
## setup and train models using hyperparameters with best scores
## get testing dataset scores
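################################################################################
## Illustrative sketch only (not part of the original script): one possible
## implementation of the noise introduction step outlined above, flipping a
## fraction of the training labels uniformly at random.
def introduce_label_noise(y, noise_rate=0.1, random_state=0):
    """Return a copy of y with a `noise_rate` fraction of its labels flipped."""
    rng = np.random.RandomState(random_state)
    y_noisy = np.asarray(y).copy()
    classes = np.unique(y_noisy)
    n_flip = int(noise_rate * len(y_noisy))
    flip_idx = rng.choice(len(y_noisy), size=n_flip, replace=False)
    for i in flip_idx:
        # draw a replacement label different from the current one
        y_noisy[i] = rng.choice(classes[classes != y_noisy[i]])
    return y_noisy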
|
[
"pandas.Series",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.RandomForestClassifier",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"src.reporting.reports.reports",
"pandas.DataFrame",
"pandas.concat",
"sklearn.feature_selection.SelectFromModel"
] |
[((1358, 1374), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (1372, 1374), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1589, 1653), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.4)', 'random_state': 'random_state'}), '(X, y, test_size=0.4, random_state=random_state)\n', (1605, 1653), False, 'from sklearn.model_selection import train_test_split\n'), ((1711, 1789), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)', 'n_jobs': '(-1)', 'random_state': 'random_state'}), '(n_estimators=100, n_jobs=-1, random_state=random_state)\n', (1733, 1789), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1886, 1940), 'sklearn.feature_selection.SelectFromModel', 'SelectFromModel', (['clf'], {'prefit': '(True)', 'threshold': '""".8*mean"""'}), "(clf, prefit=True, threshold='.8*mean')\n", (1901, 1940), False, 'from sklearn.feature_selection import SelectFromModel\n'), ((3578, 3594), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3592, 3594), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2048, 2088), 'pandas.DataFrame', 'pd.DataFrame', (['_X_te'], {'columns': 'feat_labels'}), '(_X_te, columns=feat_labels)\n', (2060, 2088), True, 'import pandas as pd\n'), ((2094, 2124), 'pandas.Series', 'pd.Series', (['_y_te'], {'name': '"""CLASS"""'}), "(_y_te, name='CLASS')\n", (2103, 2124), True, 'import pandas as pd\n'), ((3768, 3811), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['(100)'], {'random_state': '(0)'}), '(100, random_state=0)\n', (3790, 3811), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((1061, 1084), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_DATA'], {}), '(TRAIN_DATA)\n', (1072, 1084), True, 'import pandas as pd\n'), ((2392, 2455), 'numpy.array', 'np.array', (['[gini_accepted, permutation_accepted, accepted_feats]'], {}), '([gini_accepted, permutation_accepted, accepted_feats])\n', (2400, 2455), True, 'import numpy as np\n'), ((2738, 2761), 'pandas.read_csv', 'pd.read_csv', (['TRAIN_DATA'], {}), '(TRAIN_DATA)\n', (2749, 2761), True, 'import pandas as pd\n'), ((2882, 2904), 'pandas.read_csv', 'pd.read_csv', (['TEST_DATA'], {}), '(TEST_DATA)\n', (2893, 2904), True, 'import pandas as pd\n'), ((1257, 1317), 'pandas.read_csv', 'pd.read_csv', (['LABELS_PATH'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(LABELS_PATH, sep='\\t', header=None, index_col=0)\n", (1268, 1317), True, 'import pandas as pd\n'), ((3476, 3536), 'pandas.read_csv', 'pd.read_csv', (['LABELS_PATH'], {'sep': '"""\t"""', 'header': 'None', 'index_col': '(0)'}), "(LABELS_PATH, sep='\\t', header=None, index_col=0)\n", (3487, 3536), True, 'import pandas as pd\n'), ((3030, 3091), 'pandas.read_csv', 'pd.read_csv', (["(PROCESSED_PATH + 'feature_selection_results.csv')"], {}), "(PROCESSED_PATH + 'feature_selection_results.csv')\n", (3041, 3091), True, 'import pandas as pd\n'), ((4074, 4097), 'src.reporting.reports.reports', 'reports', (['_y_te', '_y_pred'], {}), '(_y_te, _y_pred)\n', (4081, 4097), False, 'from src.reporting.reports import reports\n'), ((4233, 4253), 'pandas.concat', 'pd.concat', (['scores', '(1)'], {}), '(scores, 1)\n', (4242, 4253), True, 'import pandas as pd\n')]
|
# Test methods with long descriptive names can omit docstrings
# pylint: disable=missing-docstring
import warnings
from time import time
from numbers import Real
from itertools import starmap, chain
import unittest
import pickle
import numpy as np
from numpy.testing import assert_array_equal
from Orange.data import (
ContinuousVariable,
DiscreteVariable,
StringVariable,
TimeVariable,
Variable,
Domain,
Table,
DomainConversion,
)
from Orange.data.domain import filter_visible
from Orange.preprocess import Continuize, Impute
from Orange.tests.base import create_pickling_tests
from Orange.util import OrangeDeprecationWarning
def create_domain(*ss):
Variable._clear_all_caches()
vars = dict(
age=ContinuousVariable(name="AGE"),
gender=DiscreteVariable(name="Gender", values=["M", "F"]),
incomeA=ContinuousVariable(name="incomeA"),
income=ContinuousVariable(name="income"),
education=DiscreteVariable(name="education", values=["GS", "HS", "C"]),
ssn=StringVariable(name="SSN"),
race=DiscreteVariable(
name="race", values=["White", "Hypsanic", "African", "Other"]
),
arrival=TimeVariable("arrival"),
)
def map_vars(s):
return [vars[x] for x in s]
return Domain(*[map_vars(s) for s in ss])
PickleDomain = create_pickling_tests(
"PickleDomain",
("empty_domain", lambda: create_domain([])),
("with_continuous_variable", lambda: create_domain(["age"])),
("with_discrete_variable", lambda: create_domain(["gender"])),
("with_mixed_variables", lambda: create_domain(["age", "gender"])),
("with_continuous_class", lambda: create_domain(["age", "gender"], ["incomeA"])),
("with_discrete_class", lambda: create_domain(["age", "gender"], ["education"])),
(
"with_multiple_classes",
lambda: create_domain(["age", "gender"], ["incomeA", "education"]),
),
("with_metas", lambda: create_domain(["age", "gender"], [], ["ssn"])),
(
"with_class_and_metas",
lambda: create_domain(["age", "gender"], ["incomeA", "education"], ["ssn"]),
),
)
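# create_pickling_tests builds a TestCase whose generated tests round-trip each
# of the domains constructed by the lambdas above through pickle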
age, gender, incomeA, income, education, ssn, race, arrival = create_domain(
[],
[],
["age", "gender", "incomeA", "income", "education", "ssn", "race", "arrival"],
).metas
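# (the throw-away domain above only materialises the Variable instances;
#  unpacking .metas gives the tests below direct references to them)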
class TestDomainInit(unittest.TestCase):
def test_init_class(self):
attributes = (age, gender, income)
d = Domain(attributes, race)
self.assertEqual(d.variables, attributes + (race,))
self.assertEqual(d.attributes, attributes)
self.assertEqual(d.class_var, race)
self.assertEqual(d.class_vars, (race,))
self.assertEqual(d.metas, ())
def test_init_class_list(self):
attributes = (age, gender, income)
d = Domain(attributes, [race])
self.assertEqual(d.variables, attributes + (race,))
self.assertEqual(d.attributes, attributes)
self.assertEqual(d.class_var, race)
self.assertEqual(d.class_vars, (race,))
self.assertEqual(d.metas, ())
def test_init_no_class(self):
attributes = (age, gender, income)
d = Domain(attributes)
self.assertEqual(d.variables, attributes)
self.assertEqual(d.attributes, attributes)
self.assertEqual(d.class_var, None)
self.assertEqual(d.class_vars, ())
self.assertEqual(d.metas, ())
def test_init_no_class_false(self):
attributes = (age, gender, income)
d = Domain(attributes, None)
self.assertEqual(d.variables, attributes)
self.assertEqual(d.attributes, attributes)
self.assertEqual(d.class_var, None)
self.assertEqual(d.class_vars, ())
self.assertEqual(d.metas, ())
def test_init_multi_class(self):
attributes = (age, gender, income)
d = Domain(attributes, (education, race))
self.assertEqual(d.variables, attributes + (education, race))
self.assertEqual(d.attributes, attributes)
self.assertIsNone(d.class_var)
self.assertEqual(d.class_vars, (education, race))
self.assertEqual(d.metas, ())
def test_init_source(self):
attributes = (age, gender, income)
d = Domain(attributes, (education, race))
d2 = Domain(["Gender", 0, income], source=d)
self.assertEqual(d2.variables, (gender, age, income))
def test_init_source_class(self):
attributes = (age, gender, income)
d = Domain(attributes, (education, race))
d2 = Domain(["Gender", 0], "income", source=d)
self.assertEqual(d2.variables, (gender, age, income))
def test_init_metas(self):
attributes = (age, gender, income)
metas = (ssn, race)
d = Domain(attributes, race, metas=metas)
self.assertEqual(d.variables, attributes + (race,))
self.assertEqual(d.attributes, attributes)
self.assertEqual(d.class_var, race)
self.assertEqual(d.class_vars, (race,))
self.assertEqual(d.metas, metas)
def test_from_numpy_names(self):
for n_cols, name in [
(5, "Feature {}"),
(99, "Feature {:02}"),
(100, "Feature {:03}"),
]:
d = Domain.from_numpy(np.zeros((1, n_cols)))
self.assertTrue(d.anonymous)
self.assertEqual(
[var.name for var in d.attributes],
[name.format(i) for i in range(1, n_cols + 1)],
)
d = Domain.from_numpy(np.zeros((1, 1)))
self.assertTrue(d.anonymous)
self.assertEqual(d.attributes[0].name, "Feature")
d = Domain.from_numpy(np.zeros((1, 3)), np.zeros((1, 1)), np.zeros((1, 100)))
self.assertTrue(d.anonymous)
self.assertEqual(
[var.name for var in d.attributes],
["Feature {}".format(i) for i in range(1, 4)],
)
self.assertEqual(d.class_var.name, "Target")
self.assertEqual(
[var.name for var in d.metas],
["Meta {:03}".format(i) for i in range(1, 101)],
)
def test_from_numpy_dimensions(self):
for dimension in [[5], [5, 1]]:
d = Domain.from_numpy(np.zeros((1, 1)), np.zeros(dimension))
self.assertTrue(d.anonymous)
self.assertEqual(len(d.class_vars), 1)
self.assertRaises(ValueError, Domain.from_numpy, np.zeros(2))
self.assertRaises(ValueError, Domain.from_numpy, np.zeros((2, 2, 2)))
self.assertRaises(
ValueError, Domain.from_numpy, np.zeros((2, 2)), np.zeros((2, 2, 2))
)
def test_from_numpy_values(self):
for aran_min, aran_max, vartype in [
(1, 3, ContinuousVariable),
(0, 2, DiscreteVariable),
(18, 23, ContinuousVariable),
]:
            n_rows, n_cols = aran_max - aran_min, 1
d = Domain.from_numpy(
np.zeros((1, 1)), np.arange(aran_min, aran_max).reshape(n_rows, n_cols)
)
self.assertTrue(d.anonymous)
self.assertIsInstance(d.class_var, vartype)
if isinstance(vartype, DiscreteVariable):
self.assertEqual(
d.class_var.values, ["v{}".format(i) for i in range(1, 3)]
)
def test_wrong_vartypes(self):
attributes = (age, gender, income)
for args in ((attributes, ssn), (attributes + (ssn,)), ((ssn,) + attributes)):
with self.assertRaises(TypeError):
Domain(*args)
def test_wrong_vartypes_w_source(self):
d = Domain((age, gender), metas=(ssn,))
with self.assertRaises(TypeError):
Domain(-1, source=d)
def test_wrong_types(self):
with self.assertRaises(TypeError):
Domain((age, []))
with self.assertRaises(TypeError):
Domain((age, "income"))
with self.assertRaises(TypeError):
Domain(([], age))
with self.assertRaises(TypeError):
Domain(("income", age))
with self.assertRaises(TypeError):
Domain((age,), self)
with self.assertRaises(TypeError):
Domain((age,), metas=("income",))
def test_get_item(self):
d = Domain((age, gender, income), metas=(ssn, race))
for idx, var in [
(age, age),
("AGE", age),
(0, age),
(income, income),
("income", income),
(2, income),
(ssn, ssn),
("SSN", ssn),
(-1, ssn),
(-2, race),
]:
self.assertEqual(d[idx], var)
def test_index(self):
d = Domain((age, gender, income), metas=(ssn, race))
for idx, var in [
(age, 0),
("AGE", 0),
(0, 0),
(np.int_(0), 0),
(income, 2),
("income", 2),
(2, 2),
(np.int_(2), 2),
(ssn, -1),
("SSN", -1),
(-1, -1),
(np.int_(-1), -1),
(-2, -2),
(np.int_(-2), -2),
]:
self.assertEqual(d.index(idx), var)
def test_get_item_slices(self):
d = Domain((age, gender, income, race), metas=(ssn, race))
self.assertEqual(d[:2], (age, gender))
self.assertEqual(d[1:3], (gender, income))
self.assertEqual(d[2:], (income, race))
def test_get_item_error(self):
d = Domain((age, gender, income), metas=(ssn, race))
for idx in (3, -3, incomeA, "no_such_thing"):
with self.assertRaises(KeyError):
_ = d[idx]
with self.assertRaises(TypeError):
_ = d[[2]]
def test_index_error(self):
d = Domain((age, gender, income), metas=(ssn, race))
for idx in (3, np.int(3), -3, np.int(-3), incomeA, "no_such_thing"):
with self.assertRaises(ValueError):
d.index(idx)
with self.assertRaises(TypeError):
d.index([2])
def test_contains(self):
d = Domain((age, gender, income), metas=(ssn,))
for var in [
"AGE",
age,
0,
np.int_(0),
"income",
income,
2,
np.int_(2),
"SSN",
ssn,
-1,
np.int_(-1),
]:
self.assertIn(var, d)
for var in ["no_such_thing", race, 3, np.int_(3), -2, np.int_(-2)]:
self.assertNotIn(var, d)
with self.assertRaises(TypeError):
{} in d
with self.assertRaises(TypeError):
[] in d
def test_iter(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter("error")
d = Domain((age, gender, income), metas=(ssn,))
with self.assertRaises(OrangeDeprecationWarning):
list(d)
warnings.simplefilter("ignore")
self.assertEqual([var for var in d], [age, gender, income])
d = Domain((age,), metas=(ssn,))
self.assertEqual([var for var in d], [age])
d = Domain((), metas=(ssn,))
self.assertEqual([var for var in d], [])
def test_str(self):
cases = (
(((),), "[]"),
(((age,),), "[AGE]"),
(((), age), "[ | AGE]"),
(((gender,), age), "[Gender | AGE]"),
(((gender, income), None), "[Gender, income]"),
(((gender, income), age), "[Gender, income | AGE]"),
(((gender,), (age, income)), "[Gender | AGE, income]"),
(((gender,), (age, income), (ssn,)), "[Gender | AGE, income] {SSN}"),
(
((gender,), (age, income), (ssn, race)),
"[Gender | AGE, income] {SSN, race}",
),
(((), (), (ssn, race)), "[] {SSN, race}"),
)
for args, printout in cases:
self.assertEqual(str(Domain(*args)), printout)
def test_has_discrete(self):
self.assertFalse(Domain([]).has_discrete_attributes())
self.assertFalse(Domain([], [age]).has_discrete_attributes())
self.assertFalse(Domain([], race).has_discrete_attributes())
self.assertFalse(Domain([age], None).has_discrete_attributes())
self.assertTrue(Domain([race], None).has_discrete_attributes())
self.assertTrue(Domain([age, race], None).has_discrete_attributes())
self.assertTrue(Domain([race, age], None).has_discrete_attributes())
self.assertFalse(Domain([], [age]).has_discrete_attributes(True))
self.assertTrue(Domain([], [race]).has_discrete_attributes(True))
self.assertFalse(Domain([age], None).has_discrete_attributes(True))
self.assertTrue(Domain([race], None).has_discrete_attributes(True))
self.assertTrue(Domain([age], race).has_discrete_attributes(True))
self.assertTrue(Domain([race], age).has_discrete_attributes(True))
self.assertTrue(Domain([], [race, age]).has_discrete_attributes(True))
d = Domain([], None, [gender])
self.assertTrue(d.has_discrete_attributes(False, True))
d = Domain([], None, [age])
self.assertFalse(d.has_discrete_attributes(False, True))
d = Domain([], [age], [gender])
self.assertTrue(d.has_discrete_attributes(True, True))
d = Domain([], [incomeA], [age])
self.assertFalse(d.has_discrete_attributes(True, True))
def test_has_continuous(self):
self.assertFalse(Domain([]).has_continuous_attributes())
self.assertFalse(Domain([], [age]).has_continuous_attributes())
self.assertFalse(Domain([], [race]).has_continuous_attributes())
self.assertTrue(Domain([age], None).has_continuous_attributes())
self.assertFalse(Domain([race], None).has_continuous_attributes())
self.assertTrue(Domain([age, race], None).has_continuous_attributes())
self.assertTrue(Domain([race, age], None).has_continuous_attributes())
self.assertTrue(Domain([], [age]).has_continuous_attributes(True))
self.assertFalse(Domain([], [race]).has_continuous_attributes(True))
self.assertTrue(Domain([age], None).has_continuous_attributes(True))
self.assertFalse(Domain([race], None).has_continuous_attributes(True))
self.assertTrue(Domain([age], race).has_continuous_attributes(True))
self.assertTrue(Domain([race], age).has_continuous_attributes(True))
self.assertTrue(Domain([], [race, age]).has_continuous_attributes(True))
d = Domain([], None, [age])
self.assertTrue(d.has_continuous_attributes(False, True))
d = Domain([], None, [gender])
self.assertFalse(d.has_continuous_attributes(False, True))
d = Domain([], [gender], [age])
self.assertTrue(d.has_continuous_attributes(True, True))
d = Domain([], [race], [gender])
self.assertFalse(d.has_continuous_attributes(True, True))
def test_has_time(self):
self.assertFalse(Domain([]).has_time_attributes())
self.assertFalse(Domain([], [age]).has_time_attributes())
self.assertFalse(Domain([], [race]).has_time_attributes())
self.assertFalse(Domain([], [arrival]).has_time_attributes())
self.assertFalse(Domain([], [], [arrival]).has_time_attributes())
self.assertTrue(Domain([arrival], []).has_time_attributes())
self.assertTrue(Domain([], [arrival]).has_time_attributes(include_class=True))
self.assertTrue(
Domain([], [], [arrival]).has_time_attributes(include_metas=True)
)
self.assertFalse(Domain([arrival], []).has_time_class)
self.assertTrue(Domain([], [arrival]).has_time_class)
self.assertFalse(Domain([], [], [arrival]).has_time_class)
def test_get_conversion(self):
compute_value = lambda: 42
new_income = income.copy(compute_value=compute_value)
d = Domain((age, gender, income), metas=(ssn, race))
e = Domain((gender, race), None, metas=(age, gender, ssn))
f = Domain((gender,), (race, income), metas=(age, income, ssn))
g = Domain((), metas=(age, gender, ssn))
h = Domain((gender,), (race, new_income), metas=(age, new_income, ssn))
for conver, domain, attr, class_vars, metas in (
(d, e, [1, -2], [], [0, 1, -1]),
(d, f, [1], [-2, 2], [0, 2, -1]),
(f, g, [], [], [-1, 0, -3]),
(g, h, [-2], [None, compute_value], [-1, compute_value, -3]),
):
to_domain = domain.get_conversion(conver)
self.assertIs(to_domain.source, conver)
self.assertEqual(to_domain.attributes, attr)
self.assertEqual(to_domain.class_vars, class_vars)
self.assertEqual(to_domain.metas, metas)
def test_conversion(self):
domain = Domain([age, income], [race], [gender, education, ssn])
x, y, metas = domain.convert([42, 13, "White"])
assert_array_equal(x, np.array([42, 13]))
assert_array_equal(y, np.array([0]))
metas_exp = [gender.Unknown, education.Unknown, ssn.Unknown]
def equal(a, b):
if (
isinstance(a, Real)
and isinstance(b, Real)
and np.isnan(a)
and np.isnan(b)
):
return True
else:
return a == b
self.assertTrue(all(starmap(equal, zip(metas, metas_exp))))
x, y, metas = domain.convert([42, 13, "White", "M", "HS", "1234567"])
assert_array_equal(x, np.array([42, 13]))
assert_array_equal(y, np.array([0]))
assert_array_equal(metas, np.array([0, 1, "1234567"], dtype=object))
def test_conversion_size(self):
domain = Domain([age, gender, income], [race])
self.assertRaises(ValueError, domain.convert, [0] * 3)
self.assertRaises(ValueError, domain.convert, [0] * 5)
domain = Domain([age, income], [race], [gender, education, ssn])
self.assertRaises(ValueError, domain.convert, [0] * 2)
self.assertRaises(ValueError, domain.convert, [0] * 4)
self.assertRaises(ValueError, domain.convert, [0] * 7)
domain.convert([0] * 3)
domain.convert([0] * 6)
def test_preprocessor_chaining(self):
domain = Domain(
[DiscreteVariable("a", values="01"), DiscreteVariable("b", values="01")],
DiscreteVariable("y", values="01"),
)
table = Table(domain, [[0, 1], [1, np.NaN]], [0, 1])
pre1 = Continuize()(Impute()(table))
pre2 = Table(pre1.domain, table)
np.testing.assert_almost_equal(pre1.X, pre2.X)
def test_unpickling_recreates_known_domains(self):
Variable._clear_all_caches()
domain = Domain([])
unpickled_domain = pickle.loads(pickle.dumps(domain))
self.assertTrue(hasattr(unpickled_domain, "_known_domains"))
def test_different_domains_with_same_attributes_are_equal(self):
domain1 = Domain([])
domain2 = Domain([])
self.assertEqual(domain1, domain2)
var1 = ContinuousVariable("var1")
domain1.attributes = (var1,)
self.assertNotEqual(domain1, domain2)
domain2.attributes = (var1,)
self.assertEqual(domain1, domain2)
domain1.class_vars = (var1,)
self.assertNotEqual(domain1, domain2)
domain2.class_vars = (var1,)
self.assertEqual(domain1, domain2)
domain1._metas = (var1,)
self.assertNotEqual(domain1, domain2)
domain2._metas = (var1,)
self.assertEqual(domain1, domain2)
def test_domain_conversion_is_fast_enough(self):
attrs = [ContinuousVariable("f%i" % i) for i in range(10000)]
class_vars = [ContinuousVariable("c%i" % i) for i in range(10)]
metas = [ContinuousVariable("m%i" % i) for i in range(10)]
source = Domain(attrs, class_vars, metas)
start = time()
cases = (
(
(attrs[:1000], class_vars, metas),
list(range(1000)),
list(range(10000, 10010)),
list(range(-1, -11, -1)),
),
(
(metas, attrs[:1000], class_vars),
list(range(-1, -11, -1)),
list(range(1000)),
list(range(10000, 10010)),
),
(
(class_vars, metas, attrs[:1000]),
list(range(10000, 10010)),
list(range(-1, -11, -1)),
list(range(1000)),
),
)
for domain_args, attributes, class_vars, metas in cases:
c1 = DomainConversion(source, Domain(*domain_args))
self.assertEqual(c1.attributes, attributes)
self.assertEqual(c1.class_vars, class_vars)
self.assertEqual(c1.metas, metas)
self.assertLessEqual(time() - start, 1)
def test_copy(self):
age.number_of_decimals = 5
attributes = (age, gender, income)
domain = Domain(attributes, [race], [ssn])
new_domain = domain.copy()
new_domain[age].number_of_decimals = 10
self.assertEqual(domain[age].number_of_decimals, 5)
self.assertEqual(new_domain[age].number_of_decimals, 10)
def test_domain_conversion_sparsity(self):
destination = Domain(
attributes=[
ContinuousVariable(name="a"),
ContinuousVariable(name="b"),
ContinuousVariable(name="c"),
],
class_vars=[DiscreteVariable("d", values=["e"])],
metas=[StringVariable("f")],
)
# all dense
source = Domain(attributes=[])
conversion = DomainConversion(source, destination)
self.assertFalse(conversion.sparse_X)
self.assertFalse(conversion.sparse_Y)
self.assertFalse(conversion.sparse_metas)
# set destination attributes as sparse
for a in destination.attributes:
a.sparse = True
source = Domain(attributes=[])
conversion = DomainConversion(source, destination)
self.assertTrue(conversion.sparse_X)
self.assertFalse(conversion.sparse_Y)
self.assertFalse(conversion.sparse_metas)
# set all destination variable as sparse
for a in chain(destination.variables, destination.metas):
a.sparse = True
source = Domain(attributes=[])
conversion = DomainConversion(source, destination)
self.assertTrue(conversion.sparse_X)
self.assertTrue(conversion.sparse_Y)
self.assertFalse(conversion.sparse_metas)
class TestDomainFilter(unittest.TestCase):
def setUp(self):
self.iris = Table("iris")
def test_filter_visible(self):
n_feats = len(self.iris.domain.attributes)
self.iris.domain.attributes[0].attributes.update({"hidden": True})
filtered = list(filter_visible(self.iris.domain.attributes))
self.assertNotIn(self.iris.domain.attributes[0], filtered)
self.assertEqual(len(filtered), n_feats - 1)
if __name__ == "__main__":
unittest.main()
|
[
"itertools.chain",
"pickle.dumps",
"Orange.preprocess.Impute",
"Orange.data.DiscreteVariable",
"numpy.array",
"unittest.main",
"numpy.arange",
"Orange.data.domain.filter_visible",
"Orange.data.DomainConversion",
"numpy.testing.assert_almost_equal",
"warnings.simplefilter",
"Orange.data.Domain",
"Orange.data.StringVariable",
"Orange.preprocess.Continuize",
"numpy.isnan",
"numpy.int_",
"numpy.int",
"time.time",
"Orange.data.Variable._clear_all_caches",
"Orange.data.Table",
"warnings.catch_warnings",
"Orange.data.TimeVariable",
"numpy.zeros",
"Orange.data.ContinuousVariable"
] |
[((692, 720), 'Orange.data.Variable._clear_all_caches', 'Variable._clear_all_caches', ([], {}), '()\n', (718, 720), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((23186, 23201), 'unittest.main', 'unittest.main', ([], {}), '()\n', (23199, 23201), False, 'import unittest\n'), ((2469, 2493), 'Orange.data.Domain', 'Domain', (['attributes', 'race'], {}), '(attributes, race)\n', (2475, 2493), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((2827, 2853), 'Orange.data.Domain', 'Domain', (['attributes', '[race]'], {}), '(attributes, [race])\n', (2833, 2853), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((3185, 3203), 'Orange.data.Domain', 'Domain', (['attributes'], {}), '(attributes)\n', (3191, 3203), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((3526, 3550), 'Orange.data.Domain', 'Domain', (['attributes', 'None'], {}), '(attributes, None)\n', (3532, 3550), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((3870, 3907), 'Orange.data.Domain', 'Domain', (['attributes', '(education, race)'], {}), '(attributes, (education, race))\n', (3876, 3907), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4252, 4289), 'Orange.data.Domain', 'Domain', (['attributes', '(education, race)'], {}), '(attributes, (education, race))\n', (4258, 4289), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4303, 4342), 'Orange.data.Domain', 'Domain', (["['Gender', 0, income]"], {'source': 'd'}), "(['Gender', 0, income], source=d)\n", (4309, 4342), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4499, 4536), 'Orange.data.Domain', 'Domain', (['attributes', '(education, race)'], {}), '(attributes, (education, race))\n', (4505, 4536), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4550, 4591), 'Orange.data.Domain', 'Domain', (["['Gender', 0]", '"""income"""'], {'source': 'd'}), "(['Gender', 0], 'income', source=d)\n", (4556, 4591), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((4769, 4806), 'Orange.data.Domain', 'Domain', (['attributes', 'race'], {'metas': 'metas'}), '(attributes, race, metas=metas)\n', (4775, 4806), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7596, 7631), 'Orange.data.Domain', 'Domain', (['(age, gender)'], {'metas': '(ssn,)'}), '((age, gender), metas=(ssn,))\n', (7602, 7631), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8252, 8300), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), 
'((age, gender, income), metas=(ssn, race))\n', (8258, 8300), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8675, 8723), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (8681, 8723), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9208, 9262), 'Orange.data.Domain', 'Domain', (['(age, gender, income, race)'], {'metas': '(ssn, race)'}), '((age, gender, income, race), metas=(ssn, race))\n', (9214, 9262), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9457, 9505), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (9463, 9505), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9745, 9793), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (9751, 9793), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((10059, 10102), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn,)'}), '((age, gender, income), metas=(ssn,))\n', (10065, 10102), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13070, 13096), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[gender]'], {}), '([], None, [gender])\n', (13076, 13096), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13173, 13196), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[age]'], {}), '([], None, [age])\n', (13179, 13196), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13274, 13301), 'Orange.data.Domain', 'Domain', (['[]', '[age]', '[gender]'], {}), '([], [age], [gender])\n', (13280, 13301), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13377, 13405), 'Orange.data.Domain', 'Domain', (['[]', '[incomeA]', '[age]'], {}), '([], [incomeA], [age])\n', (13383, 13405), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14580, 14603), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[age]'], {}), '([], None, [age])\n', (14586, 14603), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14682, 14708), 'Orange.data.Domain', 'Domain', (['[]', 'None', '[gender]'], {}), '([], None, [gender])\n', (14688, 14708), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14788, 14815), 'Orange.data.Domain', 'Domain', (['[]', '[gender]', '[age]'], {}), '([], [gender], [age])\n', (14794, 14815), False, 'from Orange.data import 
ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14893, 14921), 'Orange.data.Domain', 'Domain', (['[]', '[race]', '[gender]'], {}), '([], [race], [gender])\n', (14899, 14921), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15963, 16011), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn, race)'}), '((age, gender, income), metas=(ssn, race))\n', (15969, 16011), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16024, 16078), 'Orange.data.Domain', 'Domain', (['(gender, race)', 'None'], {'metas': '(age, gender, ssn)'}), '((gender, race), None, metas=(age, gender, ssn))\n', (16030, 16078), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16091, 16150), 'Orange.data.Domain', 'Domain', (['(gender,)', '(race, income)'], {'metas': '(age, income, ssn)'}), '((gender,), (race, income), metas=(age, income, ssn))\n', (16097, 16150), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16163, 16199), 'Orange.data.Domain', 'Domain', (['()'], {'metas': '(age, gender, ssn)'}), '((), metas=(age, gender, ssn))\n', (16169, 16199), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16212, 16279), 'Orange.data.Domain', 'Domain', (['(gender,)', '(race, new_income)'], {'metas': '(age, new_income, ssn)'}), '((gender,), (race, new_income), metas=(age, new_income, ssn))\n', (16218, 16279), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((16883, 16938), 'Orange.data.Domain', 'Domain', (['[age, income]', '[race]', '[gender, education, ssn]'], {}), '([age, income], [race], [gender, education, ssn])\n', (16889, 16938), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17808, 17845), 'Orange.data.Domain', 'Domain', (['[age, gender, income]', '[race]'], {}), '([age, gender, income], [race])\n', (17814, 17845), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17990, 18045), 'Orange.data.Domain', 'Domain', (['[age, income]', '[race]', '[gender, education, ssn]'], {}), '([age, income], [race], [gender, education, ssn])\n', (17996, 18045), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18527, 18571), 'Orange.data.Table', 'Table', (['domain', '[[0, 1], [1, np.NaN]]', '[0, 1]'], {}), '(domain, [[0, 1], [1, np.NaN]], [0, 1])\n', (18532, 18571), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18632, 18657), 'Orange.data.Table', 'Table', (['pre1.domain', 'table'], {}), '(pre1.domain, table)\n', (18637, 18657), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), 
((18666, 18712), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['pre1.X', 'pre2.X'], {}), '(pre1.X, pre2.X)\n', (18696, 18712), True, 'import numpy as np\n'), ((18777, 18805), 'Orange.data.Variable._clear_all_caches', 'Variable._clear_all_caches', ([], {}), '()\n', (18803, 18805), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18823, 18833), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (18829, 18833), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19053, 19063), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (19059, 19063), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19082, 19092), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (19088, 19092), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19152, 19178), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (['"""var1"""'], {}), "('var1')\n", (19170, 19178), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19945, 19977), 'Orange.data.Domain', 'Domain', (['attrs', 'class_vars', 'metas'], {}), '(attrs, class_vars, metas)\n', (19951, 19977), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19995, 20001), 'time.time', 'time', ([], {}), '()\n', (19999, 20001), False, 'from time import time\n'), ((21089, 21122), 'Orange.data.Domain', 'Domain', (['attributes', '[race]', '[ssn]'], {}), '(attributes, [race], [ssn])\n', (21095, 21122), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21740, 21761), 'Orange.data.Domain', 'Domain', ([], {'attributes': '[]'}), '(attributes=[])\n', (21746, 21761), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21783, 21820), 'Orange.data.DomainConversion', 'DomainConversion', (['source', 'destination'], {}), '(source, destination)\n', (21799, 21820), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22097, 22118), 'Orange.data.Domain', 'Domain', ([], {'attributes': '[]'}), '(attributes=[])\n', (22103, 22118), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22140, 22177), 'Orange.data.DomainConversion', 'DomainConversion', (['source', 'destination'], {}), '(source, destination)\n', (22156, 22177), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22386, 22433), 'itertools.chain', 'chain', (['destination.variables', 'destination.metas'], {}), '(destination.variables, destination.metas)\n', (22391, 22433), False, 'from itertools import starmap, chain\n'), ((22480, 22501), 'Orange.data.Domain', 'Domain', ([], {'attributes': '[]'}), '(attributes=[])\n', (22486, 22501), False, 
'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22523, 22560), 'Orange.data.DomainConversion', 'DomainConversion', (['source', 'destination'], {}), '(source, destination)\n', (22539, 22560), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22787, 22800), 'Orange.data.Table', 'Table', (['"""iris"""'], {}), "('iris')\n", (22792, 22800), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((750, 780), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""AGE"""'}), "(name='AGE')\n", (768, 780), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((797, 847), 'Orange.data.DiscreteVariable', 'DiscreteVariable', ([], {'name': '"""Gender"""', 'values': "['M', 'F']"}), "(name='Gender', values=['M', 'F'])\n", (813, 847), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((865, 899), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""incomeA"""'}), "(name='incomeA')\n", (883, 899), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((916, 949), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""income"""'}), "(name='income')\n", (934, 949), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((969, 1029), 'Orange.data.DiscreteVariable', 'DiscreteVariable', ([], {'name': '"""education"""', 'values': "['GS', 'HS', 'C']"}), "(name='education', values=['GS', 'HS', 'C'])\n", (985, 1029), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((1043, 1069), 'Orange.data.StringVariable', 'StringVariable', ([], {'name': '"""SSN"""'}), "(name='SSN')\n", (1057, 1069), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((1084, 1163), 'Orange.data.DiscreteVariable', 'DiscreteVariable', ([], {'name': '"""race"""', 'values': "['White', 'Hypsanic', 'African', 'Other']"}), "(name='race', values=['White', 'Hypsanic', 'African', 'Other'])\n", (1100, 1163), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((1203, 1226), 'Orange.data.TimeVariable', 'TimeVariable', (['"""arrival"""'], {}), "('arrival')\n", (1215, 1226), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((5521, 5537), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (5529, 5537), True, 'import numpy as np\n'), ((5665, 5681), 'numpy.zeros', 'np.zeros', (['(1, 3)'], {}), '((1, 3))\n', (5673, 5681), True, 'import numpy as np\n'), ((5683, 5699), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (5691, 5699), True, 'import numpy as np\n'), ((5701, 5719), 'numpy.zeros', 'np.zeros', (['(1, 100)'], {}), '((1, 100))\n', (5709, 5719), True, 
'import numpy as np\n'), ((6400, 6411), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (6408, 6411), True, 'import numpy as np\n'), ((6470, 6489), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (6478, 6489), True, 'import numpy as np\n'), ((6561, 6577), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (6569, 6577), True, 'import numpy as np\n'), ((6579, 6598), 'numpy.zeros', 'np.zeros', (['(2, 2, 2)'], {}), '((2, 2, 2))\n', (6587, 6598), True, 'import numpy as np\n'), ((7687, 7707), 'Orange.data.Domain', 'Domain', (['(-1)'], {'source': 'd'}), '(-1, source=d)\n', (7693, 7707), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7796, 7813), 'Orange.data.Domain', 'Domain', (['(age, [])'], {}), '((age, []))\n', (7802, 7813), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7869, 7892), 'Orange.data.Domain', 'Domain', (["(age, 'income')"], {}), "((age, 'income'))\n", (7875, 7892), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((7948, 7965), 'Orange.data.Domain', 'Domain', (['([], age)'], {}), '(([], age))\n', (7954, 7965), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8021, 8044), 'Orange.data.Domain', 'Domain', (["('income', age)"], {}), "(('income', age))\n", (8027, 8044), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8100, 8120), 'Orange.data.Domain', 'Domain', (['(age,)', 'self'], {}), '((age,), self)\n', (8106, 8120), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8176, 8209), 'Orange.data.Domain', 'Domain', (['(age,)'], {'metas': "('income',)"}), "((age,), metas=('income',))\n", (8182, 8209), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((9817, 9826), 'numpy.int', 'np.int', (['(3)'], {}), '(3)\n', (9823, 9826), True, 'import numpy as np\n'), ((9832, 9842), 'numpy.int', 'np.int', (['(-3)'], {}), '(-3)\n', (9838, 9842), True, 'import numpy as np\n'), ((10187, 10197), 'numpy.int_', 'np.int_', (['(0)'], {}), '(0)\n', (10194, 10197), True, 'import numpy as np\n'), ((10268, 10278), 'numpy.int_', 'np.int_', (['(2)'], {}), '(2)\n', (10275, 10278), True, 'import numpy as np\n'), ((10344, 10355), 'numpy.int_', 'np.int_', (['(-1)'], {}), '(-1)\n', (10351, 10355), True, 'import numpy as np\n'), ((10449, 10459), 'numpy.int_', 'np.int_', (['(3)'], {}), '(3)\n', (10456, 10459), True, 'import numpy as np\n'), ((10465, 10476), 'numpy.int_', 'np.int_', (['(-2)'], {}), '(-2)\n', (10472, 10476), True, 'import numpy as np\n'), ((10682, 10718), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (10705, 10718), False, 'import warnings\n'), ((10732, 10762), 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), "('error')\n", (10753, 10762), False, 'import warnings\n'), ((10780, 10823), 'Orange.data.Domain', 'Domain', (['(age, gender, income)'], {'metas': '(ssn,)'}), '((age, gender, income), metas=(ssn,))\n', (10786, 
10823), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((10923, 10954), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (10944, 10954), False, 'import warnings\n'), ((11044, 11072), 'Orange.data.Domain', 'Domain', (['(age,)'], {'metas': '(ssn,)'}), '((age,), metas=(ssn,))\n', (11050, 11072), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((11146, 11170), 'Orange.data.Domain', 'Domain', (['()'], {'metas': '(ssn,)'}), '((), metas=(ssn,))\n', (11152, 11170), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17026, 17044), 'numpy.array', 'np.array', (['[42, 13]'], {}), '([42, 13])\n', (17034, 17044), True, 'import numpy as np\n'), ((17076, 17089), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17084, 17089), True, 'import numpy as np\n'), ((17612, 17630), 'numpy.array', 'np.array', (['[42, 13]'], {}), '([42, 13])\n', (17620, 17630), True, 'import numpy as np\n'), ((17662, 17675), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (17670, 17675), True, 'import numpy as np\n'), ((17711, 17752), 'numpy.array', 'np.array', (["[0, 1, '1234567']"], {'dtype': 'object'}), "([0, 1, '1234567'], dtype=object)\n", (17719, 17752), True, 'import numpy as np\n'), ((18465, 18499), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""y"""'], {'values': '"""01"""'}), "('y', values='01')\n", (18481, 18499), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18587, 18599), 'Orange.preprocess.Continuize', 'Continuize', ([], {}), '()\n', (18597, 18599), False, 'from Orange.preprocess import Continuize, Impute\n'), ((18874, 18894), 'pickle.dumps', 'pickle.dumps', (['domain'], {}), '(domain)\n', (18886, 18894), False, 'import pickle\n'), ((19736, 19765), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (["('f%i' % i)"], {}), "('f%i' % i)\n", (19754, 19765), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19811, 19840), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (["('c%i' % i)"], {}), "('c%i' % i)\n", (19829, 19840), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((19878, 19907), 'Orange.data.ContinuousVariable', 'ContinuousVariable', (["('m%i' % i)"], {}), "('m%i' % i)\n", (19896, 19907), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((22988, 23031), 'Orange.data.domain.filter_visible', 'filter_visible', (['self.iris.domain.attributes'], {}), '(self.iris.domain.attributes)\n', (23002, 23031), False, 'from Orange.data.domain import filter_visible\n'), ((5266, 5287), 'numpy.zeros', 'np.zeros', (['(1, n_cols)'], {}), '((1, n_cols))\n', (5274, 5287), True, 'import numpy as np\n'), ((6211, 6227), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (6219, 6227), True, 'import numpy as np\n'), ((6229, 6248), 'numpy.zeros', 'np.zeros', (['dimension'], {}), '(dimension)\n', (6237, 6248), True, 'import numpy as np\n'), ((6928, 6944), 'numpy.zeros', 'np.zeros', 
(['(1, 1)'], {}), '((1, 1))\n', (6936, 6944), True, 'import numpy as np\n'), ((7525, 7538), 'Orange.data.Domain', 'Domain', (['*args'], {}), '(*args)\n', (7531, 7538), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((8829, 8839), 'numpy.int_', 'np.int_', (['(0)'], {}), '(0)\n', (8836, 8839), True, 'import numpy as np\n'), ((8930, 8940), 'numpy.int_', 'np.int_', (['(2)'], {}), '(2)\n', (8937, 8940), True, 'import numpy as np\n'), ((9029, 9040), 'numpy.int_', 'np.int_', (['(-1)'], {}), '(-1)\n', (9036, 9040), True, 'import numpy as np\n'), ((9082, 9093), 'numpy.int_', 'np.int_', (['(-2)'], {}), '(-2)\n', (9089, 9093), True, 'import numpy as np\n'), ((15650, 15671), 'Orange.data.Domain', 'Domain', (['[arrival]', '[]'], {}), '([arrival], [])\n', (15656, 15671), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15712, 15733), 'Orange.data.Domain', 'Domain', (['[]', '[arrival]'], {}), '([], [arrival])\n', (15718, 15733), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15775, 15800), 'Orange.data.Domain', 'Domain', (['[]', '[]', '[arrival]'], {}), '([], [], [arrival])\n', (15781, 15800), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((17299, 17310), 'numpy.isnan', 'np.isnan', (['a'], {}), '(a)\n', (17307, 17310), True, 'import numpy as np\n'), ((17331, 17342), 'numpy.isnan', 'np.isnan', (['b'], {}), '(b)\n', (17339, 17342), True, 'import numpy as np\n'), ((18380, 18414), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""a"""'], {'values': '"""01"""'}), "('a', values='01')\n", (18396, 18414), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18416, 18450), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""b"""'], {'values': '"""01"""'}), "('b', values='01')\n", (18432, 18450), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((18600, 18608), 'Orange.preprocess.Impute', 'Impute', ([], {}), '()\n', (18606, 18608), False, 'from Orange.preprocess import Continuize, Impute\n'), ((20738, 20758), 'Orange.data.Domain', 'Domain', (['*domain_args'], {}), '(*domain_args)\n', (20744, 20758), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((20948, 20954), 'time.time', 'time', ([], {}), '()\n', (20952, 20954), False, 'from time import time\n'), ((11966, 11979), 'Orange.data.Domain', 'Domain', (['*args'], {}), '(*args)\n', (11972, 11979), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12051, 12061), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (12057, 12061), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12114, 12131), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (12120, 12131), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, 
TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12184, 12200), 'Orange.data.Domain', 'Domain', (['[]', 'race'], {}), '([], race)\n', (12190, 12200), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12254, 12273), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (12260, 12273), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12325, 12345), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (12331, 12345), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12397, 12422), 'Orange.data.Domain', 'Domain', (['[age, race]', 'None'], {}), '([age, race], None)\n', (12403, 12422), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12474, 12499), 'Orange.data.Domain', 'Domain', (['[race, age]', 'None'], {}), '([race, age], None)\n', (12480, 12499), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12553, 12570), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (12559, 12570), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12626, 12644), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (12632, 12644), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12701, 12720), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (12707, 12720), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12776, 12796), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (12782, 12796), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12852, 12871), 'Orange.data.Domain', 'Domain', (['[age]', 'race'], {}), '([age], race)\n', (12858, 12871), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((12927, 12946), 'Orange.data.Domain', 'Domain', (['[race]', 'age'], {}), '([race], age)\n', (12933, 12946), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13002, 13025), 'Orange.data.Domain', 'Domain', (['[]', '[race, age]'], {}), '([], [race, age])\n', (13008, 13025), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13531, 13541), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (13537, 13541), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13596, 13613), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (13602, 13613), False, 'from Orange.data import ContinuousVariable, 
DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13668, 13686), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (13674, 13686), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13741, 13760), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (13747, 13760), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13815, 13835), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (13821, 13835), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13889, 13914), 'Orange.data.Domain', 'Domain', (['[age, race]', 'None'], {}), '([age, race], None)\n', (13895, 13914), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((13968, 13993), 'Orange.data.Domain', 'Domain', (['[race, age]', 'None'], {}), '([race, age], None)\n', (13974, 13993), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14048, 14065), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (14054, 14065), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14124, 14142), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (14130, 14142), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14200, 14219), 'Orange.data.Domain', 'Domain', (['[age]', 'None'], {}), '([age], None)\n', (14206, 14219), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14278, 14298), 'Orange.data.Domain', 'Domain', (['[race]', 'None'], {}), '([race], None)\n', (14284, 14298), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14356, 14375), 'Orange.data.Domain', 'Domain', (['[age]', 'race'], {}), '([age], race)\n', (14362, 14375), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14433, 14452), 'Orange.data.Domain', 'Domain', (['[race]', 'age'], {}), '([race], age)\n', (14439, 14452), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((14510, 14533), 'Orange.data.Domain', 'Domain', (['[]', '[race, age]'], {}), '([], [race, age])\n', (14516, 14533), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15043, 15053), 'Orange.data.Domain', 'Domain', (['[]'], {}), '([])\n', (15049, 15053), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15102, 15119), 'Orange.data.Domain', 'Domain', (['[]', '[age]'], {}), '([], [age])\n', (15108, 15119), False, 'from 
Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15168, 15186), 'Orange.data.Domain', 'Domain', (['[]', '[race]'], {}), '([], [race])\n', (15174, 15186), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15235, 15256), 'Orange.data.Domain', 'Domain', (['[]', '[arrival]'], {}), '([], [arrival])\n', (15241, 15256), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15305, 15330), 'Orange.data.Domain', 'Domain', (['[]', '[]', '[arrival]'], {}), '([], [], [arrival])\n', (15311, 15330), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15379, 15400), 'Orange.data.Domain', 'Domain', (['[arrival]', '[]'], {}), '([arrival], [])\n', (15385, 15400), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15448, 15469), 'Orange.data.Domain', 'Domain', (['[]', '[arrival]'], {}), '([], [arrival])\n', (15454, 15469), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((15548, 15573), 'Orange.data.Domain', 'Domain', (['[]', '[]', '[arrival]'], {}), '([], [], [arrival])\n', (15554, 15573), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21452, 21480), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""a"""'}), "(name='a')\n", (21470, 21480), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21498, 21526), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""b"""'}), "(name='b')\n", (21516, 21526), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21544, 21572), 'Orange.data.ContinuousVariable', 'ContinuousVariable', ([], {'name': '"""c"""'}), "(name='c')\n", (21562, 21572), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21613, 21648), 'Orange.data.DiscreteVariable', 'DiscreteVariable', (['"""d"""'], {'values': "['e']"}), "('d', values=['e'])\n", (21629, 21648), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((21670, 21689), 'Orange.data.StringVariable', 'StringVariable', (['"""f"""'], {}), "('f')\n", (21684, 21689), False, 'from Orange.data import ContinuousVariable, DiscreteVariable, StringVariable, TimeVariable, Variable, Domain, Table, DomainConversion\n'), ((6946, 6975), 'numpy.arange', 'np.arange', (['aran_min', 'aran_max'], {}), '(aran_min, aran_max)\n', (6955, 6975), True, 'import numpy as np\n')]
|
"""
.. module:: mixtures
:platform: Unix, Windows
:synopsis: a module for defining the class :class:`Mixture`.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import numpy as np
import pandas as pd
import mics
from mics.funcs import deltaMethod
from mics.funcs import diff
from mics.funcs import func
from mics.utils import InputError
from mics.utils import bennett
from mics.utils import cases
from mics.utils import crypto
from mics.utils import errorTitle
from mics.utils import info
from mics.utils import multimap
from mics.utils import propertyDict
from mics.utils import stdError
class mixture:
"""
A mixture of independently collected samples (MICS).
Parameters
----------
samples : :class:`pooledsample` or list(:class:`sample`)
A list of samples.
engine : :class:`MICS` or :class:`MBAR`
A method for mixture-model analysis.
"""
def __init__(self, samples, engine):
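        # Hedged usage sketch (illustrative names): a mixture is typically built
        # as `mixture(samples, engine=mics.MICS())`, assuming the MICS engine
        # class is exposed at the package level.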
self.samples = samples
self.engine = engine
m = self.m = len(samples)
if mics.verbose:
# np.set_printoptions(precision=4, threshold=15, edgeitems=4, suppress=True)
info("\n=== Setting up mixture ===")
info("Analysis method: ", self.engine.__class__.__name__)
info("Number of samples:", m)
if m == 0:
raise InputError("list of samples is empty")
self.n = np.array([len(sample.dataset) for sample in samples])
self.neff = np.array([sample.neff for sample in samples])
names = self.names = list(samples[0].dataset.columns)
if mics.verbose:
info("Sample sizes:", self.n)
info("Effective sample sizes:", self.neff)
info("Properties:", ", ".join(names))
potentials = [sample.potential.lambdify() for sample in samples]
self.u = [multimap(potentials, sample.dataset) for sample in samples]
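        # Initial guesses for the relative free energies, obtained with
        # Bennett's acceptance-ratio method (see mics.utils.bennett).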
self.f = bennett(self.u)
mics.verbose and info("Initial free-energy guess:", self.f)
self.engine.__initialize__(self)
# ======================================================================================
def __compute__(self, functions, constants):
try:
if isinstance(functions, str):
funcs = [func(functions, self.names, constants).lambdify()]
else:
funcs = [func(f, self.names, constants).lambdify() for f in functions]
return [multimap(funcs, sample.dataset) for sample in self.samples]
except (InputError, KeyError):
return None
# ======================================================================================
def free_energies(self, reference=0):
"""
Computes the free energies of all sampled states relative to a given
reference state, as well as their standard errors.
Parameters
----------
reference : int, optional, default=0
Specifies which sampled state will be considered as a reference
for computing free-energy differences.
Returns
-------
pandas.DataFrame
A data frame containing the free-energy differences and their
computed standard errors for all sampled states.
"""
frame = self.samples.__qualifiers__()
frame["f"] = self.f - self.f[reference]
T = self.Theta
frame["df"] = np.sqrt(np.diag(T) - 2*T[:, reference] + T[reference, reference])
return frame
# ======================================================================================
def reweighting(self, potential, properties={}, derivatives={}, combinations={},
conditions={}, reference=0, **constants):
"""
Computes averages of specified properties at target states defined by
a given reduced `potential` function with distinct passed parameter
values, as well as the free energies of such states with respect to a
sampled `reference` state. Also, computes derivatives of these averages
and free energies with respect to the mentioned parameters. In addition,
evaluates combinations of free energies, averages, and derivatives. In
all cases, uncertainty propagation is handled automatically by means of
the delta method.
Parameters
----------
potential : str
A mathematical expression defining the reduced potential of the
target states. It might depend on the collective variables of
the mixture samples, as well as on external parameters whose
values will be passed via `conditions` or `constants`, such as
explained below.
properties : dict(str: str), optional, default={}
A dictionary associating names to mathematical expressions, thus
defining a set of properties whose averages must be evaluated at
the target states. If it is omitted, then only the relative free
energies of the target states will be evaluated. The expressions
might depend on the same collective variables and parameters
mentioned above for `potential`.
derivatives : dict(str: (str, str)), optional, default={}
A dictionary associating names to (property, parameter) pairs,
thus specifying derivatives of average properties at the target
states or relative free energies of these states with respect
to external parameters. For each pair, property must be either
"f" (for free energy) or a name defined in `properties`, while
parameter must be an external parameter such as described above
for `potential`.
combinations : dict(str: str), optional, default={}
A dictionary associating names to mathematical expressions, thus
defining combinations among average properties at the target
states, the relative free energies of these states, and their
derivatives with respect to external parameters. The expressions
might depend on "f" (for free energy) or on the names defined in
`properties`, as well as on external parameters such as described
above for `potential`.
conditions : pandas.DataFrame or dict, optional, default={}
A data frame whose column names are external parameters present
in mathematical expressions specified in arguments `potential`,
`properties`, and `combinations`. The rows of the data frame
            contain sets of values of these parameters, in such a way that
the reweighting is carried out for every single set. This is a
way of defining multiple target states from a single `potential`
expression. The same information can be passed as a dictionary
associating names to lists of numerical values, provided that
all lists are equally sized. If it is empty, then a unique
target state will be considered and all external parameters in
`potential`, if any, must be passed as keyword arguments.
reference : int, optional, default=0
The index of a sampled state to be considered as a reference for
computing relative free energies.
**constants : keyword arguments
A set of keyword arguments passed as name=value, aimed to define
external parameter values for the evaluation of mathematical
expressions. These values will be repeated at all target states
specified via `potential` and `conditions`.
Returns
-------
pandas.DataFrame
A data frame containing the computed quantities, along with
their estimated uncertainties, at all target states specified
via `potential` and `conditions`.
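
        Examples
        --------
        An illustrative call on a mixture instance `mix` (the column name
        `E` and the parameter `beta` are hypothetical)::

            mix.reweighting("beta*E", properties={"E": "E"},
                            conditions={"beta": [1.0, 1.1, 1.2]})

        returns one row per `beta` value with `f`, `E`, and their
        delta-method standard errors.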
"""
if mics.verbose:
info("\n=== Performing reweighting with %s ===" % self.engine.__class__.__name__)
info("Reduced potential:", potential)
constants and info("Provided constants: ", constants)
freeEnergy = "f"
if freeEnergy in properties.keys():
            raise InputError("Word %s is reserved for free energies" % freeEnergy)
condframe = pd.DataFrame(data=conditions) if isinstance(conditions, dict) else conditions
propfuncs = list(properties.values())
if not derivatives:
propnames = [freeEnergy] + list(properties.keys())
combs = combinations.values()
gProps = self.__compute__(propfuncs, constants)
if combinations:
gDelta = deltaMethod(combs, propnames, constants)
results = list()
for (index, condition) in cases(condframe):
mics.verbose and condition and info("Condition[%s]" % index, condition)
consts = dict(condition, **constants)
u = self.__compute__(potential, consts)
y = gProps if gProps else self.__compute__(propfuncs, consts)
(yu, Theta) = self.engine.__reweight__(self, u, y, reference)
result = propertyDict(propnames, yu, stdError(Theta))
if combinations:
delta = gDelta if gDelta.valid else deltaMethod(combs, propnames, consts)
(h, dh) = delta.evaluate(yu, Theta)
result.update(propertyDict(combinations.keys(), h, dh))
results.append(result.to_frame(index))
return condframe.join(pd.concat(results))
else:
symbols = list(condframe.columns) + list(constants.keys())
parameters = set(x for (y, x) in derivatives.values())
props = dict()
for x in parameters:
props[crypto(x)] = diff(potential, x, symbols)
combs = dict()
for (z, (y, x)) in derivatives.items():
if y == freeEnergy:
combs[z] = crypto(x)
else:
dydx = diff(properties[y], x, symbols)
props[crypto(z)] = "%s - (%s)*(%s)" % (dydx, props[crypto(x)], properties[y])
combs[z] = "%s + (%s)*(%s)" % (crypto(z), crypto(x), y)
unwanted = sum([[x, errorTitle(x)] for x in props.keys()], [])
return self.reweighting(potential, dict(properties, **props), {},
dict(combs, **combinations), condframe, reference,
**constants).drop(unwanted, axis=1)
# ======================================================================================
def pmf(self, potential, property, bins=10, interval=None, **constants):
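        # Estimates a potential of mean force: `property` is histogrammed into
        # `bins` bins, the bin indicator functions are reweighted to the target
        # `potential`, and the PMF of each bin is taken as -log of its
        # reweighted probability.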
if mics.verbose:
info("\n=== Computing PMF with %s ===" % self.engine.__class__.__name__)
info("Reduced potential:", potential)
u = self.__compute__(potential, constants)
z = self.__compute__(property, constants)
if interval:
(zmin, zmax) = interval
else:
zmin = min(np.amin(x[0, :]) for x in z)
zmax = max(np.amax(x[0, :]) for x in z)
delta = (zmax - zmin)/bins
ibin = [np.floor((x[0:1, :] - zmin)/delta).astype(int) for x in z]
results = list()
for i in range(bins):
zc = zmin + delta*(i + 0.5)
mics.verbose and info("Bin[%d]:" % (i + 1), "%s = %s" % (property, str(zc)))
            y = [np.equal(x, i).astype(float) for x in ibin]
(yu, Theta) = self.engine.__reweight__(self, u, y)
if yu[1] > 0.0:
dyu = np.sqrt(max(0.0, Theta[1, 1]))
results.append([zc, -np.log(yu[1]), dyu/yu[1]])
return pd.DataFrame(results, columns=[property, "pmf", errorTitle("pmf")])
# ======================================================================================
def histograms(self, property="u0", bins=100, **constants):
if property == "u0":
y = self.u0
elif property == "state":
w = np.arange(self.m) + 1
wsum = sum(w)
y = [wsum*np.average(p, axis=0, weights=w) for p in self.P]
elif property == "potential":
y = [self.u[i][i, :] for i in range(self.m)]
else:
y = self.__compute__(property, constants)
ymin = min([np.amin(x) for x in y])
ymax = max([np.amax(x) for x in y])
delta = (ymax - ymin)/bins
center = [ymin + delta*(i + 0.5) for i in range(bins)]
frame = pd.DataFrame({property: center})
for i in range(self.m):
frame["state %s" % (i+1)] = np.histogram(y[i], bins, (ymin, ymax))[0]
return frame
|
[
"mics.utils.InputError",
"numpy.log",
"numpy.equal",
"mics.utils.stdError",
"numpy.array",
"mics.utils.multimap",
"numpy.arange",
"numpy.histogram",
"mics.utils.cases",
"mics.funcs.diff",
"pandas.concat",
"pandas.DataFrame",
"mics.utils.info",
"mics.funcs.deltaMethod",
"numpy.amin",
"numpy.average",
"numpy.floor",
"mics.utils.errorTitle",
"mics.utils.bennett",
"mics.funcs.func",
"numpy.diag",
"mics.utils.crypto",
"numpy.amax"
] |
[((1484, 1529), 'numpy.array', 'np.array', (['[sample.neff for sample in samples]'], {}), '([sample.neff for sample in samples])\n', (1492, 1529), True, 'import numpy as np\n'), ((1933, 1948), 'mics.utils.bennett', 'bennett', (['self.u'], {}), '(self.u)\n', (1940, 1948), False, 'from mics.utils import bennett\n'), ((12889, 12921), 'pandas.DataFrame', 'pd.DataFrame', (['{property: center}'], {}), '({property: center})\n', (12901, 12921), True, 'import pandas as pd\n'), ((1166, 1205), 'mics.utils.info', 'info', (['"""\n=== Setting up mixture ==="""'], {}), '("""\n=== Setting up mixture ===""")\n', (1170, 1205), False, 'from mics.utils import info\n'), ((1215, 1272), 'mics.utils.info', 'info', (['"""Analysis method: """', 'self.engine.__class__.__name__'], {}), "('Analysis method: ', self.engine.__class__.__name__)\n", (1219, 1272), False, 'from mics.utils import info\n'), ((1285, 1314), 'mics.utils.info', 'info', (['"""Number of samples:"""', 'm'], {}), "('Number of samples:', m)\n", (1289, 1314), False, 'from mics.utils import info\n'), ((1353, 1391), 'mics.utils.InputError', 'InputError', (['"""list of samples is empty"""'], {}), "('list of samples is empty')\n", (1363, 1391), False, 'from mics.utils import InputError\n'), ((1629, 1658), 'mics.utils.info', 'info', (['"""Sample sizes:"""', 'self.n'], {}), "('Sample sizes:', self.n)\n", (1633, 1658), False, 'from mics.utils import info\n'), ((1671, 1713), 'mics.utils.info', 'info', (['"""Effective sample sizes:"""', 'self.neff'], {}), "('Effective sample sizes:', self.neff)\n", (1675, 1713), False, 'from mics.utils import info\n'), ((1856, 1892), 'mics.utils.multimap', 'multimap', (['potentials', 'sample.dataset'], {}), '(potentials, sample.dataset)\n', (1864, 1892), False, 'from mics.utils import multimap\n'), ((1974, 2016), 'mics.utils.info', 'info', (['"""Initial free-energy guess:"""', 'self.f'], {}), "('Initial free-energy guess:', self.f)\n", (1978, 2016), False, 'from mics.utils import info\n'), ((8222, 8311), 'mics.utils.info', 'info', (['("""\n=== Performing reweighting with %s ===""" % self.engine.__class__.__name__\n )'], {}), '("""\n=== Performing reweighting with %s ===""" % self.engine.__class__.\n __name__)\n', (8226, 8311), False, 'from mics.utils import info\n'), ((8316, 8353), 'mics.utils.info', 'info', (['"""Reduced potential:"""', 'potential'], {}), "('Reduced potential:', potential)\n", (8320, 8353), False, 'from mics.utils import info\n'), ((8508, 8571), 'mics.utils.InputError', 'InputError', (["('Word % is reserved for free energies' % freeEnergy)"], {}), "('Word % is reserved for free energies' % freeEnergy)\n", (8518, 8571), False, 'from mics.utils import InputError\n'), ((8592, 8621), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'conditions'}), '(data=conditions)\n', (8604, 8621), True, 'import pandas as pd\n'), ((9074, 9090), 'mics.utils.cases', 'cases', (['condframe'], {}), '(condframe)\n', (9079, 9090), False, 'from mics.utils import cases\n'), ((11093, 11168), 'mics.utils.info', 'info', (['("""\n=== Computing PMF with %s ===""" % self.engine.__class__.__name__)'], {}), '("""\n=== Computing PMF with %s ===""" % self.engine.__class__.__name__)\n', (11097, 11168), False, 'from mics.utils import info\n'), ((11178, 11215), 'mics.utils.info', 'info', (['"""Reduced potential:"""', 'potential'], {}), "('Reduced potential:', potential)\n", (11182, 11215), False, 'from mics.utils import info\n'), ((2458, 2489), 'mics.utils.multimap', 'multimap', (['funcs', 'sample.dataset'], {}), '(funcs, sample.dataset)\n', (2466, 
2489), False, 'from mics.utils import multimap\n'), ((8380, 8419), 'mics.utils.info', 'info', (['"""Provided constants: """', 'constants'], {}), "('Provided constants: ', constants)\n", (8384, 8419), False, 'from mics.utils import info\n'), ((8965, 9005), 'mics.funcs.deltaMethod', 'deltaMethod', (['combs', 'propnames', 'constants'], {}), '(combs, propnames, constants)\n', (8976, 9005), False, 'from mics.funcs import deltaMethod\n'), ((9865, 9883), 'pandas.concat', 'pd.concat', (['results'], {}), '(results)\n', (9874, 9883), True, 'import pandas as pd\n'), ((10133, 10160), 'mics.funcs.diff', 'diff', (['potential', 'x', 'symbols'], {}), '(potential, x, symbols)\n', (10137, 10160), False, 'from mics.funcs import diff\n'), ((12707, 12717), 'numpy.amin', 'np.amin', (['x'], {}), '(x)\n', (12714, 12717), True, 'import numpy as np\n'), ((12751, 12761), 'numpy.amax', 'np.amax', (['x'], {}), '(x)\n', (12758, 12761), True, 'import numpy as np\n'), ((12994, 13032), 'numpy.histogram', 'np.histogram', (['y[i]', 'bins', '(ymin, ymax)'], {}), '(y[i], bins, (ymin, ymax))\n', (13006, 13032), True, 'import numpy as np\n'), ((3453, 3463), 'numpy.diag', 'np.diag', (['T'], {}), '(T)\n', (3460, 3463), True, 'import numpy as np\n'), ((9139, 9179), 'mics.utils.info', 'info', (["('Condition[%s]' % index)", 'condition'], {}), "('Condition[%s]' % index, condition)\n", (9143, 9179), False, 'from mics.utils import info\n'), ((9499, 9514), 'mics.utils.stdError', 'stdError', (['Theta'], {}), '(Theta)\n', (9507, 9514), False, 'from mics.utils import stdError\n'), ((10120, 10129), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10126, 10129), False, 'from mics.utils import crypto\n'), ((10307, 10316), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10313, 10316), False, 'from mics.utils import crypto\n'), ((10366, 10397), 'mics.funcs.diff', 'diff', (['properties[y]', 'x', 'symbols'], {}), '(properties[y], x, symbols)\n', (10370, 10397), False, 'from mics.funcs import diff\n'), ((11411, 11427), 'numpy.amin', 'np.amin', (['x[0, :]'], {}), '(x[0, :])\n', (11418, 11427), True, 'import numpy as np\n'), ((11463, 11479), 'numpy.amax', 'np.amax', (['x[0, :]'], {}), '(x[0, :])\n', (11470, 11479), True, 'import numpy as np\n'), ((11543, 11579), 'numpy.floor', 'np.floor', (['((x[0:1, :] - zmin) / delta)'], {}), '((x[0:1, :] - zmin) / delta)\n', (11551, 11579), True, 'import numpy as np\n'), ((12123, 12140), 'mics.utils.errorTitle', 'errorTitle', (['"""pmf"""'], {}), "('pmf')\n", (12133, 12140), False, 'from mics.utils import errorTitle\n'), ((12404, 12421), 'numpy.arange', 'np.arange', (['self.m'], {}), '(self.m)\n', (12413, 12421), True, 'import numpy as np\n'), ((9605, 9642), 'mics.funcs.deltaMethod', 'deltaMethod', (['combs', 'propnames', 'consts'], {}), '(combs, propnames, consts)\n', (9616, 9642), False, 'from mics.funcs import deltaMethod\n'), ((10424, 10433), 'mics.utils.crypto', 'crypto', (['z'], {}), '(z)\n', (10430, 10433), False, 'from mics.utils import crypto\n'), ((10604, 10617), 'mics.utils.errorTitle', 'errorTitle', (['x'], {}), '(x)\n', (10614, 10617), False, 'from mics.utils import errorTitle\n'), ((11804, 11818), 'numpy.equal', 'np.equal', (['x', 'i'], {}), '(x, i)\n', (11812, 11818), True, 'import numpy as np\n'), ((12474, 12506), 'numpy.average', 'np.average', (['p'], {'axis': '(0)', 'weights': 'w'}), '(p, axis=0, weights=w)\n', (12484, 12506), True, 'import numpy as np\n'), ((2282, 2320), 'mics.funcs.func', 'func', (['functions', 'self.names', 'constants'], {}), '(functions, self.names, constants)\n', 
(2286, 2320), False, 'from mics.funcs import func\n'), ((2376, 2406), 'mics.funcs.func', 'func', (['f', 'self.names', 'constants'], {}), '(f, self.names, constants)\n', (2380, 2406), False, 'from mics.funcs import func\n'), ((10547, 10556), 'mics.utils.crypto', 'crypto', (['z'], {}), '(z)\n', (10553, 10556), False, 'from mics.utils import crypto\n'), ((10558, 10567), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10564, 10567), False, 'from mics.utils import crypto\n'), ((12032, 12045), 'numpy.log', 'np.log', (['yu[1]'], {}), '(yu[1])\n', (12038, 12045), True, 'import numpy as np\n'), ((10469, 10478), 'mics.utils.crypto', 'crypto', (['x'], {}), '(x)\n', (10475, 10478), False, 'from mics.utils import crypto\n')]
|
import math
import os
import time
import numpy as np
import pybullet as p
import pybullet_utils.bullet_client as bc
from gripper_module import load_gripper
from misc.urdf_editor import UrdfEditor
import utils
from fusion import TSDFVolume
class Gripper(object):
"""
A moving mount and a gripper.
the mount has 4 joints:
0: prismatic x;
1: prismatic y;
2: prismatic z;
3: revolute z;
the gripper is defined by the `gripper_type`.
"""
def __init__(self, gripper_type, bullet_client, home_position, num_side_images, voxel_size=0.004, trunc_margin_scale=5, **kwargs):
self._bullet_client = bullet_client
self._gripper_type = gripper_type
self._gripper_size = kwargs['gripper_size']
self._home_position = home_position
self._default_orientation = [0,0,0]
self._num_side_images = num_side_images
# load gripper
self._gripper = load_gripper(gripper_type)(self._bullet_client, **kwargs)
gripper_body_id = self._gripper.load(self._home_position)
# load mount
mount_urdf = 'assets/gripper/mount.urdf'
mount_body_id = self._bullet_client.loadURDF(
mount_urdf,
basePosition=self._home_position,
useFixedBase=True
)
# combine mount and gripper by a joint
ed_mount = UrdfEditor()
ed_mount.initializeFromBulletBody(mount_body_id, self._bullet_client._client)
ed_gripper = UrdfEditor()
ed_gripper.initializeFromBulletBody(gripper_body_id, self._bullet_client._client)
self._gripper_parent_index = 4
newjoint = ed_mount.joinUrdf(
childEditor=ed_gripper,
parentLinkIndex=self._gripper_parent_index,
jointPivotXYZInParent=self._gripper.get_pos_offset(),
jointPivotRPYInParent=self._bullet_client.getEulerFromQuaternion(self._gripper.get_orn_offset()),
jointPivotXYZInChild=[0, 0, 0],
jointPivotRPYInChild=[0, 0, 0],
parentPhysicsClientId=self._bullet_client._client,
childPhysicsClientId=self._bullet_client._client
)
newjoint.joint_type = self._bullet_client.JOINT_FIXED
newjoint.joint_name = "joint_mount_gripper"
urdfname = f".tmp_combined_{self._gripper_type}_{self._gripper_size:.4f}_{np.random.random():.10f}_{time.time():.10f}.urdf"
ed_mount.saveUrdf(urdfname)
# remove mount and gripper bodies
self._bullet_client.removeBody(mount_body_id)
self._bullet_client.removeBody(gripper_body_id)
self._body_id = self._bullet_client.loadURDF(
urdfname,
useFixedBase=True,
basePosition=self._home_position,
baseOrientation=self._bullet_client.getQuaternionFromEuler([0, 0, 0])
)
# remove the combined URDF
os.remove(urdfname)
# configure the gripper (e.g. friction)
self._gripper.configure(self._body_id, self._gripper_parent_index+1)
# define force and speed (movement of mount)
self._force = 10000
self._speed = 0.005
self._tsdf_size = [64, 64, 32]
self._voxel_size = voxel_size
self._trunc_margin_scale = trunc_margin_scale
bond = np.array(self._tsdf_size) * self._voxel_size
self._vol_bnds = np.array([[-bond[0]/2, bond[0]/2],
[-bond[1]/2, bond[1]/2],
[0, bond[2]]])
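        # Shift the axis-aligned TSDF bounds so the volume is centred on the
        # gripper's home position in x and y, with its base at the home height.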
self._vol_bnds += np.array(self._home_position).reshape(3, -1)
# Add RGB-D camera (mimic RealSense D415) for gripper
self._gripper_cam_lookat = self._vol_bnds.mean(1)
self._gripper_cam_image_size = (512, 512)
self._gripper_cam_z_near = 0.01
self._gripper_cam_z_far = 10.0
self._gripper_cam_fov_w = 69.40
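        # Derive the focal length (in pixels) from the horizontal FOV, then
        # recover the vertical FOV, since computeProjectionMatrixFOV expects a
        # vertical field of view.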
self._gripper_cam_focal_length = (float(self._gripper_cam_image_size[1])/2)/np.tan((np.pi*self._gripper_cam_fov_w/180)/2)
self._gripper_cam_fov_h = (math.atan((float(self._gripper_cam_image_size[0])/2)/self._gripper_cam_focal_length)*2/np.pi)*180
self._gripper_cam_projection_matrix = self._bullet_client.computeProjectionMatrixFOV(
fov=self._gripper_cam_fov_h,
aspect=float(self._gripper_cam_image_size[1])/float(self._gripper_cam_image_size[0]),
nearVal=self._gripper_cam_z_near,
farVal=self._gripper_cam_z_far
) # notes: 1) FOV is vertical FOV 2) aspect must be float
self._gripper_cam_intrinsics = np.array([[self._gripper_cam_focal_length, 0, float(self._gripper_cam_image_size[1])/2],
[0, self._gripper_cam_focal_length, float(self._gripper_cam_image_size[0])/2],
[0, 0, 1]])
self.fix_joints(range(self._bullet_client.getNumJoints(self._body_id)))
def get_gripper_cam_data(self, cam_position, cam_lookat, cam_up_direction):
cam_view_matrix = self._bullet_client.computeViewMatrix(cam_position, cam_lookat, cam_up_direction)
cam_pose_matrix = np.linalg.inv(np.array(cam_view_matrix).reshape(4, 4).T)
# TODO: fix flipped up and forward vectors (quick hack)
cam_pose_matrix[:, 1:3] = -cam_pose_matrix[:, 1:3]
camera_data = self._bullet_client.getCameraImage(self._gripper_cam_image_size[1],self._gripper_cam_image_size[0],
cam_view_matrix,self._gripper_cam_projection_matrix,
shadow=1,flags=self._bullet_client.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
renderer=self._bullet_client.ER_BULLET_HARDWARE_OPENGL)
rgb_pixels = np.array(camera_data[2]).reshape((self._gripper_cam_image_size[0], self._gripper_cam_image_size[1], 4))
color_image = rgb_pixels[:,:,:3] # remove alpha channel
z_buffer = np.array(camera_data[3]).reshape((self._gripper_cam_image_size[0], self._gripper_cam_image_size[1]))
segmentation_mask = None # camera_data[4] - not implemented yet with renderer=p.ER_BULLET_HARDWARE_OPENGL
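        # Convert the non-linear OpenGL z-buffer values in [0, 1] back to metric
        # depth via the standard perspective inversion:
        #   depth = 2*near*far / (far + near - (2*z - 1)*(far - near))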
depth_image = (2.0*self._gripper_cam_z_near*self._gripper_cam_z_far)/(self._gripper_cam_z_far+self._gripper_cam_z_near-(2.0*z_buffer-1.0)*(self._gripper_cam_z_far-self._gripper_cam_z_near))
return color_image, depth_image, segmentation_mask, cam_pose_matrix
def get_tsdf(self, open_scale):
self.move(self._home_position, 0)
self.close()
self.open(open_scale=open_scale)
self._gripper_tsdf = TSDFVolume(self._vol_bnds, voxel_size=self._voxel_size)
# take side images
cam_up_direction = [0, 0, 1]
side_look_directions = np.linspace(0, 2*np.pi, num=self._num_side_images, endpoint=False)
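        # Render the gripper from evenly spaced viewpoints on a horizontal circle
        # around the home position, then from above and below, fusing every view
        # into the TSDF volume.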
cam_distance = 1
for direction in side_look_directions:
cam_position = [
self._home_position[0] + cam_distance * np.cos(direction),
self._home_position[1] + cam_distance * np.sin(direction),
self._home_position[2]
]
color_image, depth_image, _, cam_pose_matrix = self.get_gripper_cam_data(cam_position, self._gripper_cam_lookat, cam_up_direction)
self._gripper_tsdf.integrate(color_image, depth_image, self._gripper_cam_intrinsics, cam_pose_matrix, obs_weight=1.)
# take image from top
color_image, depth_image, _, cam_pose_matrix = self.get_gripper_cam_data([0, 0, 2], self._gripper_cam_lookat, [1, 0, 0])
self._gripper_tsdf.integrate(color_image, depth_image, self._gripper_cam_intrinsics, cam_pose_matrix, obs_weight=2.)
# take image from bottom
color_image, depth_image, _, cam_pose_matrix = self.get_gripper_cam_data([0, 0, 0], self._gripper_cam_lookat, [1, 0, 0])
self._gripper_tsdf.integrate(color_image, depth_image, self._gripper_cam_intrinsics, cam_pose_matrix, obs_weight=2.)
tsdf_vol_cpu, _ = self._gripper_tsdf.get_volume()
        tsdf_vol_cpu = np.transpose(tsdf_vol_cpu, [1, 0, 2]) # swap x-axis and y-axis to make it consistent with scene_tsdf
return tsdf_vol_cpu
def open(self, open_scale):
self._gripper.open(self._body_id, self._gripper_parent_index+1, open_scale=open_scale)
def close(self):
self._gripper.close(self._body_id, self._gripper_parent_index+1)
def move(self, target_position, rotation_angle, stop_at_contact=False):
"""
        :param target_position: (x, y, z). The position of the bottom center, not the base!
        :param rotation_angle: rotation about the z axis, in [0, 2*pi]. For a 2-finger gripper, angle=0 means the fingers are parallel to the x-axis.
"""
target_position = np.array(target_position) - np.array(self._home_position)
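        # The target is expressed relative to the home position because the
        # combined URDF was loaded with its base there; the four mount joints
        # (x, y, z prismatic + z revolute) are then driven in position control.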
joint_ids = [0, 1, 2, 3]
target_states = [target_position[0], target_position[1], target_position[2], rotation_angle%(2*np.pi)]
self._bullet_client.setJointMotorControlArray(
self._body_id,
joint_ids,
self._bullet_client.POSITION_CONTROL,
targetPositions=target_states,
forces=[self._force] * len(joint_ids),
positionGains=[self._speed] * len(joint_ids)
)
for i in range(240 * 6):
current_states = np.array([self._bullet_client.getJointState(self._body_id, joint_id)[0] for joint_id in joint_ids])
states_diff = np.abs(target_states - current_states)
# stop moving gripper if gripper collide with other objects
if stop_at_contact:
is_in_contact = False
points = self._bullet_client.getContactPoints(bodyA=self._body_id)
if len(points) > 0:
for p in points:
if p[9] > 0:
is_in_contact = True
break
if is_in_contact:
break
if np.all(states_diff < 1e-4):
break
self._gripper.step_constraints(self._body_id, self._gripper_parent_index+1)
self._bullet_client.stepSimulation()
self.fix_joints(joint_ids)
def fix_joints(self, joint_ids):
current_states = np.array([self._bullet_client.getJointState(self._body_id, joint_id)[0] for joint_id in joint_ids])
self._bullet_client.setJointMotorControlArray(
self._body_id,
joint_ids,
self._bullet_client.POSITION_CONTROL,
targetPositions=current_states,
forces=[self._force] * len(joint_ids),
positionGains=[self._speed] * len(joint_ids)
)
def primitive_grasping(self, target_position, rotation_angle, open_scale=1.0, stop_at_contact=False):
"""
        :param target_position: (x, y, z). The position of the bottom center, not the base!
        :param rotation_angle: rotation about the z axis, in [0, 2*pi]
        :return: success or not (True/False)
"""
self.move([target_position[0], target_position[1], self._home_position[2]], rotation_angle)
self.open(open_scale)
self.move(target_position, rotation_angle, stop_at_contact=stop_at_contact)
self.close()
self.move([target_position[0], target_position[1], self._home_position[2]], rotation_angle)
def remove(self):
self._bullet_client.removeBody(self._body_id)
def get_vis_pts(self, open_scale):
pts = self._gripper.get_vis_pts(open_scale)
angle = self._default_orientation[-1] # only add rotation around z axis
rotated_pts = np.transpose(np.dot(np.asarray(
[[np.cos(angle),-np.sin(angle)],
[np.sin(angle), np.cos(angle)]]),np.transpose(pts)))
return rotated_pts
|
[
"numpy.abs",
"numpy.tan",
"numpy.random.random",
"fusion.TSDFVolume",
"misc.urdf_editor.UrdfEditor",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"gripper_module.load_gripper",
"numpy.sin",
"numpy.all",
"numpy.transpose",
"time.time",
"os.remove"
] |
[((1374, 1386), 'misc.urdf_editor.UrdfEditor', 'UrdfEditor', ([], {}), '()\n', (1384, 1386), False, 'from misc.urdf_editor import UrdfEditor\n'), ((1494, 1506), 'misc.urdf_editor.UrdfEditor', 'UrdfEditor', ([], {}), '()\n', (1504, 1506), False, 'from misc.urdf_editor import UrdfEditor\n'), ((2897, 2916), 'os.remove', 'os.remove', (['urdfname'], {}), '(urdfname)\n', (2906, 2916), False, 'import os\n'), ((3378, 3464), 'numpy.array', 'np.array', (['[[-bond[0] / 2, bond[0] / 2], [-bond[1] / 2, bond[1] / 2], [0, bond[2]]]'], {}), '([[-bond[0] / 2, bond[0] / 2], [-bond[1] / 2, bond[1] / 2], [0,\n bond[2]]])\n', (3386, 3464), True, 'import numpy as np\n'), ((6627, 6682), 'fusion.TSDFVolume', 'TSDFVolume', (['self._vol_bnds'], {'voxel_size': 'self._voxel_size'}), '(self._vol_bnds, voxel_size=self._voxel_size)\n', (6637, 6682), False, 'from fusion import TSDFVolume\n'), ((6779, 6847), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)'], {'num': 'self._num_side_images', 'endpoint': '(False)'}), '(0, 2 * np.pi, num=self._num_side_images, endpoint=False)\n', (6790, 6847), True, 'import numpy as np\n'), ((8076, 8113), 'numpy.transpose', 'np.transpose', (['tsdf_vol_cpu', '[1, 0, 2]'], {}), '(tsdf_vol_cpu, [1, 0, 2])\n', (8088, 8113), True, 'import numpy as np\n'), ((948, 974), 'gripper_module.load_gripper', 'load_gripper', (['gripper_type'], {}), '(gripper_type)\n', (960, 974), False, 'from gripper_module import load_gripper\n'), ((3308, 3333), 'numpy.array', 'np.array', (['self._tsdf_size'], {}), '(self._tsdf_size)\n', (3316, 3333), True, 'import numpy as np\n'), ((3968, 4017), 'numpy.tan', 'np.tan', (['(np.pi * self._gripper_cam_fov_w / 180 / 2)'], {}), '(np.pi * self._gripper_cam_fov_w / 180 / 2)\n', (3974, 4017), True, 'import numpy as np\n'), ((8776, 8801), 'numpy.array', 'np.array', (['target_position'], {}), '(target_position)\n', (8784, 8801), True, 'import numpy as np\n'), ((8804, 8833), 'numpy.array', 'np.array', (['self._home_position'], {}), '(self._home_position)\n', (8812, 8833), True, 'import numpy as np\n'), ((9492, 9530), 'numpy.abs', 'np.abs', (['(target_states - current_states)'], {}), '(target_states - current_states)\n', (9498, 9530), True, 'import numpy as np\n'), ((10024, 10052), 'numpy.all', 'np.all', (['(states_diff < 0.0001)'], {}), '(states_diff < 0.0001)\n', (10030, 10052), True, 'import numpy as np\n'), ((2361, 2379), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2377, 2379), True, 'import numpy as np\n'), ((2387, 2398), 'time.time', 'time.time', ([], {}), '()\n', (2396, 2398), False, 'import time\n'), ((3549, 3578), 'numpy.array', 'np.array', (['self._home_position'], {}), '(self._home_position)\n', (3557, 3578), True, 'import numpy as np\n'), ((5775, 5799), 'numpy.array', 'np.array', (['camera_data[2]'], {}), '(camera_data[2])\n', (5783, 5799), True, 'import numpy as np\n'), ((5962, 5986), 'numpy.array', 'np.array', (['camera_data[3]'], {}), '(camera_data[3])\n', (5970, 5986), True, 'import numpy as np\n'), ((11805, 11822), 'numpy.transpose', 'np.transpose', (['pts'], {}), '(pts)\n', (11817, 11822), True, 'import numpy as np\n'), ((5161, 5186), 'numpy.array', 'np.array', (['cam_view_matrix'], {}), '(cam_view_matrix)\n', (5169, 5186), True, 'import numpy as np\n'), ((7003, 7020), 'numpy.cos', 'np.cos', (['direction'], {}), '(direction)\n', (7009, 7020), True, 'import numpy as np\n'), ((7078, 7095), 'numpy.sin', 'np.sin', (['direction'], {}), '(direction)\n', (7084, 7095), True, 'import numpy as np\n'), ((11728, 11741), 'numpy.cos', 'np.cos', (['angle'], 
{}), '(angle)\n', (11734, 11741), True, 'import numpy as np\n'), ((11773, 11786), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (11779, 11786), True, 'import numpy as np\n'), ((11788, 11801), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (11794, 11801), True, 'import numpy as np\n'), ((11743, 11756), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (11749, 11756), True, 'import numpy as np\n')]
|
"""
Scattering GUI
"""
import sys, os
import matplotlib.pyplot as plt # Plotting
import numpy as np
if sys.version_info[0] < 3:
import Tkinter as tk
else:
import tkinter as tk
from .. import functions_general as fg
from .. import functions_crystallography as fc
from .basic_widgets import StringViewer
from .basic_widgets import (TF, BF, SF, LF, HF,
bkg, ety, btn, opt, btn2,
btn_active, opt_active, txtcol,
btn_txt, ety_txt, opt_txt)
class ScatteringGui:
"""
Simulate scattering of various forms
"""
def __init__(self, xtl):
"""Initialise"""
self.xtl = xtl
# Create Tk inter instance
self.root = tk.Tk()
self.root.wm_title('Scattering %s' % xtl.name)
# self.root.minsize(width=640, height=480)
self.root.maxsize(width=self.root.winfo_screenwidth(), height=self.root.winfo_screenheight())
self.root.tk_setPalette(
background=bkg,
foreground=txtcol,
activeBackground=opt_active,
activeForeground=txtcol)
frame = tk.Frame(self.root)
frame.pack(side=tk.LEFT, anchor=tk.N)
        # Variables
self.energy_kev = tk.DoubleVar(frame, 8.0)
self.edge = tk.StringVar(frame, 'Edge')
self.type = tk.StringVar(frame, 'X-Ray')
self.orientation = tk.StringVar(frame, 'None')
self.direction_h = tk.IntVar(frame, 0)
self.direction_k = tk.IntVar(frame, 0)
self.direction_l = tk.IntVar(frame, 1)
self.theta_offset = tk.DoubleVar(frame, 0.0)
self.theta_min = tk.DoubleVar(frame, -180.0)
self.theta_max = tk.DoubleVar(frame, 180.0)
self.twotheta_min = tk.DoubleVar(frame, -180.0)
self.twotheta_max = tk.DoubleVar(frame, 180.0)
self.powder_units = tk.StringVar(frame, 'Two-Theta')
self.powderaverage = tk.BooleanVar(frame, True)
self.powder_width = tk.DoubleVar(frame, 0.01)
self.hkl_check = tk.StringVar(frame, '0 0 1')
self.hkl_result = tk.StringVar(frame, 'I:%10.0f TTH:%8.2f' % (0, 0))
self.val_i = tk.IntVar(frame, 0)
self.hkl_magnetic = tk.StringVar(frame, '0 0 1')
self.azim_zero = tk.StringVar(frame, '1 0 0')
self.isres = tk.BooleanVar(frame, True)
self.psival = tk.DoubleVar(frame, 0.0)
self.polval = tk.StringVar(frame, u'\u03c3-\u03c0')
self.resF0 = tk.DoubleVar(frame, 0.0)
self.resF1 = tk.DoubleVar(frame, 1.0)
self.resF2 = tk.DoubleVar(frame, 0.0)
self.magresult = tk.StringVar(frame, 'I = --')
# X-ray edges:
self.xr_edges, self.xr_energies = self.xtl.Properties.xray_edges()
self.xr_edges.insert(0, 'Cu Ka')
self.xr_edges.insert(1, 'Mo Ka')
self.xr_energies.insert(0, fg.Cu)
self.xr_energies.insert(1, fg.Mo)
line = tk.Frame(frame)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Label(line, text='Scattering', font=LF)
var.pack(side=tk.LEFT)
var = tk.Button(line, text='Supernova', font=BF, command=self.fun_supernova, bg=btn,
activebackground=btn_active)
var.pack(side=tk.RIGHT)
var = tk.Button(line, text='Wish', font=BF, command=self.fun_wish, bg=btn, activebackground=btn_active)
var.pack(side=tk.RIGHT)
var = tk.Button(line, text='I16', font=BF, command=self.fun_i16, bg=btn, activebackground=btn_active)
var.pack(side=tk.RIGHT)
# ---Settings---
box = tk.LabelFrame(frame, text='Settings')
box.pack(side=tk.TOP, fill=tk.BOTH, padx=5, pady=5)
# Energy
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Label(line, text='Energy (keV):', font=SF)
var.pack(side=tk.LEFT)
var = tk.OptionMenu(line, self.edge, *self.xr_edges, command=self.fun_edge)
var.config(font=SF, width=5, bg=opt, activebackground=opt_active)
var["menu"].config(bg=opt, bd=0, activebackground=opt_active)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.energy_kev, font=TF, width=8, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# Type
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
types = ['X-Ray', 'Neutron', 'XRay Magnetic', 'Neutron Magnetic', 'XRay Resonant', 'XRay Dispersion']
var = tk.Label(line, text='Type:', font=SF)
var.pack(side=tk.LEFT)
var = tk.OptionMenu(line, self.type, *types)
var.config(font=SF, width=10, bg=opt, activebackground=opt_active)
var["menu"].config(bg=opt, bd=0, activebackground=opt_active)
var.pack(side=tk.LEFT)
# Units
xaxistypes = ['two-theta', 'd-spacing', 'Q']
var = tk.Label(line, text='Units:', font=SF)
var.pack(side=tk.LEFT)
var = tk.OptionMenu(line, self.powder_units, *xaxistypes)
var.config(font=SF, width=10, bg=opt, activebackground=opt_active)
var["menu"].config(bg=opt, bd=0, activebackground=opt_active)
var.pack(side=tk.LEFT)
# Orientation
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Label(line, text='Geometry:', font=SF)
var.pack(side=tk.LEFT)
orients = ['None', 'Reflection', 'Transmission']
var = tk.OptionMenu(line, self.orientation, *orients)
var.config(font=SF, width=10, bg=opt, activebackground=opt_active)
var["menu"].config(bg=opt, bd=0, activebackground=opt_active)
var.pack(side=tk.LEFT)
# Direction
var = tk.Label(line, text='Direction:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.direction_h, font=TF, width=2, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.direction_k, font=TF, width=2, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.direction_l, font=TF, width=2, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# Theta offset
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Label(line, text='Offset:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.theta_offset, font=TF, width=5, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# Theta min
var = tk.Label(line, text='Min Theta:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.theta_min, font=TF, width=5, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# Theta max
var = tk.Label(line, text='Max Theta:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.theta_max, font=TF, width=5, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# TwoTheta min
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Label(line, text='Min TwoTheta:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.twotheta_min, font=TF, width=5, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# TwoTheta max
var = tk.Entry(line, textvariable=self.twotheta_max, font=TF, width=5, bg=ety, fg=ety_txt)
var.pack(side=tk.RIGHT)
var = tk.Label(line, text='Max TwoTheta:', font=SF)
var.pack(side=tk.RIGHT)
# Powder width
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Label(line, text='Powder peak width:', font=SF)
var.pack(side=tk.LEFT, padx=3)
var = tk.Entry(line, textvariable=self.powder_width, font=TF, width=5, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# Powder average tickbox
var = tk.Checkbutton(line, text='Powder average', variable=self.powderaverage, font=SF)
var.pack(side=tk.LEFT, padx=6)
# ---Intensities---
box = tk.LabelFrame(frame, text='Intensities')
box.pack(side=tk.TOP, fill=tk.BOTH, padx=5, pady=5)
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
var = tk.Button(line, text='Display Intensities', font=BF, command=self.fun_intensities, bg=btn2,
activebackground=btn_active)
var.pack(side=tk.LEFT)
var = tk.Button(line, text='Plot Powder', font=BF, command=self.fun_powder, bg=btn,
activebackground=btn_active)
var.pack(side=tk.LEFT)
# hkl check
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.X, pady=5)
hklbox = tk.LabelFrame(line, text='Quick Check')
hklbox.pack(side=tk.RIGHT)
var = tk.Entry(hklbox, textvariable=self.hkl_check, font=TF, width=6, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
var.bind('<Return>', self.fun_hklcheck)
var.bind('<KP_Enter>', self.fun_hklcheck)
var = tk.Label(hklbox, textvariable=self.hkl_result, font=TF, width=22)
var.pack(side=tk.LEFT)
var = tk.Button(hklbox, text='Check HKL', font=BF, command=self.fun_hklcheck, bg=btn,
activebackground=btn_active)
var.pack(side=tk.LEFT, pady=2)
# ---Planes---
box = tk.LabelFrame(frame, text='Reciprocal Space Planes')
box.pack(side=tk.TOP, fill=tk.BOTH, padx=5, pady=5)
line = tk.Frame(box)
line.pack(side=tk.TOP, pady=5)
# ---HKL Planes---
# i value
var = tk.Label(line, text='i:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(line, textvariable=self.val_i, font=TF, width=3, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# directions
vframe = tk.Frame(line)
vframe.pack(side=tk.LEFT, padx=3)
var = tk.Button(vframe, text='HKi', font=BF, command=self.fun_hki, width=5, bg=btn, activebackground=btn_active)
var.pack()
var = tk.Button(vframe, text='HiL', font=BF, command=self.fun_hil, width=5, bg=btn, activebackground=btn_active)
var.pack()
vframe = tk.Frame(line)
vframe.pack(side=tk.LEFT)
var = tk.Button(vframe, text='iKL', font=BF, command=self.fun_ikl, width=5, bg=btn, activebackground=btn_active)
var.pack()
var = tk.Button(vframe, text='HHi', font=BF, command=self.fun_hhi, width=5, bg=btn, activebackground=btn_active)
var.pack()
# ---X-ray Magnetic scattering----
if np.any(self.xtl.Structure.mxmymz()):
box = tk.LabelFrame(frame, text='X-Ray Magnetic Scattering')
box.pack(side=tk.TOP, fill=tk.BOTH, padx=3)
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.BOTH, pady=5)
# Resonant HKL, azimuthal reference
vframe = tk.Frame(line)
vframe.pack(side=tk.LEFT, fill=tk.Y, padx=3)
hframe = tk.Frame(vframe)
hframe.pack()
var = tk.Label(hframe, text=' HKL:', font=SF, width=11)
var.pack(side=tk.LEFT)
var = tk.Entry(hframe, textvariable=self.hkl_magnetic, font=TF, width=6, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
var.bind('<Return>', self.fun_hklmag)
var.bind('<KP_Enter>', self.fun_hklmag)
hframe = tk.Frame(vframe)
hframe.pack()
var = tk.Label(vframe, text='Azim. Ref.:', font=SF, width=11)
var.pack(side=tk.LEFT)
var = tk.Entry(vframe, textvariable=self.azim_zero, font=TF, width=6, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
# Resonant value
vframe = tk.Frame(line)
vframe.pack(side=tk.LEFT, fill=tk.Y, padx=3)
hframe = tk.Frame(vframe)
hframe.pack()
var = tk.Label(hframe, text='F0:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(hframe, textvariable=self.resF0, font=TF, width=3, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
hframe = tk.Frame(vframe)
hframe.pack()
var = tk.Label(hframe, text='F1:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(hframe, textvariable=self.resF1, font=TF, width=3, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
hframe = tk.Frame(vframe)
hframe.pack()
var = tk.Label(hframe, text='F2:', font=SF)
var.pack(side=tk.LEFT)
var = tk.Entry(hframe, textvariable=self.resF2, font=TF, width=3, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
vframe = tk.Frame(line)
vframe.pack(side=tk.LEFT, fill=tk.Y, padx=3)
# Polarisation
poltypes = [u'\u03c3-\u03c3', u'\u03c3-\u03c0', u'\u03c0-\u03c3', u'\u03c0-\u03c0']
hframe = tk.Frame(vframe)
hframe.pack()
var = tk.Label(hframe, text='Polarisation:', font=SF)
var.pack(side=tk.LEFT)
var = tk.OptionMenu(hframe, self.polval, *poltypes)
var.config(font=SF, width=5, bg=opt, activebackground=opt_active)
var["menu"].config(bg=opt, bd=0, activebackground=opt_active)
var.pack(side=tk.LEFT)
hframe = tk.Frame(vframe)
hframe.pack()
# Resonant tickbox
var = tk.Checkbutton(hframe, text='Resonant', variable=self.isres, font=SF)
var.pack(side=tk.LEFT, padx=6)
# psi
var = tk.Label(hframe, text='psi:', font=SF, width=4)
var.pack(side=tk.LEFT)
var = tk.Entry(hframe, textvariable=self.psival, font=TF, width=4, bg=ety, fg=ety_txt)
var.pack(side=tk.LEFT)
var.bind('<Return>', self.fun_hklmag)
var.bind('<KP_Enter>', self.fun_hklmag)
line = tk.Frame(box)
line.pack(side=tk.TOP, fill=tk.BOTH, pady=5)
vframe = tk.Frame(line)
vframe.pack(side=tk.LEFT, fill=tk.Y, padx=3)
# Mag. Inten button
var = tk.Button(vframe, text='Calc. Mag. Inten.', font=BF, command=self.fun_hklmag, bg=btn,
activebackground=btn_active)
var.pack(side=tk.LEFT, padx=5)
# Magnetic Result
var = tk.Label(vframe, textvariable=self.magresult, font=SF, width=12)
var.pack(side=tk.LEFT, fill=tk.Y)
# Azimuth Button
var = tk.Button(line, text='Simulate\n Azimuth', font=BF, command=self.fun_azimuth, width=7, bg=btn,
activebackground=btn_active)
var.pack(side=tk.RIGHT)
    def fun_set(self):
        """Set GUI parameters from the crystal"""
self.type.set(self.xtl._scattering_type)
# self.energy_kev.set(8)
self.theta_offset.set(self.xtl._scattering_theta_offset)
self.theta_min.set(self.xtl._scattering_min_theta)
self.theta_max.set(self.xtl._scattering_max_theta)
self.twotheta_min.set(self.xtl._scattering_min_two_theta)
self.twotheta_max.set(self.xtl._scattering_max_two_theta)
if self.orientation.get() == 'Reflection':
self.direction_h.set(self.xtl._scattering_specular_direction[0])
self.direction_k.set(self.xtl._scattering_specular_direction[1])
self.direction_l.set(self.xtl._scattering_specular_direction[2])
else:
self.direction_h.set(self.xtl._scattering_parallel_direction[0])
self.direction_k.set(self.xtl._scattering_parallel_direction[1])
self.direction_l.set(self.xtl._scattering_parallel_direction[2])
    def fun_get(self):
        """Set crystal parameters from the GUI"""
scat = self.xtl.Scatter
scat._scattering_type = self.type.get()
scat._energy_kev = self.energy_kev.get()
scat._scattering_theta_offset = self.theta_offset.get()
scat._scattering_min_theta = self.theta_min.get()
scat._scattering_max_theta = self.theta_max.get()
scat._scattering_min_twotheta = self.twotheta_min.get()
scat._scattering_max_twotheta = self.twotheta_max.get()
scat._powder_units = self.powder_units.get()
if self.orientation.get() == 'Reflection':
scat._scattering_specular_direction[0] = self.direction_h.get()
scat._scattering_specular_direction[1] = self.direction_k.get()
scat._scattering_specular_direction[2] = self.direction_l.get()
elif self.orientation.get() == 'Transmission':
scat._scattering_parallel_direction[0] = self.direction_h.get()
scat._scattering_parallel_direction[1] = self.direction_k.get()
scat._scattering_parallel_direction[2] = self.direction_l.get()
    def fun_i16(self):
        """Add I16 parameters"""
self.type.set('X-Ray')
self.energy_kev.set(8.0)
self.edge.set('Edge')
self.powder_units.set('Two-Theta')
self.powderaverage.set(False)
self.orientation.set('Reflection')
self.theta_offset.set(0.0)
self.theta_min.set(-20.0)
self.theta_max.set(150.0)
self.twotheta_min.set(0.0)
self.twotheta_max.set(130.0)
    def fun_wish(self):
        """Add Wish parameters"""
self.type.set('Neutron')
self.energy_kev.set(17.7)
self.edge.set('Edge')
self.powder_units.set('d-spacing')
self.orientation.set('None')
self.theta_offset.set(0.0)
self.theta_min.set(-180.0)
self.theta_max.set(180.0)
self.twotheta_min.set(10.0)
self.twotheta_max.set(170.0)
def fun_supernova(self):
"""Add SuperNova parameters"""
self.type.set('X-Ray')
idx = self.xr_edges.index('Mo Ka')
self.edge.set('Mo Ka')
self.energy_kev.set(self.xr_energies[idx])
self.powder_units.set('Two-Theta')
self.orientation.set('None')
self.theta_offset.set(0.0)
self.theta_min.set(-180.0)
self.theta_max.set(180.0)
self.twotheta_min.set(-170.0)
self.twotheta_max.set(170.0)
def fun_edge(self, event=None):
"""X-ray edge option menu"""
edge = self.edge.get()
if self.edge.get() in self.xr_edges:
idx = self.xr_edges.index(edge)
self.energy_kev.set(self.xr_energies[idx])
    def fun_hklcheck(self, event=None):
        """Show single hkl intensity"""
self.fun_get()
hkl = self.hkl_check.get()
hkl = hkl.replace(',', ' ') # remove commas
hkl = hkl.replace('(', '').replace(')', '') # remove brackets
hkl = hkl.replace('[', '').replace(']', '') # remove brackets
hkl = np.fromstring(hkl, sep=' ')
I = self.xtl.Scatter.intensity(hkl)
unit = self.powder_units.get()
energy = self.energy_kev.get()
tth = self.xtl.Cell.tth(hkl, energy)
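        # Convert the scattering angle to the requested unit using the standard
        # relations Q = 4*pi*sin(tth/2)/lambda (fc.calqmag) and d = 2*pi/Q
        # (fc.q2dspace).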
if unit.lower() in ['tth', 'angle', 'twotheta', 'theta', 'two-theta']:
self.hkl_result.set('I:%10.0f TTH:%8.2f' % (I, tth))
elif unit.lower() in ['d', 'dspace', 'd-spacing', 'dspacing']:
q = fc.calqmag(tth, energy)
d = fc.q2dspace(q)
self.hkl_result.set(u'I:%10.0f d:%8.2f \u00c5' % (I, d))
else:
q = fc.calqmag(tth, energy)
self.hkl_result.set(u'I:%8.0f Q:%8.2f \u00c5\u207B\u00B9' % (I, q))
def fun_intensities(self):
"""Display intensities"""
self.fun_get()
if self.orientation.get() == 'Reflection':
string = self.xtl.Scatter.print_ref_reflections(min_intensity=-1, max_intensity=None)
elif self.orientation.get() == 'Transmission':
string = self.xtl.Scatter.print_tran_reflections(min_intensity=-1, max_intensity=None)
else:
units = self.powder_units.get()
string = self.xtl.Scatter.print_all_reflections(min_intensity=-1, max_intensity=None, units=units)
StringViewer(string, 'Intensities %s' % self.xtl.name)
def fun_powder(self):
"""Plot Powder"""
self.fun_get()
energy = self.energy_kev.get()
min_q = fc.calqmag(self.twotheta_min.get(), energy)
max_q = fc.calqmag(self.twotheta_max.get(), energy)
pow_avg = self.powderaverage.get()
pow_wid = self.powder_width.get()
#if min_q < 0: min_q = 0.0
self.xtl.Plot.simulate_powder(energy, peak_width=pow_wid, powder_average=pow_avg)
plt.show()
def fun_hki(self):
"""Plot hki plane"""
self.fun_get()
i = self.val_i.get()
self.xtl.Plot.simulate_hk0(i)
plt.show()
def fun_hil(self):
"""Plot hil plane"""
self.fun_get()
i = self.val_i.get()
self.xtl.Plot.simulate_h0l(i)
plt.show()
def fun_ikl(self):
"""Plot ikl plane"""
self.fun_get()
i = self.val_i.get()
self.xtl.Plot.simulate_0kl(i)
plt.show()
def fun_hhi(self):
"""Plot hhl plane"""
self.fun_get()
i = self.val_i.get()
self.xtl.Plot.simulate_hhl(i)
plt.show()
    def fun_hklmag(self, event=None):
        """Magnetic scattering"""
energy_kev = self.energy_kev.get()
hkl = self.hkl_magnetic.get()
hkl = hkl.replace(',', ' ') # remove commas
hkl = hkl.replace('(', '').replace(')', '') # remove brackets
hkl = hkl.replace('[', '').replace(']', '') # remove brackets
hkl = np.fromstring(hkl, sep=' ')
azi = self.azim_zero.get()
azi = azi.replace(',', ' ') # remove commas
azi = azi.replace('(', '').replace(')', '') # remove brackets
azi = azi.replace('[', '').replace(']', '') # remove brackets
azi = np.fromstring(azi, sep=' ')
psi = self.psival.get()
pol = self.polval.get()
if pol == u'\u03c3-\u03c3':
pol = 's-s'
elif pol == u'\u03c3-\u03c0':
pol = 's-p'
elif pol == u'\u03c0-\u03c3':
pol = 'p-s'
else:
pol = 'p-p'
F0 = self.resF0.get()
F1 = self.resF1.get()
F2 = self.resF2.get()
isres = self.isres.get()
if isres:
# Resonant scattering
maginten = self.xtl.Scatter.xray_resonant_magnetic(
hkl,
energy_kev=energy_kev,
azim_zero=azi, psi=psi,
polarisation=pol,
F0=F0, F1=F1, F2=F2)
else:
# Non-Resonant scattering
maginten = self.xtl.Scatter.xray_nonresonant_magnetic(
hkl,
energy_kev=energy_kev,
azim_zero=azi, psi=psi,
polarisation=pol)
self.magresult.set('I = %9.4g' % maginten)
def fun_azimuth(self):
"""Simulate azimuthal magnetic scattering"""
energy_kev = self.energy_kev.get()
hkl = self.hkl_magnetic.get()
hkl = hkl.replace(',', ' ') # remove commas
hkl = hkl.replace('(', '').replace(')', '') # remove brackets
hkl = hkl.replace('[', '').replace(']', '') # remove brackets
hkl = np.fromstring(hkl, sep=' ')
azi = self.azim_zero.get()
azi = azi.replace(',', ' ') # remove commas
azi = azi.replace('(', '').replace(')', '') # remove brackets
azi = azi.replace('[', '').replace(']', '') # remove brackets
azi = np.fromstring(azi, sep=' ')
pol = self.polval.get()
if pol == u'\u03c3-\u03c3':
pol = 's-s'
elif pol == u'\u03c3-\u03c0':
pol = 's-p'
elif pol == u'\u03c0-\u03c3':
pol = 'p-s'
else:
pol = 'p-p'
F0 = self.resF0.get()
F1 = self.resF1.get()
F2 = self.resF2.get()
isres = self.isres.get()
if isres:
# Resonant scattering
self.xtl.Plot.simulate_azimuth_resonant(
hkl,
energy_kev=energy_kev,
azim_zero=azi,
polarisation=pol,
F0=F0, F1=F1, F2=F2)
plt.show()
else:
# Non-Resonant scattering
self.xtl.Plot.simulate_azimuth_nonresonant(
hkl,
energy_kev=energy_kev,
azim_zero=azi,
polarisation=pol)
plt.show()
|
[
"tkinter.IntVar",
"tkinter.LabelFrame",
"tkinter.Entry",
"tkinter.Checkbutton",
"tkinter.BooleanVar",
"tkinter.Button",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.Label",
"tkinter.DoubleVar",
"tkinter.OptionMenu",
"numpy.fromstring",
"tkinter.Frame",
"matplotlib.pyplot.show"
] |
[((743, 750), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (748, 750), True, 'import tkinter as tk\n'), ((1146, 1165), 'tkinter.Frame', 'tk.Frame', (['self.root'], {}), '(self.root)\n', (1154, 1165), True, 'import tkinter as tk\n'), ((1260, 1284), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(8.0)'], {}), '(frame, 8.0)\n', (1272, 1284), True, 'import tkinter as tk\n'), ((1305, 1332), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""Edge"""'], {}), "(frame, 'Edge')\n", (1317, 1332), True, 'import tkinter as tk\n'), ((1353, 1381), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""X-Ray"""'], {}), "(frame, 'X-Ray')\n", (1365, 1381), True, 'import tkinter as tk\n'), ((1409, 1436), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""None"""'], {}), "(frame, 'None')\n", (1421, 1436), True, 'import tkinter as tk\n'), ((1464, 1483), 'tkinter.IntVar', 'tk.IntVar', (['frame', '(0)'], {}), '(frame, 0)\n', (1473, 1483), True, 'import tkinter as tk\n'), ((1511, 1530), 'tkinter.IntVar', 'tk.IntVar', (['frame', '(0)'], {}), '(frame, 0)\n', (1520, 1530), True, 'import tkinter as tk\n'), ((1558, 1577), 'tkinter.IntVar', 'tk.IntVar', (['frame', '(1)'], {}), '(frame, 1)\n', (1567, 1577), True, 'import tkinter as tk\n'), ((1606, 1630), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(0.0)'], {}), '(frame, 0.0)\n', (1618, 1630), True, 'import tkinter as tk\n'), ((1656, 1683), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(-180.0)'], {}), '(frame, -180.0)\n', (1668, 1683), True, 'import tkinter as tk\n'), ((1709, 1735), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(180.0)'], {}), '(frame, 180.0)\n', (1721, 1735), True, 'import tkinter as tk\n'), ((1764, 1791), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(-180.0)'], {}), '(frame, -180.0)\n', (1776, 1791), True, 'import tkinter as tk\n'), ((1820, 1846), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(180.0)'], {}), '(frame, 180.0)\n', (1832, 1846), True, 'import tkinter as tk\n'), ((1875, 1907), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""Two-Theta"""'], {}), "(frame, 'Two-Theta')\n", (1887, 1907), True, 'import tkinter as tk\n'), ((1937, 1963), 'tkinter.BooleanVar', 'tk.BooleanVar', (['frame', '(True)'], {}), '(frame, True)\n', (1950, 1963), True, 'import tkinter as tk\n'), ((1992, 2017), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(0.01)'], {}), '(frame, 0.01)\n', (2004, 2017), True, 'import tkinter as tk\n'), ((2043, 2071), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""0 0 1"""'], {}), "(frame, '0 0 1')\n", (2055, 2071), True, 'import tkinter as tk\n'), ((2098, 2148), 'tkinter.StringVar', 'tk.StringVar', (['frame', "('I:%10.0f TTH:%8.2f' % (0, 0))"], {}), "(frame, 'I:%10.0f TTH:%8.2f' % (0, 0))\n", (2110, 2148), True, 'import tkinter as tk\n'), ((2170, 2189), 'tkinter.IntVar', 'tk.IntVar', (['frame', '(0)'], {}), '(frame, 0)\n', (2179, 2189), True, 'import tkinter as tk\n'), ((2218, 2246), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""0 0 1"""'], {}), "(frame, '0 0 1')\n", (2230, 2246), True, 'import tkinter as tk\n'), ((2272, 2300), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""1 0 0"""'], {}), "(frame, '1 0 0')\n", (2284, 2300), True, 'import tkinter as tk\n'), ((2322, 2348), 'tkinter.BooleanVar', 'tk.BooleanVar', (['frame', '(True)'], {}), '(frame, True)\n', (2335, 2348), True, 'import tkinter as tk\n'), ((2371, 2395), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(0.0)'], {}), '(frame, 0.0)\n', (2383, 2395), True, 'import tkinter as tk\n'), ((2418, 2445), 'tkinter.StringVar', 'tk.StringVar', (['frame', 
'u"""σ-π"""'], {}), "(frame, u'σ-π')\n", (2430, 2445), True, 'import tkinter as tk\n'), ((2477, 2501), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(0.0)'], {}), '(frame, 0.0)\n', (2489, 2501), True, 'import tkinter as tk\n'), ((2523, 2547), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(1.0)'], {}), '(frame, 1.0)\n', (2535, 2547), True, 'import tkinter as tk\n'), ((2569, 2593), 'tkinter.DoubleVar', 'tk.DoubleVar', (['frame', '(0.0)'], {}), '(frame, 0.0)\n', (2581, 2593), True, 'import tkinter as tk\n'), ((2619, 2648), 'tkinter.StringVar', 'tk.StringVar', (['frame', '"""I = --"""'], {}), "(frame, 'I = --')\n", (2631, 2648), True, 'import tkinter as tk\n'), ((2931, 2946), 'tkinter.Frame', 'tk.Frame', (['frame'], {}), '(frame)\n', (2939, 2946), True, 'import tkinter as tk\n'), ((3012, 3054), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Scattering"""', 'font': 'LF'}), "(line, text='Scattering', font=LF)\n", (3020, 3054), True, 'import tkinter as tk\n'), ((3101, 3213), 'tkinter.Button', 'tk.Button', (['line'], {'text': '"""Supernova"""', 'font': 'BF', 'command': 'self.fun_supernova', 'bg': 'btn', 'activebackground': 'btn_active'}), "(line, text='Supernova', font=BF, command=self.fun_supernova, bg=\n btn, activebackground=btn_active)\n", (3110, 3213), True, 'import tkinter as tk\n'), ((3279, 3380), 'tkinter.Button', 'tk.Button', (['line'], {'text': '"""Wish"""', 'font': 'BF', 'command': 'self.fun_wish', 'bg': 'btn', 'activebackground': 'btn_active'}), "(line, text='Wish', font=BF, command=self.fun_wish, bg=btn,\n activebackground=btn_active)\n", (3288, 3380), True, 'import tkinter as tk\n'), ((3423, 3522), 'tkinter.Button', 'tk.Button', (['line'], {'text': '"""I16"""', 'font': 'BF', 'command': 'self.fun_i16', 'bg': 'btn', 'activebackground': 'btn_active'}), "(line, text='I16', font=BF, command=self.fun_i16, bg=btn,\n activebackground=btn_active)\n", (3432, 3522), True, 'import tkinter as tk\n'), ((3591, 3628), 'tkinter.LabelFrame', 'tk.LabelFrame', (['frame'], {'text': '"""Settings"""'}), "(frame, text='Settings')\n", (3604, 3628), True, 'import tkinter as tk\n'), ((3722, 3735), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (3730, 3735), True, 'import tkinter as tk\n'), ((3800, 3845), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Energy (keV):"""', 'font': 'SF'}), "(line, text='Energy (keV):', font=SF)\n", (3808, 3845), True, 'import tkinter as tk\n'), ((3891, 3960), 'tkinter.OptionMenu', 'tk.OptionMenu', (['line', 'self.edge', '*self.xr_edges'], {'command': 'self.fun_edge'}), '(line, self.edge, *self.xr_edges, command=self.fun_edge)\n', (3904, 3960), True, 'import tkinter as tk\n'), ((4150, 4237), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.energy_kev', 'font': 'TF', 'width': '(8)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.energy_kev, font=TF, width=8, bg=ety, fg=\n ety_txt)\n', (4158, 4237), True, 'import tkinter as tk\n'), ((4295, 4308), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (4303, 4308), True, 'import tkinter as tk\n'), ((4483, 4520), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Type:"""', 'font': 'SF'}), "(line, text='Type:', font=SF)\n", (4491, 4520), True, 'import tkinter as tk\n'), ((4566, 4604), 'tkinter.OptionMenu', 'tk.OptionMenu', (['line', 'self.type', '*types'], {}), '(line, self.type, *types)\n', (4579, 4604), True, 'import tkinter as tk\n'), ((4865, 4903), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Units:"""', 'font': 'SF'}), "(line, text='Units:', font=SF)\n", (4873, 4903), 
True, 'import tkinter as tk\n'), ((4949, 5000), 'tkinter.OptionMenu', 'tk.OptionMenu', (['line', 'self.powder_units', '*xaxistypes'], {}), '(line, self.powder_units, *xaxistypes)\n', (4962, 5000), True, 'import tkinter as tk\n'), ((5215, 5228), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (5223, 5228), True, 'import tkinter as tk\n'), ((5293, 5334), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Geometry:"""', 'font': 'SF'}), "(line, text='Geometry:', font=SF)\n", (5301, 5334), True, 'import tkinter as tk\n'), ((5437, 5484), 'tkinter.OptionMenu', 'tk.OptionMenu', (['line', 'self.orientation', '*orients'], {}), '(line, self.orientation, *orients)\n', (5450, 5484), True, 'import tkinter as tk\n'), ((5696, 5738), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Direction:"""', 'font': 'SF'}), "(line, text='Direction:', font=SF)\n", (5704, 5738), True, 'import tkinter as tk\n'), ((5784, 5872), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.direction_h', 'font': 'TF', 'width': '(2)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.direction_h, font=TF, width=2, bg=ety, fg=\n ety_txt)\n', (5792, 5872), True, 'import tkinter as tk\n'), ((5913, 6001), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.direction_k', 'font': 'TF', 'width': '(2)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.direction_k, font=TF, width=2, bg=ety, fg=\n ety_txt)\n', (5921, 6001), True, 'import tkinter as tk\n'), ((6042, 6130), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.direction_l', 'font': 'TF', 'width': '(2)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.direction_l, font=TF, width=2, bg=ety, fg=\n ety_txt)\n', (6050, 6130), True, 'import tkinter as tk\n'), ((6196, 6209), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (6204, 6209), True, 'import tkinter as tk\n'), ((6274, 6313), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Offset:"""', 'font': 'SF'}), "(line, text='Offset:', font=SF)\n", (6282, 6313), True, 'import tkinter as tk\n'), ((6359, 6448), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.theta_offset', 'font': 'TF', 'width': '(5)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.theta_offset, font=TF, width=5, bg=ety, fg\n =ety_txt)\n', (6367, 6448), True, 'import tkinter as tk\n'), ((6510, 6552), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Min Theta:"""', 'font': 'SF'}), "(line, text='Min Theta:', font=SF)\n", (6518, 6552), True, 'import tkinter as tk\n'), ((6598, 6684), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.theta_min', 'font': 'TF', 'width': '(5)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.theta_min, font=TF, width=5, bg=ety, fg=\n ety_txt)\n', (6606, 6684), True, 'import tkinter as tk\n'), ((6746, 6788), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Max Theta:"""', 'font': 'SF'}), "(line, text='Max Theta:', font=SF)\n", (6754, 6788), True, 'import tkinter as tk\n'), ((6834, 6920), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.theta_max', 'font': 'TF', 'width': '(5)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.theta_max, font=TF, width=5, bg=ety, fg=\n ety_txt)\n', (6842, 6920), True, 'import tkinter as tk\n'), ((6986, 6999), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (6994, 6999), True, 'import tkinter as tk\n'), ((7064, 7109), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Min TwoTheta:"""', 'font': 'SF'}), "(line, text='Min TwoTheta:', font=SF)\n", (7072, 
7109), True, 'import tkinter as tk\n'), ((7155, 7244), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.twotheta_min', 'font': 'TF', 'width': '(5)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.twotheta_min, font=TF, width=5, bg=ety, fg\n =ety_txt)\n', (7163, 7244), True, 'import tkinter as tk\n'), ((7309, 7398), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.twotheta_max', 'font': 'TF', 'width': '(5)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.twotheta_max, font=TF, width=5, bg=ety, fg\n =ety_txt)\n', (7317, 7398), True, 'import tkinter as tk\n'), ((7440, 7485), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Max TwoTheta:"""', 'font': 'SF'}), "(line, text='Max TwoTheta:', font=SF)\n", (7448, 7485), True, 'import tkinter as tk\n'), ((7557, 7570), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (7565, 7570), True, 'import tkinter as tk\n'), ((7635, 7685), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""Powder peak width:"""', 'font': 'SF'}), "(line, text='Powder peak width:', font=SF)\n", (7643, 7685), True, 'import tkinter as tk\n'), ((7739, 7828), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.powder_width', 'font': 'TF', 'width': '(5)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.powder_width, font=TF, width=5, bg=ety, fg\n =ety_txt)\n', (7747, 7828), True, 'import tkinter as tk\n'), ((7903, 7988), 'tkinter.Checkbutton', 'tk.Checkbutton', (['line'], {'text': '"""Powder average"""', 'variable': 'self.powderaverage', 'font': 'SF'}), "(line, text='Powder average', variable=self.powderaverage,\n font=SF)\n", (7917, 7988), True, 'import tkinter as tk\n'), ((8067, 8107), 'tkinter.LabelFrame', 'tk.LabelFrame', (['frame'], {'text': '"""Intensities"""'}), "(frame, text='Intensities')\n", (8080, 8107), True, 'import tkinter as tk\n'), ((8184, 8197), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (8192, 8197), True, 'import tkinter as tk\n'), ((8263, 8388), 'tkinter.Button', 'tk.Button', (['line'], {'text': '"""Display Intensities"""', 'font': 'BF', 'command': 'self.fun_intensities', 'bg': 'btn2', 'activebackground': 'btn_active'}), "(line, text='Display Intensities', font=BF, command=self.\n fun_intensities, bg=btn2, activebackground=btn_active)\n", (8272, 8388), True, 'import tkinter as tk\n'), ((8454, 8565), 'tkinter.Button', 'tk.Button', (['line'], {'text': '"""Plot Powder"""', 'font': 'BF', 'command': 'self.fun_powder', 'bg': 'btn', 'activebackground': 'btn_active'}), "(line, text='Plot Powder', font=BF, command=self.fun_powder, bg=\n btn, activebackground=btn_active)\n", (8463, 8565), True, 'import tkinter as tk\n'), ((8652, 8665), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (8660, 8665), True, 'import tkinter as tk\n'), ((8733, 8772), 'tkinter.LabelFrame', 'tk.LabelFrame', (['line'], {'text': '"""Quick Check"""'}), "(line, text='Quick Check')\n", (8746, 8772), True, 'import tkinter as tk\n'), ((8822, 8910), 'tkinter.Entry', 'tk.Entry', (['hklbox'], {'textvariable': 'self.hkl_check', 'font': 'TF', 'width': '(6)', 'bg': 'ety', 'fg': 'ety_txt'}), '(hklbox, textvariable=self.hkl_check, font=TF, width=6, bg=ety, fg=\n ety_txt)\n', (8830, 8910), True, 'import tkinter as tk\n'), ((9049, 9114), 'tkinter.Label', 'tk.Label', (['hklbox'], {'textvariable': 'self.hkl_result', 'font': 'TF', 'width': '(22)'}), '(hklbox, textvariable=self.hkl_result, font=TF, width=22)\n', (9057, 9114), True, 'import tkinter as tk\n'), ((9160, 9273), 'tkinter.Button', 'tk.Button', (['hklbox'], 
{'text': '"""Check HKL"""', 'font': 'BF', 'command': 'self.fun_hklcheck', 'bg': 'btn', 'activebackground': 'btn_active'}), "(hklbox, text='Check HKL', font=BF, command=self.fun_hklcheck, bg=\n btn, activebackground=btn_active)\n", (9169, 9273), True, 'import tkinter as tk\n'), ((9369, 9421), 'tkinter.LabelFrame', 'tk.LabelFrame', (['frame'], {'text': '"""Reciprocal Space Planes"""'}), "(frame, text='Reciprocal Space Planes')\n", (9382, 9421), True, 'import tkinter as tk\n'), ((9498, 9511), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (9506, 9511), True, 'import tkinter as tk\n'), ((9611, 9645), 'tkinter.Label', 'tk.Label', (['line'], {'text': '"""i:"""', 'font': 'SF'}), "(line, text='i:', font=SF)\n", (9619, 9645), True, 'import tkinter as tk\n'), ((9691, 9768), 'tkinter.Entry', 'tk.Entry', (['line'], {'textvariable': 'self.val_i', 'font': 'TF', 'width': '(3)', 'bg': 'ety', 'fg': 'ety_txt'}), '(line, textvariable=self.val_i, font=TF, width=3, bg=ety, fg=ety_txt)\n', (9699, 9768), True, 'import tkinter as tk\n'), ((9839, 9853), 'tkinter.Frame', 'tk.Frame', (['line'], {}), '(line)\n', (9847, 9853), True, 'import tkinter as tk\n'), ((9910, 10021), 'tkinter.Button', 'tk.Button', (['vframe'], {'text': '"""HKi"""', 'font': 'BF', 'command': 'self.fun_hki', 'width': '(5)', 'bg': 'btn', 'activebackground': 'btn_active'}), "(vframe, text='HKi', font=BF, command=self.fun_hki, width=5, bg=\n btn, activebackground=btn_active)\n", (9919, 10021), True, 'import tkinter as tk\n'), ((10050, 10161), 'tkinter.Button', 'tk.Button', (['vframe'], {'text': '"""HiL"""', 'font': 'BF', 'command': 'self.fun_hil', 'width': '(5)', 'bg': 'btn', 'activebackground': 'btn_active'}), "(vframe, text='HiL', font=BF, command=self.fun_hil, width=5, bg=\n btn, activebackground=btn_active)\n", (10059, 10161), True, 'import tkinter as tk\n'), ((10194, 10208), 'tkinter.Frame', 'tk.Frame', (['line'], {}), '(line)\n', (10202, 10208), True, 'import tkinter as tk\n'), ((10257, 10368), 'tkinter.Button', 'tk.Button', (['vframe'], {'text': '"""iKL"""', 'font': 'BF', 'command': 'self.fun_ikl', 'width': '(5)', 'bg': 'btn', 'activebackground': 'btn_active'}), "(vframe, text='iKL', font=BF, command=self.fun_ikl, width=5, bg=\n btn, activebackground=btn_active)\n", (10266, 10368), True, 'import tkinter as tk\n'), ((10397, 10508), 'tkinter.Button', 'tk.Button', (['vframe'], {'text': '"""HHi"""', 'font': 'BF', 'command': 'self.fun_hhi', 'width': '(5)', 'bg': 'btn', 'activebackground': 'btn_active'}), "(vframe, text='HHi', font=BF, command=self.fun_hhi, width=5, bg=\n btn, activebackground=btn_active)\n", (10406, 10508), True, 'import tkinter as tk\n'), ((18799, 18826), 'numpy.fromstring', 'np.fromstring', (['hkl'], {'sep': '""" """'}), "(hkl, sep=' ')\n", (18812, 18826), True, 'import numpy as np\n'), ((20568, 20578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20576, 20578), True, 'import matplotlib.pyplot as plt\n'), ((20730, 20740), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20738, 20740), True, 'import matplotlib.pyplot as plt\n'), ((20892, 20902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20900, 20902), True, 'import matplotlib.pyplot as plt\n'), ((21054, 21064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21062, 21064), True, 'import matplotlib.pyplot as plt\n'), ((21216, 21226), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21224, 21226), True, 'import matplotlib.pyplot as plt\n'), ((21592, 21619), 'numpy.fromstring', 'np.fromstring', (['hkl'], {'sep': '""" 
"""'}), "(hkl, sep=' ')\n", (21605, 21619), True, 'import numpy as np\n'), ((21865, 21892), 'numpy.fromstring', 'np.fromstring', (['azi'], {'sep': '""" """'}), "(azi, sep=' ')\n", (21878, 21892), True, 'import numpy as np\n'), ((23269, 23296), 'numpy.fromstring', 'np.fromstring', (['hkl'], {'sep': '""" """'}), "(hkl, sep=' ')\n", (23282, 23296), True, 'import numpy as np\n'), ((23542, 23569), 'numpy.fromstring', 'np.fromstring', (['azi'], {'sep': '""" """'}), "(azi, sep=' ')\n", (23555, 23569), True, 'import numpy as np\n'), ((10633, 10687), 'tkinter.LabelFrame', 'tk.LabelFrame', (['frame'], {'text': '"""X-Ray Magnetic Scattering"""'}), "(frame, text='X-Ray Magnetic Scattering')\n", (10646, 10687), True, 'import tkinter as tk\n'), ((10764, 10777), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (10772, 10777), True, 'import tkinter as tk\n'), ((10905, 10919), 'tkinter.Frame', 'tk.Frame', (['line'], {}), '(line)\n', (10913, 10919), True, 'import tkinter as tk\n'), ((10999, 11015), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (11007, 11015), True, 'import tkinter as tk\n'), ((11060, 11115), 'tkinter.Label', 'tk.Label', (['hframe'], {'text': '""" HKL:"""', 'font': 'SF', 'width': '(11)'}), "(hframe, text=' HKL:', font=SF, width=11)\n", (11068, 11115), True, 'import tkinter as tk\n'), ((11169, 11259), 'tkinter.Entry', 'tk.Entry', (['hframe'], {'textvariable': 'self.hkl_magnetic', 'font': 'TF', 'width': '(6)', 'bg': 'ety', 'fg': 'ety_txt'}), '(hframe, textvariable=self.hkl_magnetic, font=TF, width=6, bg=ety,\n fg=ety_txt)\n', (11177, 11259), True, 'import tkinter as tk\n'), ((11415, 11431), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (11423, 11431), True, 'import tkinter as tk\n'), ((11476, 11531), 'tkinter.Label', 'tk.Label', (['vframe'], {'text': '"""Azim. Ref.:"""', 'font': 'SF', 'width': '(11)'}), "(vframe, text='Azim. 
Ref.:', font=SF, width=11)\n", (11484, 11531), True, 'import tkinter as tk\n'), ((11585, 11673), 'tkinter.Entry', 'tk.Entry', (['vframe'], {'textvariable': 'self.azim_zero', 'font': 'TF', 'width': '(6)', 'bg': 'ety', 'fg': 'ety_txt'}), '(vframe, textvariable=self.azim_zero, font=TF, width=6, bg=ety, fg=\n ety_txt)\n', (11593, 11673), True, 'import tkinter as tk\n'), ((11755, 11769), 'tkinter.Frame', 'tk.Frame', (['line'], {}), '(line)\n', (11763, 11769), True, 'import tkinter as tk\n'), ((11849, 11865), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (11857, 11865), True, 'import tkinter as tk\n'), ((11910, 11947), 'tkinter.Label', 'tk.Label', (['hframe'], {'text': '"""F0:"""', 'font': 'SF'}), "(hframe, text='F0:', font=SF)\n", (11918, 11947), True, 'import tkinter as tk\n'), ((12001, 12080), 'tkinter.Entry', 'tk.Entry', (['hframe'], {'textvariable': 'self.resF0', 'font': 'TF', 'width': '(3)', 'bg': 'ety', 'fg': 'ety_txt'}), '(hframe, textvariable=self.resF0, font=TF, width=3, bg=ety, fg=ety_txt)\n', (12009, 12080), True, 'import tkinter as tk\n'), ((12138, 12154), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (12146, 12154), True, 'import tkinter as tk\n'), ((12199, 12236), 'tkinter.Label', 'tk.Label', (['hframe'], {'text': '"""F1:"""', 'font': 'SF'}), "(hframe, text='F1:', font=SF)\n", (12207, 12236), True, 'import tkinter as tk\n'), ((12290, 12369), 'tkinter.Entry', 'tk.Entry', (['hframe'], {'textvariable': 'self.resF1', 'font': 'TF', 'width': '(3)', 'bg': 'ety', 'fg': 'ety_txt'}), '(hframe, textvariable=self.resF1, font=TF, width=3, bg=ety, fg=ety_txt)\n', (12298, 12369), True, 'import tkinter as tk\n'), ((12427, 12443), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (12435, 12443), True, 'import tkinter as tk\n'), ((12488, 12525), 'tkinter.Label', 'tk.Label', (['hframe'], {'text': '"""F2:"""', 'font': 'SF'}), "(hframe, text='F2:', font=SF)\n", (12496, 12525), True, 'import tkinter as tk\n'), ((12579, 12658), 'tkinter.Entry', 'tk.Entry', (['hframe'], {'textvariable': 'self.resF2', 'font': 'TF', 'width': '(3)', 'bg': 'ety', 'fg': 'ety_txt'}), '(hframe, textvariable=self.resF2, font=TF, width=3, bg=ety, fg=ety_txt)\n', (12587, 12658), True, 'import tkinter as tk\n'), ((12716, 12730), 'tkinter.Frame', 'tk.Frame', (['line'], {}), '(line)\n', (12724, 12730), True, 'import tkinter as tk\n'), ((12933, 12949), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (12941, 12949), True, 'import tkinter as tk\n'), ((12994, 13041), 'tkinter.Label', 'tk.Label', (['hframe'], {'text': '"""Polarisation:"""', 'font': 'SF'}), "(hframe, text='Polarisation:', font=SF)\n", (13002, 13041), True, 'import tkinter as tk\n'), ((13095, 13140), 'tkinter.OptionMenu', 'tk.OptionMenu', (['hframe', 'self.polval', '*poltypes'], {}), '(hframe, self.polval, *poltypes)\n', (13108, 13140), True, 'import tkinter as tk\n'), ((13350, 13366), 'tkinter.Frame', 'tk.Frame', (['vframe'], {}), '(vframe)\n', (13358, 13366), True, 'import tkinter as tk\n'), ((13443, 13512), 'tkinter.Checkbutton', 'tk.Checkbutton', (['hframe'], {'text': '"""Resonant"""', 'variable': 'self.isres', 'font': 'SF'}), "(hframe, text='Resonant', variable=self.isres, font=SF)\n", (13457, 13512), True, 'import tkinter as tk\n'), ((13592, 13639), 'tkinter.Label', 'tk.Label', (['hframe'], {'text': '"""psi:"""', 'font': 'SF', 'width': '(4)'}), "(hframe, text='psi:', font=SF, width=4)\n", (13600, 13639), True, 'import tkinter as tk\n'), ((13693, 13778), 'tkinter.Entry', 'tk.Entry', (['hframe'], {'textvariable': 
'self.psival', 'font': 'TF', 'width': '(4)', 'bg': 'ety', 'fg': 'ety_txt'}), '(hframe, textvariable=self.psival, font=TF, width=4, bg=ety, fg=ety_txt\n )\n', (13701, 13778), True, 'import tkinter as tk\n'), ((13931, 13944), 'tkinter.Frame', 'tk.Frame', (['box'], {}), '(box)\n', (13939, 13944), True, 'import tkinter as tk\n'), ((14024, 14038), 'tkinter.Frame', 'tk.Frame', (['line'], {}), '(line)\n', (14032, 14038), True, 'import tkinter as tk\n'), ((14147, 14266), 'tkinter.Button', 'tk.Button', (['vframe'], {'text': '"""Calc. Mag. Inten."""', 'font': 'BF', 'command': 'self.fun_hklmag', 'bg': 'btn', 'activebackground': 'btn_active'}), "(vframe, text='Calc. Mag. Inten.', font=BF, command=self.\n fun_hklmag, bg=btn, activebackground=btn_active)\n", (14156, 14266), True, 'import tkinter as tk\n'), ((14381, 14445), 'tkinter.Label', 'tk.Label', (['vframe'], {'textvariable': 'self.magresult', 'font': 'SF', 'width': '(12)'}), '(vframe, textvariable=self.magresult, font=SF, width=12)\n', (14389, 14445), True, 'import tkinter as tk\n'), ((14540, 14671), 'tkinter.Button', 'tk.Button', (['line'], {'text': '"""Simulate\n Azimuth"""', 'font': 'BF', 'command': 'self.fun_azimuth', 'width': '(7)', 'bg': 'btn', 'activebackground': 'btn_active'}), '(line, text="""Simulate\n Azimuth""", font=BF, command=self.\n fun_azimuth, width=7, bg=btn, activebackground=btn_active)\n', (14549, 14671), True, 'import tkinter as tk\n'), ((24229, 24239), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24237, 24239), True, 'import matplotlib.pyplot as plt\n'), ((24485, 24495), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (24493, 24495), True, 'import matplotlib.pyplot as plt\n')]
|
from time import clock
import sys
sys.path.append('../../../')
print (sys.path)
from tspdb.src.data import generateHarmonics as gH
from tspdb.src.data import generateTrend as gT
import tspdb.src.data.generateARMA as gA
import numpy as np
from tspdb.src.hdf_util import write_data
import matplotlib.pyplot as plt
def armaDataTest(timeSteps):
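    # ARMA sample path from gA.generate; with the empty AR/MA lag lists below this is essentially just the noise process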
arLags = []#[0.4, 0.3, 0.2]
maLags = []#[0.5, 0.1]
startingArray = np.zeros(np.max([len(arLags), len(maLags)])) # start with all 0's
noiseMean = 0.0
noiseSD = [1.0]
(observedArray, meanArray, errorArray) = gA.generate(arLags, maLags, startingArray, timeSteps, noiseMean, noiseSD)
return (observedArray, meanArray)
def trendDataTest(timeSteps):
dampening = 2.0*float(1.0/timeSteps)
power = 0.35
displacement = -2.5
f1 = gT.linearTrendFn
data = gT.generate(f1, power=power, displacement=displacement, timeSteps=timeSteps)
f2 = gT.logTrendFn
f3 = gT.negExpTrendFn
return data
def harmonicDataTest(timeSteps):
sineCoeffs = [-2.0, 3.0]
sinePeriods = [560.0, 30.0]
cosineCoeffs = [-2.5]
cosinePeriods = [16.0]
data = gH.generate(sineCoeffs, sinePeriods, cosineCoeffs, cosinePeriods, timeSteps)
#plt.plot(data)
#plt.show()
return data
timeSteps = 10**5 +10000
print('generating data..')
dt = clock()
harmonicsTS = harmonicDataTest(timeSteps)
trendTS = trendDataTest(timeSteps)
(armaTS, armaMeanTS) = armaDataTest(timeSteps)
meanTS = harmonicsTS + trendTS #+ armaMeanTS
# combinedTS = harmonicsTS + trendTS + armaTS
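# Heteroscedastic observation noise: the shifted harmonic signal is used as a time-varying variance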
var = harmonicsTS
var = (var - min(var))
errorArray = np.random.normal(0, np.sqrt(var[:timeSteps]), timeSteps)
combinedTS = meanTS + errorArray
# max1 = np.nanmax(combinedTS)
# min1 = np.nanmin(combinedTS)
# max2 = np.nanmax(meanTS)
# min2 = np.nanmin(meanTS)
# max = np.max([max1, max2])
# min = np.min([min1, min2])
# combinedTS = tsUtils.normalize(combinedTS, max, min)
# meanTS = tsUtils.normalize(meanTS, max, min)
# p = 1
plt.plot(combinedTS, label = 'obs')
plt.plot(meanTS, label = 'mean')
plt.plot(var, label = 'var')
plt.show()
print('Data Generated in ', clock() - dt)
write_data('MixtureTS_var2.h5', 'means', meanTS)
write_data('MixtureTS_var2.h5', 'obs', combinedTS,'a')
write_data('MixtureTS_var2.h5', 'var', var,'a')
# DF = pd.DataFrame()
# DF['means'] = meanTS
# DF['Obs'] = combinedTS
# DF['trainData'] = trainData
# DF.to_hdf('MixtureTS.h5','ts1')
|
[
"numpy.sqrt",
"time.clock",
"tspdb.src.data.generateARMA.generate",
"tspdb.src.data.generateTrend.generate",
"matplotlib.pyplot.plot",
"tspdb.src.hdf_util.write_data",
"tspdb.src.data.generateHarmonics.generate",
"sys.path.append",
"matplotlib.pyplot.show"
] |
[((36, 64), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (51, 64), False, 'import sys\n'), ((1375, 1382), 'time.clock', 'clock', ([], {}), '()\n', (1380, 1382), False, 'from time import clock\n'), ((2049, 2082), 'matplotlib.pyplot.plot', 'plt.plot', (['combinedTS'], {'label': '"""obs"""'}), "(combinedTS, label='obs')\n", (2057, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2086, 2116), 'matplotlib.pyplot.plot', 'plt.plot', (['meanTS'], {'label': '"""mean"""'}), "(meanTS, label='mean')\n", (2094, 2116), True, 'import matplotlib.pyplot as plt\n'), ((2120, 2146), 'matplotlib.pyplot.plot', 'plt.plot', (['var'], {'label': '"""var"""'}), "(var, label='var')\n", (2128, 2146), True, 'import matplotlib.pyplot as plt\n'), ((2150, 2160), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2158, 2160), True, 'import matplotlib.pyplot as plt\n'), ((2207, 2255), 'tspdb.src.hdf_util.write_data', 'write_data', (['"""MixtureTS_var2.h5"""', '"""means"""', 'meanTS'], {}), "('MixtureTS_var2.h5', 'means', meanTS)\n", (2217, 2255), False, 'from tspdb.src.hdf_util import write_data\n'), ((2257, 2312), 'tspdb.src.hdf_util.write_data', 'write_data', (['"""MixtureTS_var2.h5"""', '"""obs"""', 'combinedTS', '"""a"""'], {}), "('MixtureTS_var2.h5', 'obs', combinedTS, 'a')\n", (2267, 2312), False, 'from tspdb.src.hdf_util import write_data\n'), ((2313, 2361), 'tspdb.src.hdf_util.write_data', 'write_data', (['"""MixtureTS_var2.h5"""', '"""var"""', 'var', '"""a"""'], {}), "('MixtureTS_var2.h5', 'var', var, 'a')\n", (2323, 2361), False, 'from tspdb.src.hdf_util import write_data\n'), ((590, 663), 'tspdb.src.data.generateARMA.generate', 'gA.generate', (['arLags', 'maLags', 'startingArray', 'timeSteps', 'noiseMean', 'noiseSD'], {}), '(arLags, maLags, startingArray, timeSteps, noiseMean, noiseSD)\n', (601, 663), True, 'import tspdb.src.data.generateARMA as gA\n'), ((860, 936), 'tspdb.src.data.generateTrend.generate', 'gT.generate', (['f1'], {'power': 'power', 'displacement': 'displacement', 'timeSteps': 'timeSteps'}), '(f1, power=power, displacement=displacement, timeSteps=timeSteps)\n', (871, 936), True, 'from tspdb.src.data import generateTrend as gT\n'), ((1173, 1249), 'tspdb.src.data.generateHarmonics.generate', 'gH.generate', (['sineCoeffs', 'sinePeriods', 'cosineCoeffs', 'cosinePeriods', 'timeSteps'], {}), '(sineCoeffs, sinePeriods, cosineCoeffs, cosinePeriods, timeSteps)\n', (1184, 1249), True, 'from tspdb.src.data import generateHarmonics as gH\n'), ((1682, 1706), 'numpy.sqrt', 'np.sqrt', (['var[:timeSteps]'], {}), '(var[:timeSteps])\n', (1689, 1706), True, 'import numpy as np\n'), ((2190, 2197), 'time.clock', 'clock', ([], {}), '()\n', (2195, 2197), False, 'from time import clock\n')]
|
import os
import sys
import math
import numpy as np
import pandas as pd
sys.path.append(os.getcwd())
pdata = pd.read_csv(os.getcwd() + '/Bayes/data/train_data.csv', header=None)
test_data = pd.read_csv(os.getcwd() + '/Bayes/data/test_data.csv', header=None)
npdata = pd.DataFrame(pdata).values
final_test = pd.DataFrame(test_data).values
def get_params(data):
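    # Estimate the per-feature mean and variance (diagonal Gaussian) from one class's samples; column 0 holds the label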
X = data[:, 1:]
mu = X.sum(axis=0) / len(X)
X -= mu
delta=np.sum(X ** 2, axis=0) / len(X)
return mu, delta
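# Split the training samples by class label (column 0) and estimate the class priors from the class frequencies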
w1 = np.array(list(filter(lambda x: x[0] == 1, npdata)))
w2 = np.array(list(filter(lambda x: x[0] == 2, npdata)))
w3 = np.array(list(filter(lambda x: x[0] == 3, npdata)))
super_p1 = len(w1) / len(npdata)
super_p2 = len(w2) / len(npdata)
super_p3 = len(w3) / len(npdata)
def calc_prob(x, mu, delta):
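    # Naive-Bayes likelihood: for each sample, the product over features of the univariate normal pdf N(x_i; mu_i, delta_i)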
f = []
for row in x:
base = 1
for i in range(len(row)):
base *= (1 / math.sqrt(2 * math.pi * delta[i]) * math.exp(-(row[i] - mu[i]) ** 2 / (2 * delta[i])))
f.append(base)
f = np.array(f)
return f
a, b = get_params(w1)
x_prob1 = calc_prob(final_test[:, 1:], a, b) * super_p1
a, b = get_params(w2)
x_prob2 = calc_prob(final_test[:, 1:], a, b) * super_p2
a, b = get_params(w3)
x_prob3 = calc_prob(final_test[:, 1:], a, b) * super_p3
cnt = 0
res = final_test
for i in range(len(x_prob3)):
if x_prob1[i] == max(x_prob1[i], x_prob2[i], x_prob3[i]):
res[i][0] = 1
if final_test[i][0] == 1:
cnt = cnt + 1
continue
if x_prob2[i] == max(x_prob1[i], x_prob2[i], x_prob3[i]):
res[i][0] = 2
if final_test[i][0] == 2:
cnt = cnt + 1
continue
if x_prob3[i] == max(x_prob1[i], x_prob2[i], x_prob3[i]):
res[i][0] = 3
if final_test[i][0] == 3:
cnt = cnt + 1
continue
res = pd.DataFrame(res)
res.to_csv('test_prediction.csv', index=False, sep=',', header=None)
print ('prediction acc: ', cnt / len(final_test))
|
[
"math.sqrt",
"os.getcwd",
"numpy.array",
"numpy.sum",
"pandas.DataFrame",
"math.exp"
] |
[((1836, 1853), 'pandas.DataFrame', 'pd.DataFrame', (['res'], {}), '(res)\n', (1848, 1853), True, 'import pandas as pd\n'), ((88, 99), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (97, 99), False, 'import os\n'), ((269, 288), 'pandas.DataFrame', 'pd.DataFrame', (['pdata'], {}), '(pdata)\n', (281, 288), True, 'import pandas as pd\n'), ((309, 332), 'pandas.DataFrame', 'pd.DataFrame', (['test_data'], {}), '(test_data)\n', (321, 332), True, 'import pandas as pd\n'), ((1017, 1028), 'numpy.array', 'np.array', (['f'], {}), '(f)\n', (1025, 1028), True, 'import numpy as np\n'), ((122, 133), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (131, 133), False, 'import os\n'), ((203, 214), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (212, 214), False, 'import os\n'), ((438, 460), 'numpy.sum', 'np.sum', (['(X ** 2)'], {'axis': '(0)'}), '(X ** 2, axis=0)\n', (444, 460), True, 'import numpy as np\n'), ((935, 984), 'math.exp', 'math.exp', (['(-(row[i] - mu[i]) ** 2 / (2 * delta[i]))'], {}), '(-(row[i] - mu[i]) ** 2 / (2 * delta[i]))\n', (943, 984), False, 'import math\n'), ((899, 932), 'math.sqrt', 'math.sqrt', (['(2 * math.pi * delta[i])'], {}), '(2 * math.pi * delta[i])\n', (908, 932), False, 'import math\n')]
|
#!/usr/bin/env python
import tensorflow as tf
import numpy as np
import time
# shortcut
tfs = tf.sparse
logger = tf.compat.v1.logging
# eager execution
#tf.enable_eager_execution()
config = tf.ConfigProto()
config.inter_op_parallelism_threads = 4
config.intra_op_parallelism_threads = 4
tf.compat.v1.enable_eager_execution(config=config)
class ValueModel(tf.keras.Model):
def __init__(self,input_dim,hidden_dim=100):
super(ValueModel, self).__init__()
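      # convolutional front end (28 filters, 4x4 kernel) over the (input_dim x input_dim) single-channel input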
self.cnn = tf.keras.layers.Conv2D(28,4,input_shape=(input_dim,input_dim))
self.flatten = tf.keras.layers.Flatten()
      # this is our dense hidden layer with a ReLU activation that will encode most of our information
self.hidden_layer = tf.keras.layers.Dense(100,'relu',use_bias=False)
# then we reduce to a single output with a tanh activation
# we use tanh because -1 <= tanh(x) <= 1 and we will build a reward system based on a range -1 to 1
self.output_layer = tf.keras.layers.Dense(1,'tanh',use_bias=False)
def call(self,input):
# this is the function used to actually evaluate our model on input data
x = self.cnn(input)
x = self.flatten(x)
x = self.hidden_layer(x)
x = self.output_layer(x)
return x
def main():
flags = tf.app.flags
logger.set_verbosity(tf.logging.INFO)
ranks = [2 ** 7,2 ** 8,2 ** 9,2 ** 10,2 ** 11,2 ** 12]
funcs = [ss_add,sd_add,ss_matmul,sd_matmul]
dim = 100
model = ValueModel(dim)
test = np.random.randn(50,dim,dim,1)
result = model(test)
logger.info('result = %s',result)
data = {}
for func in funcs:
logger.info(func.__name__)
func_data = []
for rank in ranks:
min,mean,sigma = timer(func,100,rank)
logger.info(" \t%10d\t%6.3f\t%6.3f\t%6.3f",rank,min,mean,sigma)
         func_data.append((rank,min,mean,sigma))
data[func.__name__] = func_data
def timer(func,tests,*argv):
sum = 0.
sum2 = 0.
n = 0
min = 999999999.
for _ in range(tests):
#start = time.time()
diff = func(*argv)
#end = time.time()
# diff = end - start
sum += diff
sum2 += diff ** 2
n += 1
if diff < min:
min = diff
mean = sum / n
   sigma = np.sqrt(sum2 / n - mean ** 2)  # std dev: sqrt(E[x^2] - (E[x])^2)
return min,mean,sigma
####
# tfs.add with sparse + sparse
def ss_add(rank=1000,dim=2,sparsity_mean=0.5,sparsity_sigma=10):
# number of points to fill in the matrix
a_np = int(np.random.normal(sparsity_mean * rank,sparsity_sigma))
if a_np <= 0:
raise Exception(f'produced sparse tensor with no entries, settings:\n rank={rank}\n' +
f' dim={dim}\n sparsity_mean={sparsity_mean} sparsity_sigma={sparsity_sigma}')
logger.debug('a_np = %s',a_np)
a = tfs.SparseTensor(indices=np.random.randint(0,rank,(a_np,dim)),
values=np.random.randn(a_np),
dense_shape=[rank] * dim)
b = tfs.SparseTensor(indices=np.random.randint(0,rank,(a_np,dim)),
values=np.random.randn(a_np),
dense_shape=[rank] * dim)
start = time.time()
c = tfs.add(a,b)
end = time.time()
return end - start
####
# tfs.add with sparse + dense
def sd_add(rank=1000,dim=2,sparsity_mean=0.5,sparsity_sigma=10):
# number of points to fill in the matrix
a_np = int(np.random.normal(sparsity_mean * rank,sparsity_sigma))
if a_np <= 0:
raise Exception(f'produced sparse tensor with no entries, settings:\n rank={rank}\n' +
f' dim={dim}\n sparsity_mean={sparsity_mean} sparsity_sigma={sparsity_sigma}')
logger.debug('a_np = %s',a_np)
a = tfs.SparseTensor(indices=np.random.randint(0,rank,(a_np,dim)),
values=np.random.randn(a_np),
dense_shape=[rank] * dim)
b = np.random.randn(rank ** dim)
b = np.reshape(b,[rank] * dim)
start = time.time()
c = tfs.add(a,b)
end = time.time()
return end - start
####
# element-wise product of two sparse tensors (both densified with tfs.to_dense first)
def ss_matmul(rank=1000,dim=2,sparsity_mean=0.5,sparsity_sigma=10):
# number of points to fill in the matrix
a_np = int(np.random.normal(sparsity_mean * rank,sparsity_sigma))
if a_np <= 0:
raise Exception(f'produced sparse tensor with no entries, settings:\n rank={rank}\n' +
f' dim={dim}\n sparsity_mean={sparsity_mean} sparsity_sigma={sparsity_sigma}')
logger.debug('a_np = %s',a_np)
a = tfs.SparseTensor(indices=np.random.randint(0,rank,(a_np,dim)),
values=np.random.randn(a_np),
dense_shape=[rank] * dim)
b = tfs.SparseTensor(indices=np.random.randint(0,rank,(a_np,dim)),
values=np.random.randn(a_np),
dense_shape=[rank] * dim)
start = time.time()
c = tfs.to_dense(a,0.,validate_indices=False) * tfs.to_dense(b,0.,validate_indices=False)
end = time.time()
return end - start
####
# tfs.sparse_dense_matmul with sparse x dense
def sd_matmul(rank=1000,dim=2,sparsity_mean=0.5,sparsity_sigma=10):
# number of points to fill in the matrix
a_np = int(np.random.normal(sparsity_mean * rank,sparsity_sigma))
if a_np <= 0:
raise Exception(f'produced sparse tensor with no entries, settings:\n rank={rank}\n' +
f' dim={dim}\n sparsity_mean={sparsity_mean} sparsity_sigma={sparsity_sigma}')
logger.debug('a_np = %s',a_np)
a = tfs.SparseTensor(indices=np.random.randint(0,rank,(a_np,dim)),
values=np.random.randn(a_np),
dense_shape=[rank] * dim)
b = np.random.randn(rank ** dim)
b = np.reshape(b,[rank] * dim)
start = time.time()
c = tfs.sparse_dense_matmul(a,b)
end = time.time()
return end - start
if __name__ == "__main__":
main()
|
[
"numpy.random.normal",
"numpy.sqrt",
"numpy.reshape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Flatten",
"numpy.random.randint",
"tensorflow.keras.layers.Dense",
"tensorflow.compat.v1.enable_eager_execution",
"time.time",
"tensorflow.ConfigProto",
"numpy.random.randn"
] |
[((192, 208), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (206, 208), True, 'import tensorflow as tf\n'), ((289, 339), 'tensorflow.compat.v1.enable_eager_execution', 'tf.compat.v1.enable_eager_execution', ([], {'config': 'config'}), '(config=config)\n', (324, 339), True, 'import tensorflow as tf\n'), ((1486, 1518), 'numpy.random.randn', 'np.random.randn', (['(50)', 'dim', 'dim', '(1)'], {}), '(50, dim, dim, 1)\n', (1501, 1518), True, 'import numpy as np\n'), ((2243, 2280), 'numpy.sqrt', 'np.sqrt', (['(1.0 / n * (sum2 - mean ** 2))'], {}), '(1.0 / n * (sum2 - mean ** 2))\n', (2250, 2280), True, 'import numpy as np\n'), ((3129, 3140), 'time.time', 'time.time', ([], {}), '()\n', (3138, 3140), False, 'import time\n'), ((3170, 3181), 'time.time', 'time.time', ([], {}), '()\n', (3179, 3181), False, 'import time\n'), ((3847, 3875), 'numpy.random.randn', 'np.random.randn', (['(rank ** dim)'], {}), '(rank ** dim)\n', (3862, 3875), True, 'import numpy as np\n'), ((3883, 3910), 'numpy.reshape', 'np.reshape', (['b', '([rank] * dim)'], {}), '(b, [rank] * dim)\n', (3893, 3910), True, 'import numpy as np\n'), ((3922, 3933), 'time.time', 'time.time', ([], {}), '()\n', (3931, 3933), False, 'import time\n'), ((3963, 3974), 'time.time', 'time.time', ([], {}), '()\n', (3972, 3974), False, 'import time\n'), ((4822, 4833), 'time.time', 'time.time', ([], {}), '()\n', (4831, 4833), False, 'import time\n'), ((4936, 4947), 'time.time', 'time.time', ([], {}), '()\n', (4945, 4947), False, 'import time\n'), ((5616, 5644), 'numpy.random.randn', 'np.random.randn', (['(rank ** dim)'], {}), '(rank ** dim)\n', (5631, 5644), True, 'import numpy as np\n'), ((5652, 5679), 'numpy.reshape', 'np.reshape', (['b', '([rank] * dim)'], {}), '(b, [rank] * dim)\n', (5662, 5679), True, 'import numpy as np\n'), ((5691, 5702), 'time.time', 'time.time', ([], {}), '()\n', (5700, 5702), False, 'import time\n'), ((5748, 5759), 'time.time', 'time.time', ([], {}), '()\n', (5757, 5759), False, 'import time\n'), ((483, 548), 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(28)', '(4)'], {'input_shape': '(input_dim, input_dim)'}), '(28, 4, input_shape=(input_dim, input_dim))\n', (505, 548), True, 'import tensorflow as tf\n'), ((567, 592), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (590, 592), True, 'import tensorflow as tf\n'), ((722, 772), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(100)', '"""relu"""'], {'use_bias': '(False)'}), "(100, 'relu', use_bias=False)\n", (743, 772), True, 'import tensorflow as tf\n'), ((968, 1016), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)', '"""tanh"""'], {'use_bias': '(False)'}), "(1, 'tanh', use_bias=False)\n", (989, 1016), True, 'import tensorflow as tf\n'), ((2470, 2524), 'numpy.random.normal', 'np.random.normal', (['(sparsity_mean * rank)', 'sparsity_sigma'], {}), '(sparsity_mean * rank, sparsity_sigma)\n', (2486, 2524), True, 'import numpy as np\n'), ((3366, 3420), 'numpy.random.normal', 'np.random.normal', (['(sparsity_mean * rank)', 'sparsity_sigma'], {}), '(sparsity_mean * rank, sparsity_sigma)\n', (3382, 3420), True, 'import numpy as np\n'), ((4162, 4216), 'numpy.random.normal', 'np.random.normal', (['(sparsity_mean * rank)', 'sparsity_sigma'], {}), '(sparsity_mean * rank, sparsity_sigma)\n', (4178, 4216), True, 'import numpy as np\n'), ((5135, 5189), 'numpy.random.normal', 'np.random.normal', (['(sparsity_mean * rank)', 'sparsity_sigma'], {}), '(sparsity_mean * rank, sparsity_sigma)\n', (5151, 
5189), True, 'import numpy as np\n'), ((2802, 2841), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rank', '(a_np, dim)'], {}), '(0, rank, (a_np, dim))\n', (2819, 2841), True, 'import numpy as np\n'), ((2871, 2892), 'numpy.random.randn', 'np.random.randn', (['a_np'], {}), '(a_np)\n', (2886, 2892), True, 'import numpy as np\n'), ((2976, 3015), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rank', '(a_np, dim)'], {}), '(0, rank, (a_np, dim))\n', (2993, 3015), True, 'import numpy as np\n'), ((3045, 3066), 'numpy.random.randn', 'np.random.randn', (['a_np'], {}), '(a_np)\n', (3060, 3066), True, 'import numpy as np\n'), ((3698, 3737), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rank', '(a_np, dim)'], {}), '(0, rank, (a_np, dim))\n', (3715, 3737), True, 'import numpy as np\n'), ((3767, 3788), 'numpy.random.randn', 'np.random.randn', (['a_np'], {}), '(a_np)\n', (3782, 3788), True, 'import numpy as np\n'), ((4494, 4533), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rank', '(a_np, dim)'], {}), '(0, rank, (a_np, dim))\n', (4511, 4533), True, 'import numpy as np\n'), ((4563, 4584), 'numpy.random.randn', 'np.random.randn', (['a_np'], {}), '(a_np)\n', (4578, 4584), True, 'import numpy as np\n'), ((4668, 4707), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rank', '(a_np, dim)'], {}), '(0, rank, (a_np, dim))\n', (4685, 4707), True, 'import numpy as np\n'), ((4737, 4758), 'numpy.random.randn', 'np.random.randn', (['a_np'], {}), '(a_np)\n', (4752, 4758), True, 'import numpy as np\n'), ((5467, 5506), 'numpy.random.randint', 'np.random.randint', (['(0)', 'rank', '(a_np, dim)'], {}), '(0, rank, (a_np, dim))\n', (5484, 5506), True, 'import numpy as np\n'), ((5536, 5557), 'numpy.random.randn', 'np.random.randn', (['a_np'], {}), '(a_np)\n', (5551, 5557), True, 'import numpy as np\n')]
|
import PIL
from skimage.io import imread
import numpy as np
# https://stackoverflow.com/questions/27026866/convert-an-image-to-2d-array-in-python
im = imread("snowboarder.jpg")
indices = np.dstack(np.indices(im.shape[:2]))
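# Append the per-pixel (row, col) indices as two extra channels next to the RGB values -> array of shape (H, W, 5)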
data = np.concatenate((im, indices), axis=-1)
new_data = data[:, :, :]
print(new_data)
# np.savetxt("somefile.txt", new_data.reshape((4,5,10)), newline="\n")
|
[
"skimage.io.imread",
"numpy.indices",
"numpy.concatenate"
] |
[((153, 178), 'skimage.io.imread', 'imread', (['"""snowboarder.jpg"""'], {}), "('snowboarder.jpg')\n", (159, 178), False, 'from skimage.io import imread\n'), ((232, 270), 'numpy.concatenate', 'np.concatenate', (['(im, indices)'], {'axis': '(-1)'}), '((im, indices), axis=-1)\n', (246, 270), True, 'import numpy as np\n'), ((199, 223), 'numpy.indices', 'np.indices', (['im.shape[:2]'], {}), '(im.shape[:2])\n', (209, 223), True, 'import numpy as np\n')]
|
"""Convert a CP trace to NCP or VIP.
blaze run convert_traces -- \
--tracefile=german_partial_prior.npz \
--model=german_credit_lognormalcentered \
--vip_json=german_credit_lognormalcentered_data/cVIP_exp_tied.json
"""
from absl import app
from absl import flags
import io
import json
import os
import numpy as np
import tensorflow as tf
from tensorflow_probability import edward2 as ed
import models as models
flags.DEFINE_string('tracefile', default='', help='')
flags.DEFINE_string('vip_json', default='', help='')
flags.DEFINE_string('model', default='', help='')
flags.DEFINE_string('dataset', default='', help='')
FLAGS = flags.FLAGS
def main(_):
model_config = models.get_model_by_name(FLAGS.model, dataset=FLAGS.dataset)
if FLAGS.vip_json:
if tf.io.gfile.exists(FLAGS.vip_json):
with tf.io.gfile.GFile(FLAGS.vip_json, 'r') as f:
prev_results = json.load(f)
else:
raise Exception('Run VI first to find initial step sizes')
vip_reparam = prev_results['learned_reparam']
new_method = 'cVIP'
to_noncentered = model_config.make_to_partially_noncentered(**vip_reparam)
else:
new_method = 'NCP'
to_noncentered = model_config.to_noncentered
with tf.io.gfile.GFile(FLAGS.tracefile) as f:
traces = dict(np.load(f))
# Get ordered list of latent variable names for this model.
with ed.tape() as model_tape:
model_config.model(*model_config.model_args)
param_names = [
k for k in list(model_tape.keys()) if k not in model_config.observed_data
]
traces_as_list = [traces[k] for k in param_names]
initial_shape = traces_as_list[0].shape[:2] # [num_results x num_chains]
flattened_traces = [np.reshape(v, [-1] + list(v.shape[2:]))
for v in traces_as_list]
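  # Apply the reparameterization to every flattened sample in one vectorized call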
transformed_traces = tf.vectorized_map(to_noncentered, flattened_traces)
unflattened_traces = {k: tf.reshape(v, initial_shape + v.shape[1:])
for (k, v) in zip(param_names, transformed_traces)}
with tf.compat.v1.Session() as sess:
unflattened_traces_ = sess.run(unflattened_traces)
np_path = FLAGS.tracefile[:-4] + '_{}.npz'.format(new_method)
with tf.io.gfile.GFile(np_path, 'wb') as out_f:
io_buffer = io.BytesIO()
np.savez(io_buffer, **unflattened_traces_)
out_f.write(io_buffer.getvalue())
if __name__ == '__main__':
app.run()
|
[
"tensorflow_probability.edward2.tape",
"numpy.savez",
"tensorflow.io.gfile.GFile",
"tensorflow.compat.v1.Session",
"io.BytesIO",
"absl.app.run",
"tensorflow.vectorized_map",
"tensorflow.reshape",
"json.load",
"models.get_model_by_name",
"numpy.load",
"absl.flags.DEFINE_string",
"tensorflow.io.gfile.exists"
] |
[((421, 474), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tracefile"""'], {'default': '""""""', 'help': '""""""'}), "('tracefile', default='', help='')\n", (440, 474), False, 'from absl import flags\n'), ((475, 527), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""vip_json"""'], {'default': '""""""', 'help': '""""""'}), "('vip_json', default='', help='')\n", (494, 527), False, 'from absl import flags\n'), ((528, 577), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""model"""'], {'default': '""""""', 'help': '""""""'}), "('model', default='', help='')\n", (547, 577), False, 'from absl import flags\n'), ((578, 629), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""'], {'default': '""""""', 'help': '""""""'}), "('dataset', default='', help='')\n", (597, 629), False, 'from absl import flags\n'), ((682, 742), 'models.get_model_by_name', 'models.get_model_by_name', (['FLAGS.model'], {'dataset': 'FLAGS.dataset'}), '(FLAGS.model, dataset=FLAGS.dataset)\n', (706, 742), True, 'import models as models\n'), ((1793, 1844), 'tensorflow.vectorized_map', 'tf.vectorized_map', (['to_noncentered', 'flattened_traces'], {}), '(to_noncentered, flattened_traces)\n', (1810, 1844), True, 'import tensorflow as tf\n'), ((2345, 2354), 'absl.app.run', 'app.run', ([], {}), '()\n', (2352, 2354), False, 'from absl import app\n'), ((772, 806), 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['FLAGS.vip_json'], {}), '(FLAGS.vip_json)\n', (790, 806), True, 'import tensorflow as tf\n'), ((1216, 1250), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['FLAGS.tracefile'], {}), '(FLAGS.tracefile)\n', (1233, 1250), True, 'import tensorflow as tf\n'), ((1357, 1366), 'tensorflow_probability.edward2.tape', 'ed.tape', ([], {}), '()\n', (1364, 1366), True, 'from tensorflow_probability import edward2 as ed\n'), ((1872, 1914), 'tensorflow.reshape', 'tf.reshape', (['v', '(initial_shape + v.shape[1:])'], {}), '(v, initial_shape + v.shape[1:])\n', (1882, 1914), True, 'import tensorflow as tf\n'), ((1999, 2021), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (2019, 2021), True, 'import tensorflow as tf\n'), ((2158, 2190), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['np_path', '"""wb"""'], {}), "(np_path, 'wb')\n", (2175, 2190), True, 'import tensorflow as tf\n'), ((2217, 2229), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2227, 2229), False, 'import io\n'), ((2234, 2276), 'numpy.savez', 'np.savez', (['io_buffer'], {}), '(io_buffer, **unflattened_traces_)\n', (2242, 2276), True, 'import numpy as np\n'), ((1275, 1285), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (1282, 1285), True, 'import numpy as np\n'), ((819, 857), 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['FLAGS.vip_json', '"""r"""'], {}), "(FLAGS.vip_json, 'r')\n", (836, 857), True, 'import tensorflow as tf\n'), ((887, 899), 'json.load', 'json.load', (['f'], {}), '(f)\n', (896, 899), False, 'import json\n')]
|
import cv2
import numpy as np
def ColorAzul():
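    # Solid blue frame: cv2.imshow expects BGR order, so channel 0 (blue) is set to 255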
black_screen = np.zeros([500, 500, 3], dtype=np.uint8)
black_screen[:, :, 0] = np.ones([500, 500]) * 255
black_screen[:, :, 1] = np.ones([500, 500]) * 0
black_screen[:, :, 2] = np.ones([500, 500]) * 0
return black_screen
def ColorRojo():
black_screen = np.zeros([500, 500, 3], dtype=np.uint8)
black_screen[:, :, 0] = np.ones([500, 500]) * 0
black_screen[:, :, 1] = np.ones([500, 500]) * 0
black_screen[:, :, 2] = np.ones([500, 500]) * 255
return black_screen
fondo_base = True
def back(*args):
    global fondo_base
    fondo_base = not fondo_base
cv2.namedWindow("Frame")
cv2.createButton("Cahnge Color", back)
while True:
fondo = ColorAzul() if fondo_base else ColorRojo()
cv2.imshow('Frame', fondo)
key = cv2.waitKey(1)
if key == ord('q'):
break
cv2.waitKey(0)
cv2.destroyAllWindows()
|
[
"numpy.ones",
"cv2.createButton",
"cv2.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.namedWindow"
] |
[((708, 732), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Frame"""'], {}), "('Frame')\n", (723, 732), False, 'import cv2\n'), ((733, 771), 'cv2.createButton', 'cv2.createButton', (['"""Cahnge Color"""', 'back'], {}), "('Cahnge Color', back)\n", (749, 771), False, 'import cv2\n'), ((937, 951), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (948, 951), False, 'import cv2\n'), ((952, 975), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (973, 975), False, 'import cv2\n'), ((68, 107), 'numpy.zeros', 'np.zeros', (['[500, 500, 3]'], {'dtype': 'np.uint8'}), '([500, 500, 3], dtype=np.uint8)\n', (76, 107), True, 'import numpy as np\n'), ((330, 369), 'numpy.zeros', 'np.zeros', (['[500, 500, 3]'], {'dtype': 'np.uint8'}), '([500, 500, 3], dtype=np.uint8)\n', (338, 369), True, 'import numpy as np\n'), ((845, 871), 'cv2.imshow', 'cv2.imshow', (['"""Frame"""', 'fondo'], {}), "('Frame', fondo)\n", (855, 871), False, 'import cv2\n'), ((883, 897), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (894, 897), False, 'import cv2\n'), ((137, 156), 'numpy.ones', 'np.ones', (['[500, 500]'], {}), '([500, 500])\n', (144, 156), True, 'import numpy as np\n'), ((191, 210), 'numpy.ones', 'np.ones', (['[500, 500]'], {}), '([500, 500])\n', (198, 210), True, 'import numpy as np\n'), ((243, 262), 'numpy.ones', 'np.ones', (['[500, 500]'], {}), '([500, 500])\n', (250, 262), True, 'import numpy as np\n'), ((399, 418), 'numpy.ones', 'np.ones', (['[500, 500]'], {}), '([500, 500])\n', (406, 418), True, 'import numpy as np\n'), ((451, 470), 'numpy.ones', 'np.ones', (['[500, 500]'], {}), '([500, 500])\n', (458, 470), True, 'import numpy as np\n'), ((503, 522), 'numpy.ones', 'np.ones', (['[500, 500]'], {}), '([500, 500])\n', (510, 522), True, 'import numpy as np\n')]
|
import torch
from time import time
from IPython import display
import numpy as np
import matplotlib.pyplot as plt
import random
import torch.utils.data as Data
from torch.nn import init
import torch.nn as nn
# 3.3.1 Generate the dataset
# We generate the same dataset as in the previous section: features holds the training inputs and labels holds the targets.
num_inputs = 2  # number of input features (x1, x2)
num_examples = 1000
true_w = [2, -3.4]
true_b = 4.2
features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
# 3.3.2 Read the data
# PyTorch provides the data package for reading data. Since data is often used as a variable name, we refer to the imported module as Data.
# In each iteration we randomly read a mini-batch containing 10 samples.
batch_size = 10
# Combine the features and labels of the training data
dataset = Data.TensorDataset(features, labels)
# Read random mini-batches
data_iter = Data.DataLoader(dataset, batch_size, shuffle=True)
# data_iter is used here in the same way as in the previous section. Let's read and print the first mini-batch of samples.
for X, y in data_iter:
print(X, y)
break
#
# 3.3.3 Define the model
# In the from-scratch implementation of the previous section, we had to define the model parameters and describe, step by step, how the model computes its output.
# As the model structure becomes more complex, those steps get increasingly tedious.
# PyTorch provides a large number of predefined layers, so we only need to decide which layers to use to build the model.
# Below we show how to define linear regression more concisely with PyTorch.
#
# First, import the torch.nn module.
# "nn" is short for neural networks.
# As the name suggests, this module defines a large number of neural-network layers.
# We have already used autograd; nn builds on autograd to define models.
# The core data structure of nn is Module, an abstraction that can represent either a single layer or a network containing many layers.
# In practice, the most common approach is to subclass nn.Module and write your own network/layer. An nn.Module instance should contain some layers and a forward method that returns the output.
# Let's first see how to implement a linear regression model with nn.Module.
class LinearNet(nn.Module):
def __init__(self, n_feature):
super(LinearNet, self).__init__()
self.linear = nn.Linear(n_feature, 1)
    # forward defines the forward pass
def forward(self, x):
y = self.linear(x)
return y
net = LinearNet(num_inputs)
print(net) # print displays the structure of the network
# In fact, we can also build the network more conveniently with nn.Sequential. Sequential is an ordered container: layers are added to the computation graph in the order they are passed in.
# Approach 1
net = nn.Sequential(
    nn.Linear(num_inputs, 1)  # in_features = 2, out_features = 1: 2-dimensional input, 1-dimensional output
    # additional layers could be passed in here
)
# Approach 2
net = nn.Sequential()
net.add_module('linear', nn.Linear(num_inputs, 1))
# net.add_module ......
# Approach 3
from collections import OrderedDict
net = nn.Sequential(OrderedDict([
('linear', nn.Linear(num_inputs, 1))
# ......
]))
print(net)
print(net[0])
#
# All learnable parameters of the model can be inspected via net.parameters(), which returns a generator.
for param in net.parameters():
print(param)
# Recall the neural-network representation of linear regression in Figure 3.1.
# As a single-layer neural network, the neuron in the output layer of linear regression is fully connected to every input in the input layer.
# The output layer of linear regression is therefore also called a fully connected layer.
# Note: torch.nn only supports inputs that form a batch of samples, not a single sample. If you have only a single sample, use input.unsqueeze(0) to add a batch dimension, as illustrated below.
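# A minimal sketch of the note above, using an arbitrary example sample:
# a single 2-feature sample gains a batch dimension via unsqueeze(0) before the forward pass.
single_x = torch.tensor([1.2, -0.3])   # shape (2,): one sample, no batch dimension
print(net(single_x.unsqueeze(0)))      # unsqueeze(0) turns the shape into (1, 2), a batch of one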
# 3.3.4 Initialize the model parameters
# Before using net we need to initialize the model parameters, such as the weight and bias of the linear regression model.
# PyTorch provides a variety of parameter initialization methods in the init module.
# Here init is short for initializer.
# We use init.normal_ to initialize each element of the weight by sampling from a normal distribution with mean 0 and standard deviation 0.01. The bias is initialized to zero.
init.normal_(net[0].weight, mean=0, std=0.01)
init.constant_(net[0].bias, val=0)  # the bias data can also be modified directly: net[0].bias.data.fill_(0)
# Note: if net was defined with the custom class at the start of section 3.3.3, the code above raises an error; net[0].weight should then be net.linear.weight, and likewise for the bias. Indexing sub-modules as net[0] only works when net is a ModuleList or Sequential instance; see section 4.1.
# 3.3.5 Define the loss function
# PyTorch provides various loss functions in the nn module. These loss functions can be viewed as special layers, and PyTorch implements them as subclasses of nn.Module.
# We now use the provided mean squared error loss as the model's loss function.
loss = nn.MSELoss()
# 3.3.6 Define the optimization algorithm
# Likewise, we do not need to implement mini-batch stochastic gradient descent ourselves.
# The torch.optim module provides many commonly used optimization algorithms such as SGD, Adam and RMSProp.
# Below we create an optimizer instance for all parameters of net and specify mini-batch stochastic gradient descent (SGD) with a learning rate of 0.03 as the optimization algorithm.
import torch.optim as optim
optimizer = optim.SGD(net.parameters(), lr=0.03)
print(optimizer)
# We can also set different learning rates for different sub-networks, which is often useful when fine-tuning. For example:
#
# optimizer = optim.SGD([
#     # parameters without an explicitly specified learning rate use the outermost default learning rate
#     {'params': net.subnet1.parameters()}, # lr=0.03
#     {'params': net.subnet2.parameters(), 'lr': 0.01}], lr=0.03)
# Sometimes we do not want the learning rate to stay fixed at a constant, so how can it be adjusted?
# There are two main approaches. One is to modify the corresponding learning rate in optimizer.param_groups;
# the other, simpler and generally recommended, is to create a new optimizer: an optimizer is very lightweight and cheap to construct, so a new one can simply be built.
# However, for optimizers that use momentum (such as Adam), the latter discards state such as the momentum buffers, which may cause the convergence of the loss to oscillate.
# Adjust the learning rate (first approach: edit param_groups in place)
for param_group in optimizer.param_groups:
    param_group['lr'] *= 0.1  # the learning rate becomes 0.1 times its previous value
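# A minimal sketch of the second approach mentioned above: rebuild the optimizer instead of
# editing param_groups. Shown commented out, with an arbitrary example value, so the training
# below is unchanged.
# optimizer = optim.SGD(net.parameters(), lr=0.003)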
# 3.3.7 Train the model
# To train the model, we update the model parameters by calling the step function of the optim instance.
# Following the definition of mini-batch stochastic gradient descent, the gradient used in each step is the average over the samples in the mini-batch (here the averaging is done by the mean reduction of the MSE loss).
num_epochs = 3
for epoch in range(1, num_epochs + 1):
for X, y in data_iter:
output = net(X)
l = loss(output, y.view(-1, 1))
        optimizer.zero_grad()  # clear the gradients; equivalent to net.zero_grad()
l.backward()
optimizer.step()
print('epoch %d, loss: %f' % (epoch, l.item()))
# Below we compare the learned model parameters with the true model parameters.
# We obtain the layer we need from net and access its weight and bias. The learned parameters are close to the true ones.
dense = net[0]
print(true_w, dense.weight)
print(true_b, dense.bias)
|
[
"numpy.random.normal",
"torch.nn.init.constant_",
"torch.nn.Sequential",
"torch.utils.data.TensorDataset",
"torch.nn.MSELoss",
"torch.nn.Linear",
"torch.utils.data.DataLoader",
"torch.nn.init.normal_"
] |
[((751, 787), 'torch.utils.data.TensorDataset', 'Data.TensorDataset', (['features', 'labels'], {}), '(features, labels)\n', (769, 787), True, 'import torch.utils.data as Data\n'), ((810, 860), 'torch.utils.data.DataLoader', 'Data.DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)'}), '(dataset, batch_size, shuffle=True)\n', (825, 860), True, 'import torch.utils.data as Data\n'), ((1954, 1969), 'torch.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1967, 1969), True, 'import torch.nn as nn\n'), ((2650, 2695), 'torch.nn.init.normal_', 'init.normal_', (['net[0].weight'], {'mean': '(0)', 'std': '(0.01)'}), '(net[0].weight, mean=0, std=0.01)\n', (2662, 2695), False, 'from torch.nn import init\n'), ((2696, 2730), 'torch.nn.init.constant_', 'init.constant_', (['net[0].bias'], {'val': '(0)'}), '(net[0].bias, val=0)\n', (2710, 2730), False, 'from torch.nn import init\n'), ((3054, 3066), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (3064, 3066), True, 'import torch.nn as nn\n'), ((371, 421), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(num_examples, num_inputs)'], {}), '(0, 1, (num_examples, num_inputs))\n', (387, 421), True, 'import numpy as np\n'), ((1851, 1875), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(1)'], {}), '(num_inputs, 1)\n', (1860, 1875), True, 'import torch.nn as nn\n'), ((1995, 2019), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(1)'], {}), '(num_inputs, 1)\n', (2004, 2019), True, 'import torch.nn as nn\n'), ((1552, 1575), 'torch.nn.Linear', 'nn.Linear', (['n_feature', '(1)'], {}), '(n_feature, 1)\n', (1561, 1575), True, 'import torch.nn as nn\n'), ((2138, 2162), 'torch.nn.Linear', 'nn.Linear', (['num_inputs', '(1)'], {}), '(num_inputs, 1)\n', (2147, 2162), True, 'import torch.nn as nn\n')]
|
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
from datetime import timedelta
import os
import sys
TOP = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6"
stime = datetime( 2018, 6, 30, 0, 0, 0 )
vtime = datetime( 2018, 7, 6, 0, 0, 0 )
adt = timedelta( hours=24 )
m = 1
INFO = {"TOP": TOP, }
def get_lonlat( INFO, stime=datetime(2018,7,1), ):
mem = str(1).zfill(4)
fn = os.path.join( INFO["TOP"], stime.strftime('%Y%m%d%H%M%S'), "fcst_sno_np00001",
mem, "p_history.pe000000.nc" )
with Dataset( fn, "r", format="NETCDF4") as nc:
lon = nc.variables["lon"][:]
lat = nc.variables["lat"][:]
return( lon, lat )
def get_arain( INFO, stime=datetime(2018,7,1), vtime=datetime(2018,7,1),
adt=timedelta(hours=24), m=1 ):
mem = str(m).zfill(4)
if m == 0:
mem = "mean"
fn = os.path.join( INFO["TOP"], stime.strftime('%Y%m%d%H%M%S'), "fcst_sno_np00001",
mem, "p_history.pe000000.nc" )
print( fn )
ft_max = ( vtime - stime ).total_seconds()
ft_min = ( vtime - adt - stime ).total_seconds()
with Dataset( fn, "r", format="NETCDF4") as nc:
fts = nc.variables["time"][:]
# print("time", fts/3600)
idx_s = np.abs( ( fts - ft_min ) ).argmin()
idx_e = np.abs( ( fts - ft_max ) ).argmin()
#
rain = np.sum( nc.variables["PREC"][idx_s+1:idx_e+1,:,:], axis=0 )*21600
print( rain.shape )
# print( ft_max, ft_min, idx_s, idx_e )
# print( stime + timedelta( seconds=fts[idx_s+1]),
# stime + timedelta( seconds=fts[idx_e+1]) )
# print( fts[idx_s:idx_e]/3600)
return( rain )
rain = get_arain( INFO, stime=stime, vtime=vtime, adt=adt, m=1 )
lon2d, lat2d = get_lonlat( INFO, stime=stime )
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import matplotlib.ticker as mticker
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
import cartopy.feature as cfeature
fig = plt.figure(figsize=(10, 10))
lons = 110
lone = 160
lats = 10
late = 55
#central_longitude = 135.0
#central_latitude = 35.0
#ax1 = fig.add_subplot(1,1,1, projection=ccrs.LambertConformal( central_longitude=central_longitude,
# central_latitude=central_latitude,
# ))
#ax1 = fig.add_subplot(1,1,1, projection=ccrs.Mercator( central_longitude=central_longitude,
# min_latitude=min_latitude,
# max_latitude=max_latitude,
# latitude_true_scale=latitude_true_scale,
#ax1 = plt.subplot(2, 2, 1, projection=ccrs.Mercator( central_longitude=180.0, ))
#ax1 = fig.add_subplot(1,1,1, projection=ccrs.PlateCarree(central_longitude=180))
ax1 = fig.add_subplot(1,1,1, projection=ccrs.PlateCarree(central_longitude=180))
ax1.set_extent( [lons, lone, lats, late ])
#ax1.coastlines()
ax1.add_feature(cfeature.COASTLINE, linewidth=0.8)
dlon, dlat = 5, 5
#gl = ax1.gridlines(crs=ccrs.PlateCarree())
#gl.xlocator = mticker.FixedLocator(np.arange( lons, lone+dlon, dlon))
#gl.ylocator = mticker.FixedLocator(np.arange( lats, late+dlat, dlat))
xticks_lab = np.arange( lons, lone+dlon, dlon)
yticks_lab = np.arange( lats, late+dlat, dlat)
ax1.set_xticks(xticks_lab, crs=ccrs.PlateCarree())
ax1.set_yticks(yticks_lab, crs=ccrs.PlateCarree())
gl = ax1.gridlines( crs=ccrs.PlateCarree(), \
linewidth=0.5, linestyle='--', color='k', alpha=0.8)
ax1.xaxis.set_major_formatter(LongitudeFormatter(zero_direction_label=True))
ax1.yaxis.set_major_formatter(LatitudeFormatter())
SHADE = ax1.contourf( lon2d, lat2d, rain,
transform=ccrs.PlateCarree(), )
#ax1.set_xlimit( lons, lone )
#ax1.set_ylimit( lats, late )
plt.show()
sys.exit()
fig, ((ax1)) = plt.subplots(1, 1, figsize=( 8,7.))
fig.subplots_adjust( left=0.04, bottom=0.04, right=0.92, top=0.91,
wspace=0.15, hspace=0.3)
plt.show()
|
[
"datetime.datetime",
"numpy.abs",
"netCDF4.Dataset",
"cartopy.crs.PlateCarree",
"numpy.sum",
"matplotlib.pyplot.figure",
"cartopy.mpl.ticker.LatitudeFormatter",
"cartopy.mpl.ticker.LongitudeFormatter",
"sys.exit",
"datetime.timedelta",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] |
[((211, 241), 'datetime.datetime', 'datetime', (['(2018)', '(6)', '(30)', '(0)', '(0)', '(0)'], {}), '(2018, 6, 30, 0, 0, 0)\n', (219, 241), False, 'from datetime import datetime\n'), ((252, 281), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(6)', '(0)', '(0)', '(0)'], {}), '(2018, 7, 6, 0, 0, 0)\n', (260, 281), False, 'from datetime import datetime\n'), ((291, 310), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (300, 310), False, 'from datetime import timedelta\n'), ((2021, 2049), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (2031, 2049), True, 'import matplotlib.pyplot as plt\n'), ((3333, 3367), 'numpy.arange', 'np.arange', (['lons', '(lone + dlon)', 'dlon'], {}), '(lons, lone + dlon, dlon)\n', (3342, 3367), True, 'import numpy as np\n'), ((3381, 3415), 'numpy.arange', 'np.arange', (['lats', '(late + dlat)', 'dlat'], {}), '(lats, late + dlat, dlat)\n', (3390, 3415), True, 'import numpy as np\n'), ((3929, 3939), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3937, 3939), True, 'import matplotlib.pyplot as plt\n'), ((3941, 3951), 'sys.exit', 'sys.exit', ([], {}), '()\n', (3949, 3951), False, 'import sys\n'), ((3968, 4004), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 7.0)'}), '(1, 1, figsize=(8, 7.0))\n', (3980, 4004), True, 'import matplotlib.pyplot as plt\n'), ((4119, 4129), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4127, 4129), True, 'import matplotlib.pyplot as plt\n'), ((373, 393), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (381, 393), False, 'from datetime import datetime\n'), ((737, 757), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (745, 757), False, 'from datetime import datetime\n'), ((763, 783), 'datetime.datetime', 'datetime', (['(2018)', '(7)', '(1)'], {}), '(2018, 7, 1)\n', (771, 783), False, 'from datetime import datetime\n'), ((803, 822), 'datetime.timedelta', 'timedelta', ([], {'hours': '(24)'}), '(hours=24)\n', (812, 822), False, 'from datetime import timedelta\n'), ((3671, 3716), 'cartopy.mpl.ticker.LongitudeFormatter', 'LongitudeFormatter', ([], {'zero_direction_label': '(True)'}), '(zero_direction_label=True)\n', (3689, 3716), False, 'from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\n'), ((3748, 3767), 'cartopy.mpl.ticker.LatitudeFormatter', 'LatitudeFormatter', ([], {}), '()\n', (3765, 3767), False, 'from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter\n'), ((569, 603), 'netCDF4.Dataset', 'Dataset', (['fn', '"""r"""'], {'format': '"""NETCDF4"""'}), "(fn, 'r', format='NETCDF4')\n", (576, 603), False, 'from netCDF4 import Dataset\n'), ((1161, 1195), 'netCDF4.Dataset', 'Dataset', (['fn', '"""r"""'], {'format': '"""NETCDF4"""'}), "(fn, 'r', format='NETCDF4')\n", (1168, 1195), False, 'from netCDF4 import Dataset\n'), ((2958, 2997), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {'central_longitude': '(180)'}), '(central_longitude=180)\n', (2974, 2997), True, 'import cartopy.crs as ccrs\n'), ((3448, 3466), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3464, 3466), True, 'import cartopy.crs as ccrs\n'), ((3500, 3518), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3516, 3518), True, 'import cartopy.crs as ccrs\n'), ((3545, 3563), 'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3561, 3563), True, 'import cartopy.crs as ccrs\n'), ((3845, 3863), 
'cartopy.crs.PlateCarree', 'ccrs.PlateCarree', ([], {}), '()\n', (3861, 3863), True, 'import cartopy.crs as ccrs\n'), ((1401, 1464), 'numpy.sum', 'np.sum', (["nc.variables['PREC'][idx_s + 1:idx_e + 1, :, :]"], {'axis': '(0)'}), "(nc.variables['PREC'][idx_s + 1:idx_e + 1, :, :], axis=0)\n", (1407, 1464), True, 'import numpy as np\n'), ((1289, 1309), 'numpy.abs', 'np.abs', (['(fts - ft_min)'], {}), '(fts - ft_min)\n', (1295, 1309), True, 'import numpy as np\n'), ((1340, 1360), 'numpy.abs', 'np.abs', (['(fts - ft_max)'], {}), '(fts - ft_max)\n', (1346, 1360), True, 'import numpy as np\n')]
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:annorxiver]
# language: python
# name: conda-env-annorxiver-py
# ---
# # Re-Run Analyses with Polka et al. Subset
# This notebook was created in response to the Polka et al. group's inquiry about training a logistic regression model on recently posted preprints rather than preprints from 2019 and earlier.
# Overall their subset can be separated with a few features.
# +
from pathlib import Path
import sys
from gensim.models import Word2Vec
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import pandas as pd
import plotnine as p9
import requests
from scipy.spatial.distance import cdist
from scipy.stats import linregress
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegressionCV, LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import spacy
import tqdm
from annorxiver_modules.document_helper import generate_doc_vector
mpl.rcParams["figure.dpi"] = 250
# -
# # Random BioRxiv Sample
manual_papers_df = pd.read_csv(str(Path("output/all_pairs_2021-02-11.csv")))
manual_papers_df.head().T
api_url = "https://api.biorxiv.org/details/biorxiv/2020-01-01/2020-04-30"
response = requests.get(api_url)
content = response.json()
total_papers = content["messages"][0]["total"]
total_papers
np.random.seed(100)
selected_biorxiv_papers = np.random.randint(0, total_papers, 100)
selected_biorxiv_papers.sort()
selected_biorxiv_papers
paper_cursor = {}
for paper in selected_biorxiv_papers:
cursor = int(np.ceil(int(paper / 100)))
if cursor not in paper_cursor:
paper_cursor[cursor] = []
paper_cursor[cursor].append(paper)
paper_cursor
published_doi_map = []
for paper in tqdm.tqdm(paper_cursor):
api_url = f"https://api.biorxiv.org/details/biorxiv/2020-01-01/2020-04-30/{paper}"
response = requests.get(api_url)
content = response.json()
collection = content["collection"]
for paper_idx in paper_cursor[paper]:
user_doi = collection[paper_idx % 100]["doi"]
file_name = user_doi.split("/")[-1]
api_url = f"https://api.biorxiv.org/details/biorxiv/{user_doi}"
response = requests.get(api_url)
content = response.json()
latest_paper = content["collection"][-1]
version_count = len(content["collection"])
doc_url = "http://biorxiv.org/content"
file_url = f"{doc_url}/early/{latest_paper['date'].replace('-', '/')}/{file_name}.source.xml"
response = requests.get(file_url)
with open(
f"output/biorxiv_xml_files_recent/{file_name}_v{version_count}.xml", "wb"
) as outfile:
outfile.write(response.content)
# # Document Embeddings
# ## Convert New biorxiv subset
biorxiv_documents = [
Path(x.name) for x in list(Path("output/biorxiv_xml_files_recent").rglob("*xml"))
]
biorxiv_xpath_str = "//abstract/p|//abstract/title|//body/sec//p|//body/sec//title"
word_model = Word2Vec.load(
str(Path("../word_vector_experiment/output/word2vec_models/300/biorxiv_300.model"))
)
biorxiv_document_map = {
document: generate_doc_vector(
word_model,
document_path=str(Path("output/biorxiv_xml_files_recent") / document),
xpath=biorxiv_xpath_str,
)
for document in tqdm.tqdm_notebook(biorxiv_documents)
}
# +
biorxiv_vec_df = (
pd.DataFrame.from_dict(biorxiv_document_map, orient="index")
.rename(columns={col: f"feat_{col}" for col in range(int(300))})
.rename_axis("document")
.reset_index()
)
biorxiv_vec_df.to_csv(
"output/random_recent_biorxiv_subset_embeddings.tsv", sep="\t", index=False
)
biorxiv_vec_df.head().T
# -
# ## Load the Documents
polka_preprints_df = pd.read_csv("output/polka_et_al_biorxiv_embeddings.tsv", sep="\t")
polka_preprints_df.head()
pca_components = pd.read_csv(
Path("../pca_association_experiment/output/word_pca_similarity/pca_components.tsv"),
sep="\t",
)
pca_components.head()
# ## PCA Components
# This section aims to see which principal components have a high association with Polka et al's subset. Furthermore, we also aim to see if we can use linear models to explain which PCs affect preprint prediction.
document_pca_sim = 1 - cdist(
polka_preprints_df.drop("document", axis=1).values, pca_components.values, "cosine"
)
print(document_pca_sim.shape)
document_pca_sim
document_to_pca_map = {
document: document_pca_sim[idx, :]
for idx, document in enumerate(polka_preprints_df.document.tolist())
}
polka_pca_sim_df = (
pd.DataFrame.from_dict(document_to_pca_map, orient="index")
.rename(index=str, columns={col: f"pc{col+1}" for col in range(int(300))})
.reset_index()
.rename(index=str, columns={"index": "document"})
)
# polka_pca_sim_df.to_csv("output/polka_pca_enrichment.tsv", sep="\t")
polka_pca_sim_df = polka_pca_sim_df.assign(label="polka")
polka_pca_sim_df.head()
document_pca_sim = 1 - cdist(
biorxiv_vec_df.drop("document", axis=1).values,
pca_components.values,
"cosine",
)
print(document_pca_sim.shape)
document_pca_sim
document_to_pca_map = {
document: document_pca_sim[idx, :]
for idx, document in enumerate(biorxiv_vec_df.document.tolist())
}
biorxiv_pca_sim_df = (
pd.DataFrame.from_dict(document_to_pca_map, orient="index")
.rename(index=str, columns={col: f"pc{col+1}" for col in range(int(300))})
.reset_index()
.rename(index=str, columns={"index": "document"})
.assign(label="biorxiv")
)
biorxiv_pca_sim_df.head()
# ## PC Regression
# ### Logistic Regression
# Goal here is to determine if we can figure out which PCs separate the bioRxiv subset from Polka et al.'s subset. Given that their dataset is only 60 papers we downsampled our dataset to contain only 60 papers.
dataset_df = biorxiv_pca_sim_df.append(polka_pca_sim_df)
dataset_df.head()
model = LogisticRegressionCV(
cv=10, Cs=100, max_iter=1000, penalty="l1", solver="liblinear"
)
model.fit(
StandardScaler().fit_transform(dataset_df[[f"pc{idx+1}" for idx in range(50)]]),
dataset_df["label"],
)
best_result = list(filter(lambda x: x[1] == model.C_, enumerate(model.Cs_)))[0]
print(best_result)
print("Best CV Fold")
print(model.scores_["polka"][:, best_result[0]])
model.scores_["polka"][:, best_result[0]].mean()
model_weights_df = pd.DataFrame.from_dict(
{
"weight": model.coef_[0],
"pc": list(range(1, 51)),
}
)
model_weights_df["pc"] = pd.Categorical(model_weights_df["pc"])
model_weights_df.head()
g = (
p9.ggplot(model_weights_df, p9.aes(x="pc", y="weight"))
+ p9.geom_col(position=p9.position_dodge(width=5), fill="#253494")
+ p9.coord_flip()
+ p9.scale_x_discrete(limits=list(sorted(range(1, 51), reverse=True)))
+ p9.theme_seaborn(context="paper", style="ticks", font_scale=1.1, font="Arial")
+ p9.theme(figure_size=(10, 8))
+ p9.labs(
title="Regression Model Weights", x="Princpial Component", y="Model Weight"
)
)
# g.save("output/figures/pca_log_regression_weights.svg")
# g.save("output/figures/pca_log_regression_weights.png", dpi=250)
print(g)
fold_features = model.coefs_paths_["polka"].transpose(1, 0, 2)
model_performance_df = pd.DataFrame.from_dict(
{
"feat_num": ((fold_features.astype(bool).sum(axis=1)) > 0).sum(axis=1),
"C": model.Cs_,
"score": model.scores_["polka"].mean(axis=0),
}
)
model_performance_df.head()
# +
fig, ax1 = plt.subplots()
ax1.set_xscale("log")
ax2 = plt.twinx()
ax1.plot(
model_performance_df.C.tolist(),
model_performance_df.feat_num.tolist(),
label="Features",
marker=".",
)
ax1.set_ylabel("# of Features")
ax1.set_xlabel("Inverse Regularization (C)")
ax1.legend(loc=0)
ax2.plot(
model_performance_df.C.tolist(),
model_performance_df.score.tolist(),
label="Score",
marker=".",
color="green",
)
ax2.set_ylabel("Score (Accuracy %)")
ax2.legend(loc=4)
# plt.savefig("output/preprint_classifier_results.png")
# -
plot_path = list(
zip(
model.Cs_,
model.scores_["polka"].transpose(),
model.coefs_paths_["polka"].transpose(1, 0, 2),
)
)
data_records = []
for cs in plot_path[33:40]:
model = LogisticRegression(C=cs[0], max_iter=1000, penalty="l1", solver="liblinear")
model.fit(
StandardScaler().fit_transform(dataset_df[[f"pc{idx+1}" for idx in range(50)]]),
dataset_df["label"],
)
data_records.append(
{
"C": cs[0],
"PCs": ",".join(map(str, model.coef_.nonzero()[1] + 1)),
"feat_num": len(model.coef_.nonzero()[1]),
"accuracy": cs[1].mean(),
}
)
model_coefs_df = pd.DataFrame.from_records(data_records)
model_coefs_df
|
[
"pandas.read_csv",
"plotnine.coord_flip",
"plotnine.aes",
"plotnine.position_dodge",
"sklearn.linear_model.LogisticRegressionCV",
"pathlib.Path",
"matplotlib.pyplot.twinx",
"pandas.DataFrame.from_dict",
"pandas.Categorical",
"numpy.random.seed",
"tqdm.tqdm_notebook",
"requests.get",
"plotnine.theme",
"pandas.DataFrame.from_records",
"tqdm.tqdm",
"sklearn.linear_model.LogisticRegression",
"sklearn.preprocessing.StandardScaler",
"numpy.random.randint",
"plotnine.theme_seaborn",
"plotnine.labs",
"matplotlib.pyplot.subplots"
] |
[((1494, 1515), 'requests.get', 'requests.get', (['api_url'], {}), '(api_url)\n', (1506, 1515), False, 'import requests\n'), ((1603, 1622), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (1617, 1622), True, 'import numpy as np\n'), ((1649, 1688), 'numpy.random.randint', 'np.random.randint', (['(0)', 'total_papers', '(100)'], {}), '(0, total_papers, 100)\n', (1666, 1688), True, 'import numpy as np\n'), ((2003, 2026), 'tqdm.tqdm', 'tqdm.tqdm', (['paper_cursor'], {}), '(paper_cursor)\n', (2012, 2026), False, 'import tqdm\n'), ((3996, 4062), 'pandas.read_csv', 'pd.read_csv', (['"""output/polka_et_al_biorxiv_embeddings.tsv"""'], {'sep': '"""\t"""'}), "('output/polka_et_al_biorxiv_embeddings.tsv', sep='\\t')\n", (4007, 4062), True, 'import pandas as pd\n'), ((6133, 6222), 'sklearn.linear_model.LogisticRegressionCV', 'LogisticRegressionCV', ([], {'cv': '(10)', 'Cs': '(100)', 'max_iter': '(1000)', 'penalty': '"""l1"""', 'solver': '"""liblinear"""'}), "(cv=10, Cs=100, max_iter=1000, penalty='l1', solver=\n 'liblinear')\n", (6153, 6222), False, 'from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((6719, 6757), 'pandas.Categorical', 'pd.Categorical', (["model_weights_df['pc']"], {}), "(model_weights_df['pc'])\n", (6733, 6757), True, 'import pandas as pd\n'), ((7706, 7720), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7718, 7720), True, 'import matplotlib.pyplot as plt\n'), ((7749, 7760), 'matplotlib.pyplot.twinx', 'plt.twinx', ([], {}), '()\n', (7758, 7760), True, 'import matplotlib.pyplot as plt\n'), ((8933, 8972), 'pandas.DataFrame.from_records', 'pd.DataFrame.from_records', (['data_records'], {}), '(data_records)\n', (8958, 8972), True, 'import pandas as pd\n'), ((2130, 2151), 'requests.get', 'requests.get', (['api_url'], {}), '(api_url)\n', (2142, 2151), False, 'import requests\n'), ((3060, 3072), 'pathlib.Path', 'Path', (['x.name'], {}), '(x.name)\n', (3064, 3072), False, 'from pathlib import Path\n'), ((4124, 4217), 'pathlib.Path', 'Path', (['"""../pca_association_experiment/output/word_pca_similarity/pca_components.tsv"""'], {}), "(\n '../pca_association_experiment/output/word_pca_similarity/pca_components.tsv'\n )\n", (4128, 4217), False, 'from pathlib import Path\n'), ((7144, 7233), 'plotnine.labs', 'p9.labs', ([], {'title': '"""Regression Model Weights"""', 'x': '"""Princpial Component"""', 'y': '"""Model Weight"""'}), "(title='Regression Model Weights', x='Princpial Component', y=\n 'Model Weight')\n", (7151, 7233), True, 'import plotnine as p9\n'), ((8462, 8538), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': 'cs[0]', 'max_iter': '(1000)', 'penalty': '"""l1"""', 'solver': '"""liblinear"""'}), "(C=cs[0], max_iter=1000, penalty='l1', solver='liblinear')\n", (8480, 8538), False, 'from sklearn.linear_model import LogisticRegressionCV, LogisticRegression\n'), ((1340, 1379), 'pathlib.Path', 'Path', (['"""output/all_pairs_2021-02-11.csv"""'], {}), "('output/all_pairs_2021-02-11.csv')\n", (1344, 1379), False, 'from pathlib import Path\n'), ((2454, 2475), 'requests.get', 'requests.get', (['api_url'], {}), '(api_url)\n', (2466, 2475), False, 'import requests\n'), ((2780, 2802), 'requests.get', 'requests.get', (['file_url'], {}), '(file_url)\n', (2792, 2802), False, 'import requests\n'), ((3265, 3343), 'pathlib.Path', 'Path', (['"""../word_vector_experiment/output/word2vec_models/300/biorxiv_300.model"""'], {}), "('../word_vector_experiment/output/word2vec_models/300/biorxiv_300.model')\n", (3269, 3343), 
False, 'from pathlib import Path\n'), ((3566, 3603), 'tqdm.tqdm_notebook', 'tqdm.tqdm_notebook', (['biorxiv_documents'], {}), '(biorxiv_documents)\n', (3584, 3603), False, 'import tqdm\n'), ((7108, 7137), 'plotnine.theme', 'p9.theme', ([], {'figure_size': '(10, 8)'}), '(figure_size=(10, 8))\n', (7116, 7137), True, 'import plotnine as p9\n'), ((6239, 6255), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (6253, 6255), False, 'from sklearn.preprocessing import StandardScaler\n'), ((7023, 7101), 'plotnine.theme_seaborn', 'p9.theme_seaborn', ([], {'context': '"""paper"""', 'style': '"""ticks"""', 'font_scale': '(1.1)', 'font': '"""Arial"""'}), "(context='paper', style='ticks', font_scale=1.1, font='Arial')\n", (7039, 7101), True, 'import plotnine as p9\n'), ((8562, 8578), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (8576, 8578), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3087, 3126), 'pathlib.Path', 'Path', (['"""output/biorxiv_xml_files_recent"""'], {}), "('output/biorxiv_xml_files_recent')\n", (3091, 3126), False, 'from pathlib import Path\n'), ((3454, 3493), 'pathlib.Path', 'Path', (['"""output/biorxiv_xml_files_recent"""'], {}), "('output/biorxiv_xml_files_recent')\n", (3458, 3493), False, 'from pathlib import Path\n'), ((6926, 6941), 'plotnine.coord_flip', 'p9.coord_flip', ([], {}), '()\n', (6939, 6941), True, 'import plotnine as p9\n'), ((3634, 3694), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['biorxiv_document_map'], {'orient': '"""index"""'}), "(biorxiv_document_map, orient='index')\n", (3656, 3694), True, 'import pandas as pd\n'), ((4816, 4875), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['document_to_pca_map'], {'orient': '"""index"""'}), "(document_to_pca_map, orient='index')\n", (4838, 4875), True, 'import pandas as pd\n'), ((6821, 6847), 'plotnine.aes', 'p9.aes', ([], {'x': '"""pc"""', 'y': '"""weight"""'}), "(x='pc', y='weight')\n", (6827, 6847), True, 'import plotnine as p9\n'), ((5519, 5578), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['document_to_pca_map'], {'orient': '"""index"""'}), "(document_to_pca_map, orient='index')\n", (5541, 5578), True, 'import pandas as pd\n'), ((6876, 6902), 'plotnine.position_dodge', 'p9.position_dodge', ([], {'width': '(5)'}), '(width=5)\n', (6893, 6902), True, 'import plotnine as p9\n')]
|
# Car Pooling
'''
You are driving a vehicle that has capacity empty seats initially available for passengers. The vehicle only drives east (ie. it cannot turn around and drive west.)
Given a list of trips, trip[i] = [num_passengers, start_location, end_location] contains information about the i-th trip: the number of passengers that must be picked up, and the locations to pick them up and drop them off. The locations are given as the number of kilometers due east from your vehicle's initial location.
Return true if and only if it is possible to pick up and drop off all passengers for all the given trips.
Example 1:
Input: trips = [[2,1,5],[3,3,7]], capacity = 4
Output: false
Example 2:
Input: trips = [[2,1,5],[3,3,7]], capacity = 5
Output: true
Example 3:
Input: trips = [[2,1,5],[3,5,7]], capacity = 3
Output: true
Example 4:
Input: trips = [[3,2,7],[3,7,9],[8,3,9]], capacity = 11
Output: true
Constraints:
trips.length <= 1000
trips[i].length == 3
1 <= trips[i][0] <= 100
0 <= trips[i][1] < trips[i][2] <= 1000
1 <= capacity <= 100000
Hide Hint #1
Sort the pickup and dropoff events by location, then process them in order.
'''
from typing import List

class Solution0:
'''
Efficient approach
'''
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
timestamps = []
for trip in trips:
timestamps.append([trip[1], trip[0]])
timestamps.append([trip[2], -trip[0]])
timestamps.sort()
seat_status = 0
for timestamp in timestamps:
seat_status+=timestamp[1]
if seat_status>capacity:
return False
return True
import numpy as np
class Solution:
def carPooling(self, trips: List[List[int]], capacity: int) -> bool:
trips = np.array(trips)
if max(trips[:,0])>capacity:
return False
end = max(trips[:,2])
series = [0]*(end)
for trip in trips:
for i in range(trip[1],trip[2]):
series[i] = series[i]+trip[0]
if series[i]>capacity:
return False
return True
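
# A quick sanity check, added as a sketch, against the four examples given in the problem
# statement above; it exercises both solution classes.
if __name__ == "__main__":
    cases = [
        ([[2, 1, 5], [3, 3, 7]], 4, False),
        ([[2, 1, 5], [3, 3, 7]], 5, True),
        ([[2, 1, 5], [3, 5, 7]], 3, True),
        ([[3, 2, 7], [3, 7, 9], [8, 3, 9]], 11, True),
    ]
    for trips, capacity, expected in cases:
        assert Solution0().carPooling(trips, capacity) == expected
        assert Solution().carPooling(trips, capacity) == expected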
|
[
"numpy.array"
] |
[((1856, 1871), 'numpy.array', 'np.array', (['trips'], {}), '(trips)\n', (1864, 1871), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import math
import pandas as pd
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
import os.path as osp
from PIL import Image
import numpy as np
plt.rcParams['font.sans-serif'] = ['SimHei'] # so that Chinese labels are displayed correctly
plt.rcParams['font.family']='sans-serif'
plt.rcParams['figure.figsize'] = (20.0, 20.0)
dataset=dict(
ann_file=(
('', 'has_people_phone_rand3w_train2.lst'),
('', 'jianhang_0412_RMB_rand3w_train2.lst'),
('', 'money_phone_20210710_rand2w_2w_train.lst'),
('','colloect_phone_money_20210708_train.lst'),
),
img_prefix='/mnt/datadisk0/jingzhudata/phone_money/',
classes= ('phone', 'money')
)
ch, cw = 576, 960
# Read the data
class PlotRatio(object):
def __init__(self, **kwargs):
super(PlotRatio, self).__init__()
self.img_prefix = dataset['img_prefix']
def plot_ratio(self, ann_file, classes):
"""Load annotation from XML style ann_file.
Args:
ann_file (str): Path of XML file.
Returns:
list[dict]: Annotation info from XML file.
"""
# print(ann_file, "....debug")
assert isinstance(ann_file, (list, tuple)), "ann_file must be list or tuple in DGVOCDataset"
count_wh = [0]*10
count_squares = [0]*11
num_class = [0]*2
for (year, name) in ann_file:
rootpath = osp.join(self.img_prefix, year)
for img_id, line in enumerate(open(osp.join(rootpath, name))):
if ';' not in line:
split_item = line.strip().split()
else:
split_item = line.strip().split(';')
if len(split_item) != 2:
img_path = split_item[0]
xml_path = None
else:
img_path, xml_path = split_item
if '.xml' != xml_path[-4:]: xml_path = None
if xml_path is None: continue
img_path_com = osp.join(rootpath, img_path)
xml_path_com = osp.join(rootpath, xml_path)
img = Image.open(img_path_com)
                width, height = img.size # width/height of the original image; the labels are sometimes wrong
tree = ET.parse(xml_path_com)
root = tree.getroot()
# size = root.find('size')
# width = size.find('width')
# height = size.find('height')
for obj in root.findall('object'):
name = obj.find('name').text.lower().strip()
# if 'fjs_' in name:
# name = name.replace('fjs_', '')
if name not in classes:
continue
else :
idx = classes.index(name)
num_class[idx] += 1
bndbox = obj.find('bndbox')
xmin = bndbox.find('xmin').text
ymin = bndbox.find('ymin').text
xmax = bndbox.find('xmax').text
ymax = bndbox.find('ymax').text
                    # NOTE: filter out mislabeled ground-truth boxes
w_box = float(xmax) - float(xmin)
h_box = float(ymax) - float(ymin)
if w_box * h_box <= 0 or min(w_box, h_box) < 4 or max(w_box, h_box) < 4 or max(w_box, h_box) > 360:
continue
ratio2 = 1.
if height > ch or width > cw:
ratio2 = np.min(np.array([ch, cw]).astype(np.float64) / np.array([height, width]))
w = (w_box) * ratio2
h = (h_box) * ratio2
if w==0 or h==0:
continue
ratio = round(w/h, 1)
scale = round(w*h, 1)
square = math.sqrt(scale)
if ratio < 0.25:
count_wh[0] += 1
elif 0.25 <= ratio < 1/3:
count_wh[1] += 1
elif 1/3 <= ratio < 1/2:
count_wh[2] += 1
elif 1/2 <= ratio < 1:
count_wh[3] += 1
elif 1 <= ratio < 1.5:
count_wh[4] += 1
elif 1.5 <= ratio < 2:
count_wh[5] += 1
elif 2 <= ratio < 2.5:
count_wh[6] += 1
elif 2.5 <= ratio < 3:
count_wh[7] += 1
elif 3 <= ratio < 4:
count_wh[8] += 1
else:
count_wh[9] += 1
if square < 8:
count_squares[0] += 1
elif 8 <= square < 16:
count_squares[1] += 1
elif 16 <= square < 21:
count_squares[2] += 1
elif 21 <= square < 32:
count_squares[3] += 1
elif 32 <= square < 64:
count_squares[4] += 1
elif 64 <= square < 128:
count_squares[5] += 1
elif 128 <= square < 256:
count_squares[6] += 1
elif 256 <= square < 512:
count_squares[7] += 1
elif 512 <= square < 1024:
count_squares[8] += 1
elif 1024 <= square < 2048:
count_squares[9] += 1
elif 2048 <= square < 4096:
count_squares[10] += 1
        # Plot
wh_df = pd.DataFrame(count_wh, index=['0-0.25','0.25-0.33','0.33-0.5','0.5-1','1-1.5','1.5-2','2-2.5',\
'2.5-3','3-4', '>4'], columns=['宽高比'])
wh_df.plot(kind='bar', color ='#55aacc')
plt.savefig('./phone_wallet_ratios.jpg')
#plt.savefig('./dms_ratios_face.jpg')
squares_df = pd.DataFrame(count_squares, index=['0-8','8-16','16-21', '21-32','32-64','64-128',\
'128-256','256-512','512-1024','1024-2048','2048-4096'], columns=['边长范围'])
squares_df.plot(kind='bar', color ='#55aacc')
plt.savefig('./phone_wallet_squares.jpg')
#plt.savefig('./dms_squares_face.jpg')
num_class_df = pd.DataFrame(num_class,index=['phone', 'money'], columns=['类别数'])
num_class_df.plot(kind='bar')
plt.savefig('./rmp.jpg')
pr = PlotRatio()
pr.plot_ratio(ann_file = dataset['ann_file'], classes=dataset['classes'])
#pr.plot_ratio(ann_file = dataset['ann_file'], classes=dataset['classes'][3])
|
[
"PIL.Image.open",
"xml.etree.ElementTree.parse",
"matplotlib.pyplot.savefig",
"os.path.join",
"math.sqrt",
"numpy.array",
"pandas.DataFrame"
] |
[((6121, 6265), 'pandas.DataFrame', 'pd.DataFrame', (['count_wh'], {'index': "['0-0.25', '0.25-0.33', '0.33-0.5', '0.5-1', '1-1.5', '1.5-2', '2-2.5',\n '2.5-3', '3-4', '>4']", 'columns': "['宽高比']"}), "(count_wh, index=['0-0.25', '0.25-0.33', '0.33-0.5', '0.5-1',\n '1-1.5', '1.5-2', '2-2.5', '2.5-3', '3-4', '>4'], columns=['宽高比'])\n", (6133, 6265), True, 'import pandas as pd\n'), ((6359, 6399), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./phone_wallet_ratios.jpg"""'], {}), "('./phone_wallet_ratios.jpg')\n", (6370, 6399), True, 'import matplotlib.pyplot as plt\n'), ((6468, 6641), 'pandas.DataFrame', 'pd.DataFrame', (['count_squares'], {'index': "['0-8', '8-16', '16-21', '21-32', '32-64', '64-128', '128-256', '256-512',\n '512-1024', '1024-2048', '2048-4096']", 'columns': "['边长范围']"}), "(count_squares, index=['0-8', '8-16', '16-21', '21-32', '32-64',\n '64-128', '128-256', '256-512', '512-1024', '1024-2048', '2048-4096'],\n columns=['边长范围'])\n", (6480, 6641), True, 'import pandas as pd\n'), ((6741, 6782), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./phone_wallet_squares.jpg"""'], {}), "('./phone_wallet_squares.jpg')\n", (6752, 6782), True, 'import matplotlib.pyplot as plt\n'), ((6854, 6920), 'pandas.DataFrame', 'pd.DataFrame', (['num_class'], {'index': "['phone', 'money']", 'columns': "['类别数']"}), "(num_class, index=['phone', 'money'], columns=['类别数'])\n", (6866, 6920), True, 'import pandas as pd\n'), ((6966, 6990), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""./rmp.jpg"""'], {}), "('./rmp.jpg')\n", (6977, 6990), True, 'import matplotlib.pyplot as plt\n'), ((1520, 1551), 'os.path.join', 'osp.join', (['self.img_prefix', 'year'], {}), '(self.img_prefix, year)\n', (1528, 1551), True, 'import os.path as osp\n'), ((2133, 2161), 'os.path.join', 'osp.join', (['rootpath', 'img_path'], {}), '(rootpath, img_path)\n', (2141, 2161), True, 'import os.path as osp\n'), ((2193, 2221), 'os.path.join', 'osp.join', (['rootpath', 'xml_path'], {}), '(rootpath, xml_path)\n', (2201, 2221), True, 'import os.path as osp\n'), ((2244, 2268), 'PIL.Image.open', 'Image.open', (['img_path_com'], {}), '(img_path_com)\n', (2254, 2268), False, 'from PIL import Image\n'), ((2349, 2371), 'xml.etree.ElementTree.parse', 'ET.parse', (['xml_path_com'], {}), '(xml_path_com)\n', (2357, 2371), True, 'import xml.etree.ElementTree as ET\n'), ((1599, 1623), 'os.path.join', 'osp.join', (['rootpath', 'name'], {}), '(rootpath, name)\n', (1607, 1623), True, 'import os.path as osp\n'), ((4087, 4103), 'math.sqrt', 'math.sqrt', (['scale'], {}), '(scale)\n', (4096, 4103), False, 'import math\n'), ((3740, 3765), 'numpy.array', 'np.array', (['[height, width]'], {}), '([height, width])\n', (3748, 3765), True, 'import numpy as np\n'), ((3700, 3718), 'numpy.array', 'np.array', (['[ch, cw]'], {}), '([ch, cw])\n', (3708, 3718), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 8 14:45:56 2019
@author: Xuan-Laptop
"""
import pandas as pd
import numpy as np
from utils import np_macro_f1, encoding
from sklearn.linear_model import LinearRegression, LogisticRegression
import warnings
warnings.filterwarnings("ignore")
def calibration_flathead(y_val, p_pred):
best_ts = 0
best_f1 = 0
for i in range(1, 51):
ts = i/100
out = np_macro_f1(y_val, (p_pred > ts).astype(int), return_details=False)
if out > best_f1:
best_f1 = out
best_ts = ts
df_res = np_macro_f1(y_val, (p_pred > best_ts).astype(int), return_details=True)
return best_ts, df_res
def calibration_perclass(y_val, p_pred):
ts_list = []
for i in range(28):
ts, _ = calibration_flathead(y_val[:, i], p_pred[:, i])
ts_list.append(ts)
df_res = np_macro_f1(y_val, (p_pred > ts_list).astype(int), return_details=True)
return ts_list, df_res
stack_version = 'stacking_v4'
def getLogit(x, epsilon=1e-20):
return np.log(x/(1 - x + epsilon) + epsilon)
def getProb(logit):
    return 1/(1 + np.exp(-logit))
# load data and align
# choose one of the submission files; make sure the Id order matches the sample submission
submission = pd.read_csv('./data/sample_submission.csv')
# the files should be found in ./ref_data; they are the same as 5folds_v2
for i in range(5):
if i == 0:
df_val = pd.read_csv('./ref_data/fold_info_SEED1024_val_F{}.csv'.format(str(i)))
else:
df_val = df_val.append(pd.read_csv('./ref_data/fold_info_SEED1024_val_F{}.csv'.format(str(i))), ignore_index=True)
labels = df_val.Target.apply(encoding)
y_val = np.array(labels.tolist())
# put the names of models to be stacked here.
# two files should be included: model_name_val.csv and model_name_test.csv
# Model predicted probability for 28 classes of all images
# the val data will be aligned with respect to the val data loaded from ref_data
# the format is: Id | 0 | 1 | ... | 27 (one probability column per class).
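# A small hypothetical illustration of that layout (the Id and values below are made up):
#   Id,0,1,...,27
#   0a1b2c3d-0001,0.91,0.02,...,0.00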
models = [#'seresnext50',
'seresnext50_tta',
'inceptionv3_tta',
#'zhu',
'zhu_614',
#'zhu_Jan9',
]
p_test_all = []
p_val_all = []
res_details = []
mask = [str(x) for x in range(28)]
for i, model in enumerate(models):
p_val = pd.read_csv('./data/{}_val.csv'.format(model))
p_val = pd.merge(df_val[['Id']], p_val, how='left', on='Id')
p_val_all.append(np.array(p_val[mask].values))
df_res = np_macro_f1(y_val, np.array(p_val[mask].values), return_details=True)
print('Model_%s f1 loss: %.4f'% (model, df_res.f1_scores.mean()))
res_details.append(df_res)
p_test = pd.read_csv('./data/{}_test.csv'.format(model))
p_test_all.append(np.array(p_test[mask].values))
# Train 28 linear models for each class
lr_models = []
coeff = []
for i in range(28):
tmp = []
for j in range(len(models)):
tmp.append(p_val_all[j][:, i])
X = np.array(tmp)
Y = y_val[:, i:i+1]
lr = LinearRegression()
#lr = LogisticRegression()
lr.fit(X.T, Y)
lr_models.append(lr)
coeff.append(lr.coef_[0])
coeff = np.array(coeff)
# Ensemble predictions
stacking_all = []
val_stack = []
for i in range(28):
lr = lr_models[i]
tmp = []
for j in range(len(models)):
tmp.append(p_test_all[j][:, i])
X = np.array(tmp)
Y = lr.predict(X.T)
Y = Y.clip(0, 1)
stacking_all.append(Y)
tmp = []
for j in range(len(models)):
tmp.append(p_val_all[j][:, i])
X_v = np.array(tmp)
Y_v = lr.predict(X_v.T)
Y_v = Y_v.clip(0, 1)
val_stack.append(Y_v)
p_stack = np.squeeze(np.dstack(stacking_all))
p_stack_val = np.squeeze(np.dstack(val_stack))
df_stack = np_macro_f1(y_val, p_stack_val, return_details=True)
print('Stacking f1-loss: %4f' % (df_stack.f1_scores.mean()))
ts_flat, df_flat = calibration_flathead(y_val, p_stack_val)
ts_perclass, df_perclass = calibration_perclass(y_val, p_stack_val)
print('Flathead: %.4f, Per Class: %.4f'
%(np.mean(df_flat.f1_scores), np.mean(df_perclass.f1_scores)))
df_stack_val = df_val[['Id']]
df_stack_test = submission[['Id']]
for i in range(28):
df_stack_val[str(i)] = p_stack_val[:, i]
df_stack_test[str(i)] = p_stack[:, i]
df_coeff = pd.DataFrame(coeff)
df_coeff.columns = models
# store all necessary information
df_coeff.to_csv('{}_coef.csv'.format(stack_version), index=False)
df_stack_val.to_csv('{}_val.csv'.format(stack_version), index=False)
df_stack_test.to_csv('{}_test.csv'.format(stack_version), index=False)
|
[
"numpy.dstack",
"numpy.mean",
"sklearn.linear_model.LinearRegression",
"pandas.read_csv",
"pandas.merge",
"numpy.log",
"numpy.array",
"pandas.DataFrame",
"warnings.filterwarnings",
"utils.np_macro_f1"
] |
[((268, 301), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (291, 301), False, 'import warnings\n'), ((1301, 1344), 'pandas.read_csv', 'pd.read_csv', (['"""./data/sample_submission.csv"""'], {}), "('./data/sample_submission.csv')\n", (1312, 1344), True, 'import pandas as pd\n'), ((3229, 3244), 'numpy.array', 'np.array', (['coeff'], {}), '(coeff)\n', (3237, 3244), True, 'import numpy as np\n'), ((3863, 3915), 'utils.np_macro_f1', 'np_macro_f1', (['y_val', 'p_stack_val'], {'return_details': '(True)'}), '(y_val, p_stack_val, return_details=True)\n', (3874, 3915), False, 'from utils import np_macro_f1, encoding\n'), ((4419, 4438), 'pandas.DataFrame', 'pd.DataFrame', (['coeff'], {}), '(coeff)\n', (4431, 4438), True, 'import pandas as pd\n'), ((1083, 1122), 'numpy.log', 'np.log', (['(x / (1 - x + epsilon) + epsilon)'], {}), '(x / (1 - x + epsilon) + epsilon)\n', (1089, 1122), True, 'import numpy as np\n'), ((2436, 2488), 'pandas.merge', 'pd.merge', (["df_val[['Id']]", 'p_val'], {'how': '"""left"""', 'on': '"""Id"""'}), "(df_val[['Id']], p_val, how='left', on='Id')\n", (2444, 2488), True, 'import pandas as pd\n'), ((3043, 3056), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (3051, 3056), True, 'import numpy as np\n'), ((3092, 3110), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (3108, 3110), False, 'from sklearn.linear_model import LinearRegression, LogisticRegression\n'), ((3454, 3467), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (3462, 3467), True, 'import numpy as np\n'), ((3648, 3661), 'numpy.array', 'np.array', (['tmp'], {}), '(tmp)\n', (3656, 3661), True, 'import numpy as np\n'), ((3778, 3801), 'numpy.dstack', 'np.dstack', (['stacking_all'], {}), '(stacking_all)\n', (3787, 3801), True, 'import numpy as np\n'), ((3829, 3849), 'numpy.dstack', 'np.dstack', (['val_stack'], {}), '(val_stack)\n', (3838, 3849), True, 'import numpy as np\n'), ((2511, 2539), 'numpy.array', 'np.array', (['p_val[mask].values'], {}), '(p_val[mask].values)\n', (2519, 2539), True, 'import numpy as np\n'), ((2574, 2602), 'numpy.array', 'np.array', (['p_val[mask].values'], {}), '(p_val[mask].values)\n', (2582, 2602), True, 'import numpy as np\n'), ((2819, 2848), 'numpy.array', 'np.array', (['p_test[mask].values'], {}), '(p_test[mask].values)\n', (2827, 2848), True, 'import numpy as np\n'), ((1163, 1177), 'numpy.log', 'np.log', (['(-logit)'], {}), '(-logit)\n', (1169, 1177), True, 'import numpy as np\n'), ((4165, 4191), 'numpy.mean', 'np.mean', (['df_flat.f1_scores'], {}), '(df_flat.f1_scores)\n', (4172, 4191), True, 'import numpy as np\n'), ((4193, 4223), 'numpy.mean', 'np.mean', (['df_perclass.f1_scores'], {}), '(df_perclass.f1_scores)\n', (4200, 4223), True, 'import numpy as np\n')]
|
"""
Plot different high-thrust corrections used in BEM
"""
# --- Common libraries
import numpy as np
import matplotlib.pyplot as plt
# --- Local libraries
from welib.BEM.highthrust import *
from welib.tools.figure import defaultRC; defaultRC();
def main(test=False):
Ct=np.linspace(0,2,50)
a =np.linspace(0,1,50)
Ct_MT = 4*a*(1-a)
fig,ax = plt.subplots(1, 1, sharey=False, figsize=(6.4,4.8)) # (6.4,4.8)
fig.subplots_adjust(left=0.12, right=0.95, top=0.95, bottom=0.11, hspace=0.20, wspace=0.20)
# Functions that depend on a only
ax.plot(a ,Ct_MT,'k-' ,label = 'Momentum theory' )
ax.plot(a ,Ct_a(a,method='Glauert'),'-' ,label = 'Glauert (ac=1/3)')
ax.plot(a ,Ct_a(a,method='Spera') ,'.' ,label = 'Spera (ac=0.3)')
# Functions that depend on Ct only
ax.plot(a_Ct(Ct,method = 'AeroDyn' ),Ct,'-' ,label = 'AeroDyn' )
ax.plot(a_Ct(Ct,method = 'HAWC2' ),Ct,'--',label = 'HAWC2' )
ax.plot(a_Ct(Ct,method = 'WEHandbook' ),Ct,':' ,label = 'Handbook' )
ax.plot(a_Ct(Ct,method = 'GlauertEmpirical'),Ct,'-.',label = 'Glauert Empirical')
ax.set_xlabel('Axial induction, a [-]')
ax.set_ylabel('Thrust coefficient, Ct [-]')
ax.set_xlim([0,1])
ax.set_ylim([0,2])
ax.legend()
ax.grid()
ax.set_title('BEM - High thrust correction')
if __name__=="__main__":
main()
plt.show()
if __name__=="__test__":
main()
if __name__=="__export__":
main()
from welib.tools.repo import export_figs_callback
export_figs_callback(__file__)
|
[
"welib.tools.repo.export_figs_callback",
"welib.tools.figure.defaultRC",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((235, 246), 'welib.tools.figure.defaultRC', 'defaultRC', ([], {}), '()\n', (244, 246), False, 'from welib.tools.figure import defaultRC\n'), ((278, 299), 'numpy.linspace', 'np.linspace', (['(0)', '(2)', '(50)'], {}), '(0, 2, 50)\n', (289, 299), True, 'import numpy as np\n'), ((305, 326), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(50)'], {}), '(0, 1, 50)\n', (316, 326), True, 'import numpy as np\n'), ((361, 413), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '(False)', 'figsize': '(6.4, 4.8)'}), '(1, 1, sharey=False, figsize=(6.4, 4.8))\n', (373, 413), True, 'import matplotlib.pyplot as plt\n'), ((1409, 1419), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1417, 1419), True, 'import matplotlib.pyplot as plt\n'), ((1552, 1582), 'welib.tools.repo.export_figs_callback', 'export_figs_callback', (['__file__'], {}), '(__file__)\n', (1572, 1582), False, 'from welib.tools.repo import export_figs_callback\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sarna.viz import highlight
def compare_box_and_slice(x, box, slc):
'''Function used to compare slice limits and box range of a rectangle.'''
halfsample = np.diff(x).mean() / 2
correct_limits = x[slc][[0, -1]] + [-halfsample, halfsample]
bbox_limits = box.get_bbox().get_points()
bbox_x_limits = bbox_limits[:, 0]
print(bbox_x_limits)
print(correct_limits)
return np.allclose(correct_limits, bbox_x_limits)
def test_highlight():
x = np.arange(0, 10, step=0.05)
n_times = len(x)
y = np.random.random(n_times)
# simple usage
# ------------
line = plt.plot(x, y)
highlight(x, slice(10, 40))
ax = line[0].axes
rectangles = ax.findobj(Rectangle)
assert len(rectangles) == 2
plt.close(ax.figure)
# two slices, setting color and alpha
# -----------------------------------
line = plt.plot(x, y)
use_alpha, use_color = 0.5, [0.75] * 3
slices = [slice(10, 40), slice(60, 105)]
highlight(x, slices, alpha=use_alpha, color=use_color)
ax = line[0].axes
rectangles = ax.findobj(Rectangle)
assert len(rectangles) == 3
# check box color and box alpha
rgba = rectangles[0].get_facecolor()
assert (rgba[:3] == np.array(use_color)).all()
assert rgba[-1] == use_alpha
# compare slices and rectangles:
for box, slc in zip(rectangles, slices):
assert compare_box_and_slice(x, box, slc)
plt.close(ax.figure)
# two slices, using bottom_bar
# ----------------------------
line = plt.plot(x, y)
slices = [slice(10, 40), slice(60, 105)]
highlight(x, slices, bottom_bar=True)
ax = line[0].axes
rectangles = ax.findobj(Rectangle)
assert len(rectangles) == 5
for idx, col in enumerate([0.95, 0, 0.95, 0]):
rect_color = rectangles[idx].get_facecolor()[:3]
assert (rect_color == np.array([col] * 3)).all()
plt.close(ax.figure)
|
[
"numpy.allclose",
"sarna.viz.highlight",
"numpy.random.random",
"matplotlib.pyplot.plot",
"numpy.diff",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.arange"
] |
[((497, 539), 'numpy.allclose', 'np.allclose', (['correct_limits', 'bbox_x_limits'], {}), '(correct_limits, bbox_x_limits)\n', (508, 539), True, 'import numpy as np\n'), ((572, 599), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {'step': '(0.05)'}), '(0, 10, step=0.05)\n', (581, 599), True, 'import numpy as np\n'), ((630, 655), 'numpy.random.random', 'np.random.random', (['n_times'], {}), '(n_times)\n', (646, 655), True, 'import numpy as np\n'), ((706, 720), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (714, 720), True, 'import matplotlib.pyplot as plt\n'), ((851, 871), 'matplotlib.pyplot.close', 'plt.close', (['ax.figure'], {}), '(ax.figure)\n', (860, 871), True, 'import matplotlib.pyplot as plt\n'), ((968, 982), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (976, 982), True, 'import matplotlib.pyplot as plt\n'), ((1075, 1129), 'sarna.viz.highlight', 'highlight', (['x', 'slices'], {'alpha': 'use_alpha', 'color': 'use_color'}), '(x, slices, alpha=use_alpha, color=use_color)\n', (1084, 1129), False, 'from sarna.viz import highlight\n'), ((1523, 1543), 'matplotlib.pyplot.close', 'plt.close', (['ax.figure'], {}), '(ax.figure)\n', (1532, 1543), True, 'import matplotlib.pyplot as plt\n'), ((1626, 1640), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (1634, 1640), True, 'import matplotlib.pyplot as plt\n'), ((1691, 1728), 'sarna.viz.highlight', 'highlight', (['x', 'slices'], {'bottom_bar': '(True)'}), '(x, slices, bottom_bar=True)\n', (1700, 1728), False, 'from sarna.viz import highlight\n'), ((1993, 2013), 'matplotlib.pyplot.close', 'plt.close', (['ax.figure'], {}), '(ax.figure)\n', (2002, 2013), True, 'import matplotlib.pyplot as plt\n'), ((262, 272), 'numpy.diff', 'np.diff', (['x'], {}), '(x)\n', (269, 272), True, 'import numpy as np\n'), ((1326, 1345), 'numpy.array', 'np.array', (['use_color'], {}), '(use_color)\n', (1334, 1345), True, 'import numpy as np\n'), ((1962, 1981), 'numpy.array', 'np.array', (['([col] * 3)'], {}), '([col] * 3)\n', (1970, 1981), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from numpy import pi as pi
from numpy import sin as sin
from numpy import cos as cos
from scipy import signal
# example 1a
N=1000
f=50
T=1/f
t=np.arange(N)
Phi=np.random.normal(0,1,N)
X=sin(2*pi*f*t/N)
Y=sin(2*pi*f*t/N + Phi)
plt.plot(Phi)
plt.plot(X)
plt.plot(Y)
plt.legend(['$\Phi$','$X_t$','$Y_t$'])
plt.show()
#F, Pxx_den = signal.periodogram(X,N)
#plt.semilogy(F, Pxx_den)
G, Pyy_den = signal.periodogram(Y,N)
plt.plot(G, Pyy_den)
#plt.legend(['$F(X)$','$F(Y)$'])
plt.show()
#example 1b
N=10000
f1=50
f2=1e3
t=np.arange(N)
Phi1=np.random.normal(0,5,N)
Phi2=np.random.normal(0,5,N)
X=sin(2*pi*f2*t/N)*cos(2*pi*f1*t/N)
Y=sin(2*pi*f1*t/N + Phi1)*cos(2*pi*f1*t/N+Phi2)
plt.plot(Phi1)
plt.plot(X)
plt.plot(Y)
plt.legend(['$\Phi$','$X_t$','$Y_t$'])
plt.show()
F, Pxx_den = signal.periodogram(X,N)
plt.semilogy(F, Pxx_den)
plt.show()
G, Pyy_den = signal.periodogram(Y,N)
plt.plot(G, Pyy_den)
#plt.legend(['$F(X)$','$F(Y)$'])
plt.show()
# example 1c
N=10000
f1=7
f2=24
A=0.5
B=1.5
t=np.arange(N)
Phi1=np.random.normal(0,1,N)
Phi2=np.random.normal(0,1,N)
X=A*sin(2*pi*f2*t/N)+B*sin(2*pi*f1*t/N)
Y=A*sin(2*pi*f2*t/N + Phi1)+B*sin(2*pi*f1*t/N+Phi2)
plt.plot(Phi1)
plt.plot(X)
plt.plot(Y)
plt.legend(['$\Phi$','$X_t$','$Y_t$'])
plt.show()
F, Pxx_den = signal.periodogram(X,N)
plt.plot(F, Pxx_den)
#plt.show()
G, Pyy_den = signal.periodogram(Y,N)
plt.plot(G, Pyy_den)
plt.legend(['$F(X)$','$F(Y)$'])
plt.grid()
plt.show()
#
import pandas as pd

inputfile = "../Datasets/S1MME_week43.csv"
df = pd.read_csv(inputfile)
Y=df.S1_mode_combined_attach_success_times_SEQ
N=int(len(Y)/2)
G, Pyy_den = signal.periodogram(Y,N)
plt.subplot(2,1,1)
plt.plot(Y)
plt.subplot(2,1,2)
plt.plot(G, Pyy_den)
plt.show()
dataset=Y
interval=1
diff = list()
for i in range(interval, len(dataset)):
value = dataset[i] - dataset[i - interval]
diff.append(value)
plt.plot(Y)
plt.plot(diff)
plt.legend(['$Y_t$','$\\nabla Y_t$'])
plt.show()
N=int(len(diff))
G, Pyy_den = signal.periodogram(Y,N)
plt.subplot(2,1,1)
plt.plot(diff)
plt.subplot(2,1,2)
plt.plot(G, Pyy_den)
plt.grid()
plt.show()
# iterate, adding more components
f1=21
f2=42
A=12.5*1e4
B=3.85*1e4
t=np.arange(N)
Y_=A*sin(2*pi*f2*t/N)+B*sin(2*pi*f1*t/N)
#plt.plot(Y)
plt.plot(Y-200e3)
plt.plot(Y_)
plt.grid()
#plt.legend(['$Y_t$','$\\nabla Y_t$','$\\hat{Y}_t$'])
plt.legend(['$Y_t-2e5$','$\\hat{Y}_t$'])
plt.show()
|
[
"numpy.random.normal",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.grid",
"numpy.arange",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"numpy.cos",
"numpy.sin",
"scipy.signal.periodogram",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] |
[((196, 208), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (205, 208), True, 'import numpy as np\n'), ((213, 238), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (229, 238), True, 'import numpy as np\n'), ((239, 262), 'numpy.sin', 'sin', (['(2 * pi * f * t / N)'], {}), '(2 * pi * f * t / N)\n', (242, 262), True, 'from numpy import sin as sin\n'), ((257, 286), 'numpy.sin', 'sin', (['(2 * pi * f * t / N + Phi)'], {}), '(2 * pi * f * t / N + Phi)\n', (260, 286), True, 'from numpy import sin as sin\n'), ((280, 293), 'matplotlib.pyplot.plot', 'plt.plot', (['Phi'], {}), '(Phi)\n', (288, 293), True, 'import matplotlib.pyplot as plt\n'), ((294, 305), 'matplotlib.pyplot.plot', 'plt.plot', (['X'], {}), '(X)\n', (302, 305), True, 'import matplotlib.pyplot as plt\n'), ((306, 317), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {}), '(Y)\n', (314, 317), True, 'import matplotlib.pyplot as plt\n'), ((318, 359), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\Phi$', '$X_t$', '$Y_t$']"], {}), "(['$\\\\Phi$', '$X_t$', '$Y_t$'])\n", (328, 359), True, 'import matplotlib.pyplot as plt\n'), ((357, 367), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (365, 367), True, 'import matplotlib.pyplot as plt\n'), ((446, 470), 'scipy.signal.periodogram', 'signal.periodogram', (['Y', 'N'], {}), '(Y, N)\n', (464, 470), False, 'from scipy import signal\n'), ((470, 490), 'matplotlib.pyplot.plot', 'plt.plot', (['G', 'Pyy_den'], {}), '(G, Pyy_den)\n', (478, 490), True, 'import matplotlib.pyplot as plt\n'), ((524, 534), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (532, 534), True, 'import matplotlib.pyplot as plt\n'), ((573, 585), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (582, 585), True, 'import numpy as np\n'), ((591, 616), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)', 'N'], {}), '(0, 5, N)\n', (607, 616), True, 'import numpy as np\n'), ((620, 645), 'numpy.random.normal', 'np.random.normal', (['(0)', '(5)', 'N'], {}), '(0, 5, N)\n', (636, 645), True, 'import numpy as np\n'), ((729, 743), 'matplotlib.pyplot.plot', 'plt.plot', (['Phi1'], {}), '(Phi1)\n', (737, 743), True, 'import matplotlib.pyplot as plt\n'), ((744, 755), 'matplotlib.pyplot.plot', 'plt.plot', (['X'], {}), '(X)\n', (752, 755), True, 'import matplotlib.pyplot as plt\n'), ((756, 767), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {}), '(Y)\n', (764, 767), True, 'import matplotlib.pyplot as plt\n'), ((768, 809), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\Phi$', '$X_t$', '$Y_t$']"], {}), "(['$\\\\Phi$', '$X_t$', '$Y_t$'])\n", (778, 809), True, 'import matplotlib.pyplot as plt\n'), ((807, 817), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (815, 817), True, 'import matplotlib.pyplot as plt\n'), ((832, 856), 'scipy.signal.periodogram', 'signal.periodogram', (['X', 'N'], {}), '(X, N)\n', (850, 856), False, 'from scipy import signal\n'), ((856, 880), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (['F', 'Pxx_den'], {}), '(F, Pxx_den)\n', (868, 880), True, 'import matplotlib.pyplot as plt\n'), ((881, 891), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (889, 891), True, 'import matplotlib.pyplot as plt\n'), ((905, 929), 'scipy.signal.periodogram', 'signal.periodogram', (['Y', 'N'], {}), '(Y, N)\n', (923, 929), False, 'from scipy import signal\n'), ((929, 949), 'matplotlib.pyplot.plot', 'plt.plot', (['G', 'Pyy_den'], {}), '(G, Pyy_den)\n', (937, 949), True, 'import matplotlib.pyplot as plt\n'), ((983, 993), 'matplotlib.pyplot.show', 'plt.show', ([], {}), 
'()\n', (991, 993), True, 'import matplotlib.pyplot as plt\n'), ((1042, 1054), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (1051, 1054), True, 'import numpy as np\n'), ((1060, 1085), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1076, 1085), True, 'import numpy as np\n'), ((1089, 1114), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1105, 1114), True, 'import numpy as np\n'), ((1206, 1220), 'matplotlib.pyplot.plot', 'plt.plot', (['Phi1'], {}), '(Phi1)\n', (1214, 1220), True, 'import matplotlib.pyplot as plt\n'), ((1221, 1232), 'matplotlib.pyplot.plot', 'plt.plot', (['X'], {}), '(X)\n', (1229, 1232), True, 'import matplotlib.pyplot as plt\n'), ((1233, 1244), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {}), '(Y)\n', (1241, 1244), True, 'import matplotlib.pyplot as plt\n'), ((1245, 1286), 'matplotlib.pyplot.legend', 'plt.legend', (["['$\\\\Phi$', '$X_t$', '$Y_t$']"], {}), "(['$\\\\Phi$', '$X_t$', '$Y_t$'])\n", (1255, 1286), True, 'import matplotlib.pyplot as plt\n'), ((1284, 1294), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1292, 1294), True, 'import matplotlib.pyplot as plt\n'), ((1309, 1333), 'scipy.signal.periodogram', 'signal.periodogram', (['X', 'N'], {}), '(X, N)\n', (1327, 1333), False, 'from scipy import signal\n'), ((1333, 1353), 'matplotlib.pyplot.plot', 'plt.plot', (['F', 'Pxx_den'], {}), '(F, Pxx_den)\n', (1341, 1353), True, 'import matplotlib.pyplot as plt\n'), ((1379, 1403), 'scipy.signal.periodogram', 'signal.periodogram', (['Y', 'N'], {}), '(Y, N)\n', (1397, 1403), False, 'from scipy import signal\n'), ((1403, 1423), 'matplotlib.pyplot.plot', 'plt.plot', (['G', 'Pyy_den'], {}), '(G, Pyy_den)\n', (1411, 1423), True, 'import matplotlib.pyplot as plt\n'), ((1424, 1456), 'matplotlib.pyplot.legend', 'plt.legend', (["['$F(X)$', '$F(Y)$']"], {}), "(['$F(X)$', '$F(Y)$'])\n", (1434, 1456), True, 'import matplotlib.pyplot as plt\n'), ((1456, 1466), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1464, 1466), True, 'import matplotlib.pyplot as plt\n'), ((1467, 1477), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1475, 1477), True, 'import matplotlib.pyplot as plt\n'), ((1630, 1654), 'scipy.signal.periodogram', 'signal.periodogram', (['Y', 'N'], {}), '(Y, N)\n', (1648, 1654), False, 'from scipy import signal\n'), ((1654, 1674), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (1665, 1674), True, 'import matplotlib.pyplot as plt\n'), ((1673, 1684), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {}), '(Y)\n', (1681, 1684), True, 'import matplotlib.pyplot as plt\n'), ((1685, 1705), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (1696, 1705), True, 'import matplotlib.pyplot as plt\n'), ((1704, 1724), 'matplotlib.pyplot.plot', 'plt.plot', (['G', 'Pyy_den'], {}), '(G, Pyy_den)\n', (1712, 1724), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1733, 1735), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1889), 'matplotlib.pyplot.plot', 'plt.plot', (['Y'], {}), '(Y)\n', (1886, 1889), True, 'import matplotlib.pyplot as plt\n'), ((1890, 1904), 'matplotlib.pyplot.plot', 'plt.plot', (['diff'], {}), '(diff)\n', (1898, 1904), True, 'import matplotlib.pyplot as plt\n'), ((1905, 1943), 'matplotlib.pyplot.legend', 'plt.legend', (["['$Y_t$', '$\\\\nabla Y_t$']"], {}), "(['$Y_t$', '$\\\\nabla Y_t$'])\n", (1915, 1943), True, 'import 
matplotlib.pyplot as plt\n'), ((1943, 1953), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1951, 1953), True, 'import matplotlib.pyplot as plt\n'), ((1986, 2010), 'scipy.signal.periodogram', 'signal.periodogram', (['Y', 'N'], {}), '(Y, N)\n', (2004, 2010), False, 'from scipy import signal\n'), ((2010, 2030), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), '(2, 1, 1)\n', (2021, 2030), True, 'import matplotlib.pyplot as plt\n'), ((2029, 2043), 'matplotlib.pyplot.plot', 'plt.plot', (['diff'], {}), '(diff)\n', (2037, 2043), True, 'import matplotlib.pyplot as plt\n'), ((2044, 2064), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), '(2, 1, 2)\n', (2055, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2063, 2083), 'matplotlib.pyplot.plot', 'plt.plot', (['G', 'Pyy_den'], {}), '(G, Pyy_den)\n', (2071, 2083), True, 'import matplotlib.pyplot as plt\n'), ((2084, 2094), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2092, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2095, 2105), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2103, 2105), True, 'import matplotlib.pyplot as plt\n'), ((2178, 2190), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (2187, 2190), True, 'import numpy as np\n'), ((2246, 2268), 'matplotlib.pyplot.plot', 'plt.plot', (['(Y - 200000.0)'], {}), '(Y - 200000.0)\n', (2254, 2268), True, 'import matplotlib.pyplot as plt\n'), ((2264, 2276), 'matplotlib.pyplot.plot', 'plt.plot', (['Y_'], {}), '(Y_)\n', (2272, 2276), True, 'import matplotlib.pyplot as plt\n'), ((2277, 2287), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (2285, 2287), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2383), 'matplotlib.pyplot.legend', 'plt.legend', (["['$Y_t-2e5$', '$\\\\hat{Y}_t$']"], {}), "(['$Y_t-2e5$', '$\\\\hat{Y}_t$'])\n", (2352, 2383), True, 'import matplotlib.pyplot as plt\n'), ((2383, 2393), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2391, 2393), True, 'import matplotlib.pyplot as plt\n'), ((646, 670), 'numpy.sin', 'sin', (['(2 * pi * f2 * t / N)'], {}), '(2 * pi * f2 * t / N)\n', (649, 670), True, 'from numpy import sin as sin\n'), ((663, 687), 'numpy.cos', 'cos', (['(2 * pi * f1 * t / N)'], {}), '(2 * pi * f1 * t / N)\n', (666, 687), True, 'from numpy import cos as cos\n'), ((682, 713), 'numpy.sin', 'sin', (['(2 * pi * f1 * t / N + Phi1)'], {}), '(2 * pi * f1 * t / N + Phi1)\n', (685, 713), True, 'from numpy import sin as sin\n'), ((706, 737), 'numpy.cos', 'cos', (['(2 * pi * f1 * t / N + Phi2)'], {}), '(2 * pi * f1 * t / N + Phi2)\n', (709, 737), True, 'from numpy import cos as cos\n'), ((1117, 1141), 'numpy.sin', 'sin', (['(2 * pi * f2 * t / N)'], {}), '(2 * pi * f2 * t / N)\n', (1120, 1141), True, 'from numpy import sin as sin\n'), ((1136, 1160), 'numpy.sin', 'sin', (['(2 * pi * f1 * t / N)'], {}), '(2 * pi * f1 * t / N)\n', (1139, 1160), True, 'from numpy import sin as sin\n'), ((1157, 1188), 'numpy.sin', 'sin', (['(2 * pi * f2 * t / N + Phi1)'], {}), '(2 * pi * f2 * t / N + Phi1)\n', (1160, 1188), True, 'from numpy import sin as sin\n'), ((1183, 1214), 'numpy.sin', 'sin', (['(2 * pi * f1 * t / N + Phi2)'], {}), '(2 * pi * f1 * t / N + Phi2)\n', (1186, 1214), True, 'from numpy import sin as sin\n'), ((2196, 2220), 'numpy.sin', 'sin', (['(2 * pi * f2 * t / N)'], {}), '(2 * pi * f2 * t / N)\n', (2199, 2220), True, 'from numpy import sin as sin\n'), ((2215, 2239), 'numpy.sin', 'sin', (['(2 * pi * f1 * t / N)'], {}), '(2 * pi * f1 * t / N)\n', (2218, 2239), True, 'from numpy 
import sin as sin\n')]
|
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import tensorflow as tf
from tfsnippet.bayes import BernoulliLayer, StochasticTensor
from tests.helper import TestCase
class BernoulliLayerTestCase(TestCase):
def test_basic(self):
layer = BernoulliLayer()
output = layer({'logits': tf.zeros([10, 2])})
self.assertIsInstance(output, StochasticTensor)
with self.get_session():
np.testing.assert_almost_equal(
output.distribution.logits.eval(),
np.zeros([10, 2])
)
np.testing.assert_almost_equal(
output.distribution.probs.eval(),
np.ones([10, 2]) * 0.5
)
if __name__ == '__main__':
unittest.main()
|
[
"numpy.ones",
"numpy.zeros",
"tfsnippet.bayes.BernoulliLayer",
"unittest.main",
"tensorflow.zeros"
] |
[((748, 763), 'unittest.main', 'unittest.main', ([], {}), '()\n', (761, 763), False, 'import unittest\n'), ((265, 281), 'tfsnippet.bayes.BernoulliLayer', 'BernoulliLayer', ([], {}), '()\n', (279, 281), False, 'from tfsnippet.bayes import BernoulliLayer, StochasticTensor\n'), ((316, 333), 'tensorflow.zeros', 'tf.zeros', (['[10, 2]'], {}), '([10, 2])\n', (324, 333), True, 'import tensorflow as tf\n'), ((536, 553), 'numpy.zeros', 'np.zeros', (['[10, 2]'], {}), '([10, 2])\n', (544, 553), True, 'import numpy as np\n'), ((678, 694), 'numpy.ones', 'np.ones', (['[10, 2]'], {}), '([10, 2])\n', (685, 694), True, 'import numpy as np\n')]
|
"""Computation of quadrature points and weights for different schemes.
Attributes
----------
DEFAULT_COLLOCATION_POINTS_MAX : int
Constant default limitation on the maximum number of collocation points
per mesh section that a user can specify. The value of 20 has been chosen
as above this the algorithms that are used for evaluating the orthogonal
polynomials become numerically unstable and raise a warning.
DEFAULT_COLLOCATION_POINTS_MIN : int
Constant default limitation on the minimum number of collocation points
per mesh section that a user can specify. The value of 2 has been chosen
as this is the smallest possible value that still makes logical sense (i.e.
a mesh section cannot have fewer than two nodes).
GAUSS : str
Keyword identifier for Legendre-Gauss quadrature method.
LOBATTO : str
Keyword identifier for Legendre-Gauss-Lobatto quadrature method.
RADAU : str
Keyword identifier for Legendre-Gauss-Radau quadrature method.
"""
__all__ = []
import numpy as np
import scipy.interpolate as interpolate
from pyproprop import Options
GAUSS = "gauss"
LOBATTO = "lobatto"
RADAU = "radau"
QUADRATURES = Options((GAUSS, LOBATTO, RADAU), default=LOBATTO,
unsupported=GAUSS)
DEFAULT_COLLOCATION_POINTS_MIN = 4
DEFAULT_COLLOCATION_POINTS_MAX = 10
class Quadrature:
"""Class for quadrature schemes including weights and points."""
def __init__(self, backend):
self.backend = backend
self._polynomials = {}
self._quadrature_points = {}
self._quadrature_weights = {}
self._butcher_arrays = {}
self._D_matrices = {}
self._A_matrices = {}
self._W_matrices = {}
self._D_index_arrays = {}
self._A_index_arrays = {}
@property
def settings(self):
return self.backend.ocp.settings
@property
def backend(self):
return self._backend
@backend.setter
def backend(self, backend):
self._backend = backend
self.order_range = list(range(
self.settings.collocation_points_min, self.settings.collocation_points_max))
if self.settings.quadrature_method == LOBATTO:
self.quadrature_generator = self.lobatto_generator
elif self.settings.quadrature_method == RADAU:
self.quadrature_generator = self.radau_generator
elif self.settings.quadrature_method == GAUSS:
self.quadrature_generator = self.gauss_generator
def _retrive_or_generate_dict_value(self, quad_dict, order):
try:
quad_dict[order]
except KeyError:
self.quadrature_generator(order)
return quad_dict[order]
def polynomials(self, order):
return self._retrive_or_generate_dict_value(self._polynomials, order)
def quadrature_point(self, order, *, domain=None):
points = self._retrive_or_generate_dict_value(
self._quadrature_points, order)
if domain:
stretch = 0.5 * (domain[1] - domain[0])
scale = 0.5 * (domain[0] + domain[1])
return stretch * points + scale
else:
return points
def quadrature_weight(self, order):
return self._retrive_or_generate_dict_value(self._quadrature_weights, order)
def butcher_array(self, order):
return self._retrive_or_generate_dict_value(self._butcher_arrays, order)
def D_matrix(self, order):
return self._retrive_or_generate_dict_value(self._D_matrices, order)
def A_matrix(self, order):
return self._retrive_or_generate_dict_value(self._A_matrices, order)
def W_matrix(self, order):
return self._retrive_or_generate_dict_value(self._W_matrices, order)
def D_index_array(self, order):
return self._retrive_or_generate_dict_value(self._D_index_arrays, order)
def A_index_array(self, order):
return self._retrive_or_generate_dict_value(self._A_index_arrays, order)
def radau_generator(self, order):
coefficients = [0] * (order - 2)
coefficients.extend([1, 1])
legendre_polynomial = np.polynomial.legendre.Legendre(coefficients)
self._polynomials.update({order: legendre_polynomial})
radau_points = legendre_polynomial.roots()
radau_points = np.concatenate([radau_points, np.array([0])])
self._quadrature_points.update({order: radau_points})
coefficients = [0] * (order - 2)
coefficients.extend([1])
legendre_polynomial = np.polynomial.legendre.Legendre(coefficients)
radau_weights = [2 / (order - 1)**2]
radau_weights = np.array(
radau_weights + [(1 - x) / ((order - 1)**2 * (legendre_polynomial(x)**2))
for x in radau_points[1:-1]])
radau_weights = np.concatenate([radau_weights, np.array([0])])
self._quadrature_weights.update({order: radau_weights})
butcher_points = self.quadrature_point(order, domain=[0, 1])
butcher_array = np.zeros((order, order))
butcher_array[-1, :] = radau_weights / 2
if order > 2:
A_row = (order + 1) * (order - 2)
A_col = order * (order - 2)
A = np.zeros((A_row, A_col))
b = np.zeros(A_row)
for k in range(order - 2):
for j in range(order):
row = j + k * order
for i in range(order - 2):
col = i + j * (order - 2)
A[row, col] = radau_weights[i + 1] * \
butcher_points[i + 1]**k
b[row] = (radau_weights[j] / (k + 1)) * (1 - butcher_points[j]
** (k + 1)) - radau_weights[-1] * radau_weights[j]
del_row = []
for i, row in enumerate(A):
if np.count_nonzero(row) == 0:
del_row.append(i)
A = np.delete(A, del_row, axis=0)
b = np.delete(b, del_row, axis=0)
a = np.linalg.solve(A, b)
butcher_array[1:-1, :] = a.reshape(order - 2, -1, order='F')
self._butcher_arrays.update({order: butcher_array})
D_left = np.ones((order - 1, 1), dtype=int)
D_right = np.diag(-1 * np.ones((order - 1, ), dtype=int))
D_matrix = np.hstack([D_left, D_right])
self._D_matrices.update({order: D_matrix})
A_matrix = self.butcher_array(order)[1:, :]
self._A_matrices.update({order: A_matrix})
A_index_array = np.array(range(A_matrix.size), dtype=int)
self._A_index_arrays.update({order: A_index_array})
D_num_row, D_num_col = D_matrix.shape
D_rows = np.array(range(D_num_row), dtype=int)
D_left = D_rows * D_num_col
D_right = D_rows * (D_num_col + 1) + 1
D_index_array = np.concatenate((D_left, D_right))
D_index_array.sort()
self._D_index_arrays.update({order: D_index_array})
# print(f'x: {radau_points}')
# print(f'w: {radau_weights}, {sum(radau_weights)}')
# print(f"a: {butcher_array}")
# print(f"A: {D_matrix}")
# print(f"I: {A_matrix}")
# input()
def lobatto_generator(self, order):
num_interior_points = order - 1
coefficients = [0] * (num_interior_points)
coefficients.append(1)
legendre_polynomial = np.polynomial.legendre.Legendre(coefficients)
self._polynomials.update({order: legendre_polynomial})
lobatto_points = legendre_polynomial.deriv().roots()
lobatto_points = np.insert(lobatto_points, 0, -1, axis=0)
lobatto_points = np.append(lobatto_points, 1)
self._quadrature_points.update({order: lobatto_points})
lobatto_weights = np.array(
[1 / (order * (order - 1) * (legendre_polynomial(x)**2)) for x in lobatto_points])
self._quadrature_weights.update({order: lobatto_weights})
butcher_points = self.quadrature_point(order, domain=[0, 1])
# print(f'x\': {butcher_points}')
butcher_array = np.zeros((order, order))
butcher_array[-1, :] = lobatto_weights
if order > 2:
A_row = (order + 1) * (order - 2)
A_col = order * (order - 2)
A = np.zeros((A_row, A_col))
b = np.zeros(A_row)
for k in range(order - 2):
# print(f'k: {k}')
for j in range(order):
# print(f'j: {j}')
row = j + k * order
# print(f'row: {row}')
for i in range(order - 2):
# print(f'i: {i}')
col = i + j * (order - 2)
# print(f'col: {col}')
A[row, col] = lobatto_weights[i + 1] * \
butcher_points[i + 1]**k
# print(f'A: {lobatto_weights[i+1] * butcher_points[i+1]**k}')
b[row] = (lobatto_weights[j] / (k + 1)) * (1 - butcher_points[j]
** (k + 1)) - lobatto_weights[-1] * lobatto_weights[j]
# print(f'b: {(lobatto_weights[j]/(k+1))*(1 - butcher_points[j]**(k+1)) - lobatto_weights[-1]*lobatto_weights[j]}\n')
del_row = []
for i, row in enumerate(A):
if np.count_nonzero(row) == 0:
del_row.append(i)
A = np.delete(A, del_row, axis=0)
b = np.delete(b, del_row, axis=0)
a = np.linalg.solve(A, b)
# print(f'A: {A}')
# print(f'b: {b}')
# print(f'a: {a}')
butcher_array[1:-1, :] = a.reshape(order - 2, -1, order='F')
self._butcher_arrays.update({order: butcher_array})
D_left = np.ones((num_interior_points, 1), dtype=int)
D_right = np.diag(-1 * np.ones((num_interior_points, ), dtype=int))
D_matrix = np.hstack([D_left, D_right])
self._D_matrices.update({order: D_matrix})
A_matrix = self.butcher_array(order)[1:, :]
self._A_matrices.update({order: A_matrix})
A_index_array = np.array(range(A_matrix.size), dtype=int)
self._A_index_arrays.update({order: A_index_array})
D_num_row, D_num_col = D_matrix.shape
D_rows = np.array(range(D_num_row), dtype=int)
D_left = D_rows * D_num_col
D_right = D_rows * (D_num_col + 1) + 1
D_index_array = np.concatenate((D_left, D_right))
D_index_array.sort()
self._D_index_arrays.update({order: D_index_array})
# print(f'x: {lobatto_points}')
# print(f'w: {lobatto_weights}, {sum(lobatto_weights)}')
# print(f"a: {butcher_array}")
# print(f"A: {D_matrix}")
# print(f"I: {A_matrix}")
# input()
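

# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: the Quadrature class
# above needs a full OCP backend, but the Legendre-Gauss-Lobatto points and
# weights themselves only need numpy. The helper below mirrors
# lobatto_generator for a single order so the numbers can be checked in
# isolation. The helper name is an assumption, and the weights follow the
# module's 1/(n*(n-1)*P(x)**2) normalisation (half the classical Lobatto
# weights), so they sum to 1 rather than 2.
# ---------------------------------------------------------------------------
def _lobatto_points_and_weights_sketch(order):
    """Return Lobatto points and module-normalised weights for one order."""
    legendre_polynomial = np.polynomial.legendre.Legendre([0] * (order - 1) + [1])
    points = np.concatenate(([-1.0], legendre_polynomial.deriv().roots(), [1.0]))
    weights = np.array([1 / (order * (order - 1) * legendre_polynomial(x) ** 2)
                        for x in points])
    return points, weights


if __name__ == "__main__":
    pts, wts = _lobatto_points_and_weights_sketch(4)
    print(pts)              # approximately [-1, -0.447, 0.447, 1]
    print(wts, wts.sum())   # weights sum to 1 under this normalisation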
|
[
"numpy.insert",
"numpy.linalg.solve",
"numpy.ones",
"numpy.hstack",
"numpy.delete",
"numpy.append",
"numpy.array",
"numpy.zeros",
"numpy.count_nonzero",
"numpy.concatenate",
"numpy.polynomial.legendre.Legendre",
"pyproprop.Options"
] |
[((1165, 1233), 'pyproprop.Options', 'Options', (['(GAUSS, LOBATTO, RADAU)'], {'default': 'LOBATTO', 'unsupported': 'GAUSS'}), '((GAUSS, LOBATTO, RADAU), default=LOBATTO, unsupported=GAUSS)\n', (1172, 1233), False, 'from pyproprop import Options\n'), ((4125, 4170), 'numpy.polynomial.legendre.Legendre', 'np.polynomial.legendre.Legendre', (['coefficients'], {}), '(coefficients)\n', (4156, 4170), True, 'import numpy as np\n'), ((4522, 4567), 'numpy.polynomial.legendre.Legendre', 'np.polynomial.legendre.Legendre', (['coefficients'], {}), '(coefficients)\n', (4553, 4567), True, 'import numpy as np\n'), ((5021, 5045), 'numpy.zeros', 'np.zeros', (['(order, order)'], {}), '((order, order))\n', (5029, 5045), True, 'import numpy as np\n'), ((6233, 6267), 'numpy.ones', 'np.ones', (['(order - 1, 1)'], {'dtype': 'int'}), '((order - 1, 1), dtype=int)\n', (6240, 6267), True, 'import numpy as np\n'), ((6353, 6381), 'numpy.hstack', 'np.hstack', (['[D_left, D_right]'], {}), '([D_left, D_right])\n', (6362, 6381), True, 'import numpy as np\n'), ((6873, 6906), 'numpy.concatenate', 'np.concatenate', (['(D_left, D_right)'], {}), '((D_left, D_right))\n', (6887, 6906), True, 'import numpy as np\n'), ((7414, 7459), 'numpy.polynomial.legendre.Legendre', 'np.polynomial.legendre.Legendre', (['coefficients'], {}), '(coefficients)\n', (7445, 7459), True, 'import numpy as np\n'), ((7610, 7650), 'numpy.insert', 'np.insert', (['lobatto_points', '(0)', '(-1)'], {'axis': '(0)'}), '(lobatto_points, 0, -1, axis=0)\n', (7619, 7650), True, 'import numpy as np\n'), ((7676, 7704), 'numpy.append', 'np.append', (['lobatto_points', '(1)'], {}), '(lobatto_points, 1)\n', (7685, 7704), True, 'import numpy as np\n'), ((8103, 8127), 'numpy.zeros', 'np.zeros', (['(order, order)'], {}), '((order, order))\n', (8111, 8127), True, 'import numpy as np\n'), ((9849, 9893), 'numpy.ones', 'np.ones', (['(num_interior_points, 1)'], {'dtype': 'int'}), '((num_interior_points, 1), dtype=int)\n', (9856, 9893), True, 'import numpy as np\n'), ((9989, 10017), 'numpy.hstack', 'np.hstack', (['[D_left, D_right]'], {}), '([D_left, D_right])\n', (9998, 10017), True, 'import numpy as np\n'), ((10509, 10542), 'numpy.concatenate', 'np.concatenate', (['(D_left, D_right)'], {}), '((D_left, D_right))\n', (10523, 10542), True, 'import numpy as np\n'), ((5219, 5243), 'numpy.zeros', 'np.zeros', (['(A_row, A_col)'], {}), '((A_row, A_col))\n', (5227, 5243), True, 'import numpy as np\n'), ((5260, 5275), 'numpy.zeros', 'np.zeros', (['A_row'], {}), '(A_row)\n', (5268, 5275), True, 'import numpy as np\n'), ((5968, 5997), 'numpy.delete', 'np.delete', (['A', 'del_row'], {'axis': '(0)'}), '(A, del_row, axis=0)\n', (5977, 5997), True, 'import numpy as np\n'), ((6014, 6043), 'numpy.delete', 'np.delete', (['b', 'del_row'], {'axis': '(0)'}), '(b, del_row, axis=0)\n', (6023, 6043), True, 'import numpy as np\n'), ((6060, 6081), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (6075, 6081), True, 'import numpy as np\n'), ((8299, 8323), 'numpy.zeros', 'np.zeros', (['(A_row, A_col)'], {}), '((A_row, A_col))\n', (8307, 8323), True, 'import numpy as np\n'), ((8340, 8355), 'numpy.zeros', 'np.zeros', (['A_row'], {}), '(A_row)\n', (8348, 8355), True, 'import numpy as np\n'), ((9491, 9520), 'numpy.delete', 'np.delete', (['A', 'del_row'], {'axis': '(0)'}), '(A, del_row, axis=0)\n', (9500, 9520), True, 'import numpy as np\n'), ((9537, 9566), 'numpy.delete', 'np.delete', (['b', 'del_row'], {'axis': '(0)'}), '(b, del_row, axis=0)\n', (9546, 9566), True, 'import numpy as 
np\n'), ((9583, 9604), 'numpy.linalg.solve', 'np.linalg.solve', (['A', 'b'], {}), '(A, b)\n', (9598, 9604), True, 'import numpy as np\n'), ((4339, 4352), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4347, 4352), True, 'import numpy as np\n'), ((4847, 4860), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (4855, 4860), True, 'import numpy as np\n'), ((6299, 6331), 'numpy.ones', 'np.ones', (['(order - 1,)'], {'dtype': 'int'}), '((order - 1,), dtype=int)\n', (6306, 6331), True, 'import numpy as np\n'), ((9925, 9967), 'numpy.ones', 'np.ones', (['(num_interior_points,)'], {'dtype': 'int'}), '((num_interior_points,), dtype=int)\n', (9932, 9967), True, 'import numpy as np\n'), ((5886, 5907), 'numpy.count_nonzero', 'np.count_nonzero', (['row'], {}), '(row)\n', (5902, 5907), True, 'import numpy as np\n'), ((9409, 9430), 'numpy.count_nonzero', 'np.count_nonzero', (['row'], {}), '(row)\n', (9425, 9430), True, 'import numpy as np\n')]
|
import time

import visa
import numpy as np
class Agilent54845A:
def __init__(self,instrument,chan_num=1):
"""
        Default constructor only requires direct access to the underlying visa
handler. See the method fromResourceManager for a more user-friendly way
of constructing the class.
"""
self.channel_num = chan_num
self.instrument = instrument
@classmethod
def fromResourceManager(cls,resource_manager,device_type="GPIB"):
"""
Parameters:
-----------
resource_manager Resource manager from the visa module.
See pyvisa documentation.
device_type Specifies the type of device to
communicate with.
"""
resources = resource_manager.list_resources()
gpib_resources = list(filter(lambda resource_id :
device_type in resource_id, resources))
# need to handle if no GPIB devices found
if len(gpib_resources) == 0: raise Exception("No GPIB devices found.")
# need to handle if more than one GPIB resource connected.
# TODO: this will be necessary when we add AWG or another GPIB device.
if len(gpib_resources) > 1: raise Exception("More than one device found")
instrument = resource_manager.open_resource(gpib_resources[0])
instrument.timeout = 6000
#instrument.write("*RST")
return cls(instrument)
def get_xrange(self):
return self.instrument.query_ascii_values("WAV:XRAN?")[0]
def get_xunits(self):
return self.instrument.query("WAV:XUN?").rstrip('\n')
def get_yrange(self):
return self.instrument.query_ascii_values("WAV:YRAN?")[0]
def get_yunits(self):
return self.instrument.query("WAV:YUN?").rstrip('\n')
def get_offset(self):
return self.instrument.query_ascii_values("CHAN%d:OFFS?" % self.channel_num)[0]
def get_bottom_bound(self):
""" Gets the voltage at the bottom of the scope window. """
return self.get_offset() - 0.5*self.get_yrange()
def get_top_bound(self):
""" Gets the voltage at the top of the scope window. """
return self.get_offset() + 0.5*self.get_yrange()
def set_offset(self,value):
""" Sets the center of the window of the scope. """
offset_command = "CHAN%d:OFFS " % self.channel_num
self.instrument.write(offset_command + str(value))
def set_range(self,value):
""" Sets the total vertical range of the scope. """
range_command = "CHAN%d:RANG " % self.channel_num
self.instrument.write(range_command + str(value))
def recenter(self):
v_average = self.instrument.query_ascii_values("MEAS:VAV?")[0]
self.instrument.write("CHAN" + str(self.channel_num) + ":OFFS " + str(v_average))
def scope_autoscale(self):
"""
Instructs the oscilloscope to autoscale the axes. Returns the
values of the ranges after doing the autoscale.
"""
self.instrument.write("AUT")
# return range of x,y values after doing auto scale
return {'x' : [self.get_xrange(), self.get_xunits()],
'y': [self.get_yrange(), self.get_yunits()]}
def reset_window(self):
"""
Resets the window to full scale (16 V), then brings the signal to center.
"""
self.set_range(16)
self.recenter()
self.recenter() # twice needed in case signal was out of range the first time.
def autoscale(self):
"""
Auto scaling function to find the optimal window for a given signal.
"""
self.reset_window()
self.rescale(True)
def rescale(self,quick_scale=True):
"""
        Rescales the window iteratively, based on measurements of the signal, so
        that a noisy signal plus 5 sigma of fluctuations fills the entire window.
By setting quick_scale=True, it will first attempt a rough guess of the final
window config before starting an iterative procedure. If this is used just after
reset_window(), this should speed up the scaling.
Usage:
self.reset_window()
self.rescale(False)
Parameters:
-----------
        quick_scale             Boolean to decide whether or not
                                to 'one-shot' the window config. Use
                                only if reset_window() was called before.
"""
self.instrument.write("MEAS:CLE") # clear current measurements.
self.instrument.write("MEAS:STAT ON") # turn on statistics tracking
# measurements to perform.
self.instrument.write("MEAS:VMAX")
self.instrument.write("MEAS:VMIN")
self.instrument.write("MEAS:VAV")
time.sleep(8)
# contains results of all three measurements.
query = self.instrument.query("MEAS:RES?").split(",")
# maximum voltage of signal
vmax = np.array(query[1:7],dtype=float)
if query[0].upper() != "V MAX(1)":
raise Exception(query[0] + " is not measuring maximum voltage.")
# minimum voltage of signal
vmin = np.array(query[8:14],dtype=float)
if query[7].upper() != "V MIN(1)":
raise Exception(query[7] + " is not measuring minimum voltage.")
# average signal of signal
vav = np.array(query[15:21],dtype=float)
if query[14].upper() != "V AVG(1)":
raise Exception(query[14] + " is not measuring minimum voltage.")
num_samples = vmax[-1]
if num_samples < 5:
raise Warning("Only collected " + str(num_samples) + " samples.")
# if signal goes outside of current window bounds, zoom out before continuing.
if vmin[1] < self.get_bottom_bound() or vmax[2] > self.get_top_bound():
self.set_offset((vav[2] + vav[1])/2)
self.set_range(self.get_yrange()*2)
self.rescale(False)
return
# find the maximum deviation of the signal from its average while accounting
# for 5 sigma of fluctuations.
v_amp = vmax if np.abs(vmax[2] - vav[2]) > np.abs(vmin[2] - vav[2] ) else vmin
v_amp_max = np.abs(v_amp[2] - vav[2]) + 5*np.sqrt(2)*v_amp[4]
# if high voltage signal, oscilloscope is not capable of performing high
# resolution zooms. If this is the case, attempt zoom beyond scope capabilities.
# Additionally, turn off 'one-shot' attempt as this is not accurate for
# high voltages.
rmin = 0.064
if vav[2] > 1.0:
rmin = 0.8
quick_scale = False
# ESCAPE CONDITION
range = self.get_yrange()
if range/2 < v_amp_max or range/2 < rmin:
self.set_offset((vav[2] + vav[1])/2)
return
# one-shot attempt
if quick_scale:
self.set_range(v_amp_max)
self.recenter()
self.rescale(False)
return
# iterative attempts
self.set_range(range/2)
self.set_offset((vav[2] + vav[1])/2)
self.rescale(False)
def id(self):
return self.instrument.query('*IDN?')
def set_waveform_source(self,channel_num):
"""
Parameters
----------
channel_num Sets the source for the WAVEFORM operation
the channel given by channel_num.
"""
self.channel_num = channel_num
self.instrument.write("WAV:SOUR CHAN %d" % channel_num)
def enable_header_data(self):
self.instrument.write("SYST:HEAD ON")
def disable_header_data(self):
self.instrument.write("SYST:HEAD OFF")
def get_waveform(self):
"""
Main data-taking function. Grabs the waveform currently measured by
oscilloscope while checking that the waveform is currently within window
bounds. If not, will automatically autoscale.
"""
num_attempts = 0
while True:
wave = self.instrument.query_ascii_values("WAV:DATA?",container = np.array)
within_bounds = (wave < self.get_top_bound()).all() and (wave > self.get_bottom_bound()).all()
if within_bounds:
return wave
else:
self.autoscale()
num_attempts += 1
def get_num_points(self):
"""
Returns the number of points measured by the scope for the waveform function.
"""
return int(self.instrument.query_ascii_values("WAV:POIN?")[0])
def close(self):
self.instrument.close()
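

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original driver. It assumes a
# GPIB-connected Agilent 54845A and the legacy `visa` (PyVISA) module, so it
# only does something useful with the instrument attached; nothing runs on
# import because of the __main__ guard.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    rm = visa.ResourceManager()
    scope = Agilent54845A.fromResourceManager(rm, device_type="GPIB")
    print(scope.id())
    scope.autoscale()                      # iterative window scaling
    wave = scope.get_waveform()            # numpy array of voltage samples
    print(wave.shape, wave.min(), wave.max())
    scope.close()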
|
[
"numpy.array",
"numpy.sqrt",
"numpy.abs"
] |
[((5140, 5173), 'numpy.array', 'np.array', (['query[1:7]'], {'dtype': 'float'}), '(query[1:7], dtype=float)\n', (5148, 5173), True, 'import numpy as np\n'), ((5345, 5379), 'numpy.array', 'np.array', (['query[8:14]'], {'dtype': 'float'}), '(query[8:14], dtype=float)\n', (5353, 5379), True, 'import numpy as np\n'), ((5549, 5584), 'numpy.array', 'np.array', (['query[15:21]'], {'dtype': 'float'}), '(query[15:21], dtype=float)\n', (5557, 5584), True, 'import numpy as np\n'), ((6392, 6417), 'numpy.abs', 'np.abs', (['(v_amp[2] - vav[2])'], {}), '(v_amp[2] - vav[2])\n', (6398, 6417), True, 'import numpy as np\n'), ((6309, 6333), 'numpy.abs', 'np.abs', (['(vmax[2] - vav[2])'], {}), '(vmax[2] - vav[2])\n', (6315, 6333), True, 'import numpy as np\n'), ((6336, 6360), 'numpy.abs', 'np.abs', (['(vmin[2] - vav[2])'], {}), '(vmin[2] - vav[2])\n', (6342, 6360), True, 'import numpy as np\n'), ((6422, 6432), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (6429, 6432), True, 'import numpy as np\n')]
|
'''
Provider for dataset
'''
import os
import os.path
import numpy as np
import time
class SceneflowDataset():
def __init__(self, root='/tmp/FlyingThings3D_subset_processed_35m', npoints=8192, mode = 'train_ft3d'):
self.npoints = npoints
self.mode = mode
self.root = root
if self.mode == 'eval_kitti':
self.samples = self.make_dataset()
self.datapath = root
self.file_list = os.listdir(self.datapath)
self.npoints = 16384
elif self.mode == 'train_ft3d':
self.datapath = os.path.join(self.root, 'train')
self.file_list = os.listdir(self.datapath)
elif self.mode == 'eval_ft3d':
self.datapath = os.path.join(self.root, 'val')
self.file_list = os.listdir(self.datapath)
def __getitem__(self, index):
np.random.seed(0)
if self.mode == 'eval_kitti':
fn = self.samples[index]
else:
fn = self.file_list[index]
fn = os.path.join(self.datapath, fn)
pc1 = os.path.join(fn,'pc1.npy')
pc2 = os.path.join(fn,'pc2.npy')
with open(pc1, 'rb') as fp:
pos1 = np.load(fp)
with open(pc2, 'rb') as fp2:
pos2 = np.load(fp2)
flow = pos2[:, :3] - pos1[:, :3]
if self.mode == 'eval_kitti':
is_ground = np.logical_or(pos1[:,1] < -1.35, pos2[:,1] < -1.35)
not_ground = np.logical_not(is_ground)
near_mask = np.logical_and(pos1[:, 2] < 35, pos2[:, 2] < 35)
near_mask = np.logical_and(not_ground, near_mask)
indices = np.where(near_mask)[0]
else:
near_mask = np.logical_and(pos1[:, 2] < 35, pos2[:, 2] < 35)
indices = np.where(near_mask)[0]
if len(indices) >= self.npoints:
sample_idx1 = np.random.choice(indices, self.npoints, replace=False)
else:
sample_idx1 = np.concatenate((indices, np.random.choice(indices, self.npoints - len(indices), replace=True)), axis=-1)
if len(indices) >= self.npoints:
sample_idx2 = np.random.choice(indices, self.npoints, replace=False)
else:
sample_idx2 = np.concatenate((indices, np.random.choice(indices, self.npoints - len(indices), replace=True)), axis=-1)
pos1 = pos1[sample_idx1, :]
pos2 = pos2[sample_idx2, :]
flow = flow[sample_idx1, :]
if self.mode == 'eval_kitti':
return pos1, pos2, flow, fn
else:
return pos1, pos2, flow
def __len__(self):
return len(self.file_list)
def make_dataset(self):
do_mapping = True
root = os.path.realpath(os.path.expanduser(self.root))
all_paths = sorted(os.walk(root))
useful_paths = [item[0] for item in all_paths if len(item[1]) == 0]
try:
assert (len(useful_paths) == 200)
except AssertionError:
print('assert (len(useful_paths) == 200) failed!', len(useful_paths))
if do_mapping:
mapping_path = os.path.join(os.path.dirname(__file__), 'KITTI_mapping.txt')
print('mapping_path', mapping_path)
with open(mapping_path) as fd:
lines = fd.readlines()
lines = [line.strip() for line in lines]
useful_paths = [path for path in useful_paths if lines[int(os.path.split(path)[-1])] != '']
res_paths = useful_paths
return res_paths
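

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original provider. The root path
# is the class default and assumes the preprocessed FlyingThings3D point
# clouds already exist on disk; nothing runs on import because of the
# __main__ guard.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    dataset = SceneflowDataset(root='/tmp/FlyingThings3D_subset_processed_35m',
                              npoints=8192, mode='train_ft3d')
    print('number of samples:', len(dataset))
    pos1, pos2, flow = dataset[0]   # point clouds and per-point scene flow
    print(pos1.shape, pos2.shape, flow.shape)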
|
[
"os.listdir",
"numpy.logical_and",
"numpy.random.choice",
"numpy.where",
"numpy.logical_not",
"os.path.join",
"numpy.logical_or",
"os.walk",
"os.path.split",
"os.path.dirname",
"numpy.random.seed",
"numpy.load",
"os.path.expanduser"
] |
[((895, 912), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (909, 912), True, 'import numpy as np\n'), ((1120, 1147), 'os.path.join', 'os.path.join', (['fn', '"""pc1.npy"""'], {}), "(fn, 'pc1.npy')\n", (1132, 1147), False, 'import os\n'), ((1161, 1188), 'os.path.join', 'os.path.join', (['fn', '"""pc2.npy"""'], {}), "(fn, 'pc2.npy')\n", (1173, 1188), False, 'import os\n'), ((463, 488), 'os.listdir', 'os.listdir', (['self.datapath'], {}), '(self.datapath)\n', (473, 488), False, 'import os\n'), ((1061, 1092), 'os.path.join', 'os.path.join', (['self.datapath', 'fn'], {}), '(self.datapath, fn)\n', (1073, 1092), False, 'import os\n'), ((1244, 1255), 'numpy.load', 'np.load', (['fp'], {}), '(fp)\n', (1251, 1255), True, 'import numpy as np\n'), ((1313, 1325), 'numpy.load', 'np.load', (['fp2'], {}), '(fp2)\n', (1320, 1325), True, 'import numpy as np\n'), ((1448, 1501), 'numpy.logical_or', 'np.logical_or', (['(pos1[:, 1] < -1.35)', '(pos2[:, 1] < -1.35)'], {}), '(pos1[:, 1] < -1.35, pos2[:, 1] < -1.35)\n', (1461, 1501), True, 'import numpy as np\n'), ((1526, 1551), 'numpy.logical_not', 'np.logical_not', (['is_ground'], {}), '(is_ground)\n', (1540, 1551), True, 'import numpy as np\n'), ((1577, 1625), 'numpy.logical_and', 'np.logical_and', (['(pos1[:, 2] < 35)', '(pos2[:, 2] < 35)'], {}), '(pos1[:, 2] < 35, pos2[:, 2] < 35)\n', (1591, 1625), True, 'import numpy as np\n'), ((1650, 1687), 'numpy.logical_and', 'np.logical_and', (['not_ground', 'near_mask'], {}), '(not_ground, near_mask)\n', (1664, 1687), True, 'import numpy as np\n'), ((1781, 1829), 'numpy.logical_and', 'np.logical_and', (['(pos1[:, 2] < 35)', '(pos2[:, 2] < 35)'], {}), '(pos1[:, 2] < 35, pos2[:, 2] < 35)\n', (1795, 1829), True, 'import numpy as np\n'), ((1956, 2010), 'numpy.random.choice', 'np.random.choice', (['indices', 'self.npoints'], {'replace': '(False)'}), '(indices, self.npoints, replace=False)\n', (1972, 2010), True, 'import numpy as np\n'), ((2232, 2286), 'numpy.random.choice', 'np.random.choice', (['indices', 'self.npoints'], {'replace': '(False)'}), '(indices, self.npoints, replace=False)\n', (2248, 2286), True, 'import numpy as np\n'), ((2830, 2859), 'os.path.expanduser', 'os.path.expanduser', (['self.root'], {}), '(self.root)\n', (2848, 2859), False, 'import os\n'), ((2889, 2902), 'os.walk', 'os.walk', (['root'], {}), '(root)\n', (2896, 2902), False, 'import os\n'), ((591, 623), 'os.path.join', 'os.path.join', (['self.root', '"""train"""'], {}), "(self.root, 'train')\n", (603, 623), False, 'import os\n'), ((653, 678), 'os.listdir', 'os.listdir', (['self.datapath'], {}), '(self.datapath)\n', (663, 678), False, 'import os\n'), ((1711, 1730), 'numpy.where', 'np.where', (['near_mask'], {}), '(near_mask)\n', (1719, 1730), True, 'import numpy as np\n'), ((1852, 1871), 'numpy.where', 'np.where', (['near_mask'], {}), '(near_mask)\n', (1860, 1871), True, 'import numpy as np\n'), ((3216, 3241), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (3231, 3241), False, 'import os\n'), ((755, 785), 'os.path.join', 'os.path.join', (['self.root', '"""val"""'], {}), "(self.root, 'val')\n", (767, 785), False, 'import os\n'), ((815, 840), 'os.listdir', 'os.listdir', (['self.datapath'], {}), '(self.datapath)\n', (825, 840), False, 'import os\n'), ((3523, 3542), 'os.path.split', 'os.path.split', (['path'], {}), '(path)\n', (3536, 3542), False, 'import os\n')]
|
import numpy as np
import pandas as pd
import torch
import torchvision
from am_utils.utils import walk_dir
from torch.utils.data import DataLoader
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from tqdm import tqdm
from ..dataset.dataset_object_inference import DatasetObjectInference, DatasetObjectInferenceMosaic
from ..transforms.bbox import get_test_transform
from ..utils.utils import collate_fn
from ..utils.utils import remove_overlapping_boxes, get_boxes_above_threshold
def get_df_of_file_list(input_dir, id_name='image_id'):
"""
List files in given folder and generate a dataframe for the data loader.
Parameters
----------
input_dir : str
Input directory
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with a list of input files.
"""
files = walk_dir(input_dir)
files = [fn[len(input_dir) + 1:] for fn in files]
df = pd.DataFrame({id_name: files})
return df
def load_detection_model(model_fn, num_classes=2, device=None):
"""
Load the object detection model from a given file.
Parameters
----------
model_fn : str
Model filename with the full path.
num_classes : int, optional
Number of classes in the object detection model.
Default is 2 (one class + background).
device : torch.device
Device to send the model to ('cpu' or 'cuda').
If None, the device will be detected automatically.
Default is None.
Returns
-------
model:
Torch model with loaded weights.
"""
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False, pretrained_backbone=False)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# Load the trained weights
model.load_state_dict(torch.load(model_fn))
model.eval()
if device is None:
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model.to(device)
return model
def detect_bboxes(input_dir, model_fn, batch_size=2, maxsize=None,
detection_threshold=0.5, overlap_threshold=0.1, id_name='image_id'):
"""
    Detect object bounding boxes in all images in a given directory and return a dataframe with the results.
Parameters
----------
input_dir : str
Input directory.
model_fn : str
Model filename with the full path.
batch_size : int, optional
Batch size for predictions.
Default is 2.
maxsize : int, optional
Pad the input image to a square with this size.
Default is None.
detection_threshold : float, optional
Threshold (between 0 and 1) for the confidence of the bounding boxes.
Bounding boxes with a confidence score lower than `detection_threshold` will not be included.
Default is 0.5.
overlap_threshold : float, optional
Maximum allowed intersection-over-union (IOU) score for two bounding boxes.
If two boxes overlap with a higher score, the box with a lower confidence score will be removed
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with detected bounding box coordinates.
"""
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = load_detection_model(model_fn, device=device)
loader_kwargs = dict(batch_size=batch_size,
shuffle=False,
num_workers=batch_size,
drop_last=False)
df = get_df_of_file_list(input_dir)
ds = DatasetObjectInference(df, input_dir,
get_test_transform(),
maxsize=maxsize)
dl = DataLoader(ds, collate_fn=collate_fn, **loader_kwargs)
results = pd.DataFrame()
for images, image_ids in tqdm(dl):
images = list(image.to(device) for image in images)
outputs = model(images)
for i in range(len(outputs)):
bboxes, scores = get_boxes_above_threshold(outputs[i], detection_threshold)
bboxes, scores = remove_overlapping_boxes(bboxes, scores,
overlap_threshold, return_full=True)
bboxes = bboxes[scores > 0].data.cpu().numpy()
scores = scores[scores > 0].data.cpu().numpy()
results = __append_detections(bboxes, scores, results, image_ids[i], id_name)
return results
def __append_detections(bboxes, scores, results, image_id, id_name):
cur_results = pd.DataFrame(np.int_(np.round_(bboxes)), columns=['x1', 'y1', 'x2', 'y2'])
cur_results['scores'] = scores
cur_results[id_name] = image_id
results = pd.concat([results, cur_results], ignore_index=True)
return results
def __get_mosaic_df(df, imgshape, maxsize):
step = int(maxsize / 2)
ind_i = np.arange(int(imgshape[0] / step + 1)) * step if imgshape[0] > maxsize else [0]
ind_j = np.arange(int(imgshape[1] / step + 1)) * step if imgshape[1] > maxsize else [0]
boxes = []
for i in ind_i:
for j in ind_j:
boxes.append([j, i, j + maxsize, i + maxsize])
df_new = pd.DataFrame()
for i in range(len(df)):
cur_df = pd.DataFrame(boxes, columns=['x1', 'y1', 'x2', 'y2'])
cur_df['image_id'] = df.iloc[i]['image_id']
df_new = pd.concat([df_new, cur_df], ignore_index=True)
return df_new
def __add_shift(boxes, shift):
boxes[:, 0] += shift[0]
boxes[:, 2] += shift[0]
boxes[:, 1] += shift[1]
boxes[:, 3] += shift[1]
return boxes
def detect_bboxes_mosaic(input_dir, model_fn, maxsize, imgshape, batch_size=2,
detection_threshold=0.5, overlap_threshold=0.1, id_name='image_id'):
"""
    Detect object bounding boxes in all images in a given directory and return a dataframe with the results.
Parameters
----------
input_dir : str
Input directory.
model_fn : str
Model filename with the full path.
maxsize : int
Pad the input image to a square with this size.
imgshape : tuple
Shape of the input image.
If greater than `maxsize`, the mosaic option will be used to crop ROI of `maxsize`.
batch_size : int, optional
Batch size for predictions.
Default is 2.
detection_threshold : float, optional
Threshold (between 0 and 1) for the confidence of the bounding boxes.
Bounding boxes with a confidence score lower than `detection_threshold` will not be included.
Default is 0.5.
overlap_threshold : float, optional
Maximum allowed intersection-over-union (IOU) score for two bounding boxes.
If two boxes overlap with a higher score, the box with a lower confidence score will be removed
id_name : str, optional
Column name to specify image ID.
Default is 'image_id'
Returns
-------
pd.DataFrame
Dataframe with detected bounding box coordinates.
"""
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
model = load_detection_model(model_fn, device=device)
loader_kwargs = dict(batch_size=batch_size,
shuffle=False,
num_workers=batch_size,
drop_last=False)
df = __get_mosaic_df(get_df_of_file_list(input_dir), imgshape, maxsize)
ds = DatasetObjectInferenceMosaic(df, input_dir,
get_test_transform(),
maxsize=maxsize)
dl = DataLoader(ds, collate_fn=collate_fn, **loader_kwargs)
results = pd.DataFrame()
for images, image_ids, start_coord in tqdm(dl):
images = list(image.to(device) for image in images)
outputs = model(images)
for i in range(len(outputs)):
bboxes, scores = get_boxes_above_threshold(outputs[i], detection_threshold)
bboxes = bboxes.data.cpu().numpy()
scores = scores.data.cpu().numpy()
bboxes = __add_shift(bboxes, start_coord[i])
results = __append_detections(bboxes, scores, results, image_ids[i], id_name)
results2 = pd.DataFrame()
for image_id in results['image_id'].unique():
cur_df = results[results['image_id'] == image_id]
bboxes = torch.tensor(cur_df[['x1', 'y1', 'x2', 'y2']].values).to(device)
scores = torch.tensor(cur_df['scores'].values).to(device)
bboxes, scores = remove_overlapping_boxes(bboxes, scores,
overlap_threshold, return_full=True)
bboxes = bboxes[scores > 0].data.cpu().numpy()
scores = scores[scores > 0].data.cpu().numpy()
results2 = __append_detections(bboxes, scores, results2, image_id, id_name)
return results2
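

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original module (kept as a
# comment because the relative imports above require running inside the
# package). The paths and weight file are placeholders; a trained Faster
# R-CNN checkpoint produced by the matching training code is assumed to
# exist.
# ---------------------------------------------------------------------------
# df = detect_bboxes(input_dir='/path/to/images',
#                    model_fn='/path/to/fasterrcnn_weights.pth',
#                    batch_size=2, maxsize=512,
#                    detection_threshold=0.5, overlap_threshold=0.1)
# df.to_csv('detections.csv', index=False)   # columns: x1, y1, x2, y2, scores, image_id
#
# # For images larger than the training crop, tile them instead:
# # df = detect_bboxes_mosaic(input_dir='/path/to/images',
# #                           model_fn='/path/to/fasterrcnn_weights.pth',
# #                           maxsize=512, imgshape=(2048, 2048))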
|
[
"numpy.round_",
"torch.load",
"tqdm.tqdm",
"torchvision.models.detection.faster_rcnn.FastRCNNPredictor",
"am_utils.utils.walk_dir",
"torch.tensor",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"torch.cuda.is_available",
"torch.utils.data.DataLoader",
"pandas.DataFrame",
"pandas.concat",
"torch.device"
] |
[((931, 950), 'am_utils.utils.walk_dir', 'walk_dir', (['input_dir'], {}), '(input_dir)\n', (939, 950), False, 'from am_utils.utils import walk_dir\n'), ((1014, 1044), 'pandas.DataFrame', 'pd.DataFrame', (['{id_name: files}'], {}), '({id_name: files})\n', (1026, 1044), True, 'import pandas as pd\n'), ((1681, 1782), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'torchvision.models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(False)', 'pretrained_backbone': '(False)'}), '(pretrained=False,\n pretrained_backbone=False)\n', (1733, 1782), False, 'import torchvision\n'), ((1885, 1928), 'torchvision.models.detection.faster_rcnn.FastRCNNPredictor', 'FastRCNNPredictor', (['in_features', 'num_classes'], {}), '(in_features, num_classes)\n', (1902, 1928), False, 'from torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n'), ((3990, 4044), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'collate_fn': 'collate_fn'}), '(ds, collate_fn=collate_fn, **loader_kwargs)\n', (4000, 4044), False, 'from torch.utils.data import DataLoader\n'), ((4059, 4073), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4071, 4073), True, 'import pandas as pd\n'), ((4103, 4111), 'tqdm.tqdm', 'tqdm', (['dl'], {}), '(dl)\n', (4107, 4111), False, 'from tqdm import tqdm\n'), ((4971, 5023), 'pandas.concat', 'pd.concat', (['[results, cur_results]'], {'ignore_index': '(True)'}), '([results, cur_results], ignore_index=True)\n', (4980, 5023), True, 'import pandas as pd\n'), ((5432, 5446), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5444, 5446), True, 'import pandas as pd\n'), ((7838, 7892), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'collate_fn': 'collate_fn'}), '(ds, collate_fn=collate_fn, **loader_kwargs)\n', (7848, 7892), False, 'from torch.utils.data import DataLoader\n'), ((7907, 7921), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7919, 7921), True, 'import pandas as pd\n'), ((7964, 7972), 'tqdm.tqdm', 'tqdm', (['dl'], {}), '(dl)\n', (7968, 7972), False, 'from tqdm import tqdm\n'), ((8450, 8464), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8462, 8464), True, 'import pandas as pd\n'), ((1987, 2007), 'torch.load', 'torch.load', (['model_fn'], {}), '(model_fn)\n', (1997, 2007), False, 'import torch\n'), ((3502, 3527), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3525, 3527), False, 'import torch\n'), ((3478, 3498), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3490, 3498), False, 'import torch\n'), ((3533, 3552), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (3545, 3552), False, 'import torch\n'), ((5493, 5546), 'pandas.DataFrame', 'pd.DataFrame', (['boxes'], {'columns': "['x1', 'y1', 'x2', 'y2']"}), "(boxes, columns=['x1', 'y1', 'x2', 'y2'])\n", (5505, 5546), True, 'import pandas as pd\n'), ((5616, 5662), 'pandas.concat', 'pd.concat', (['[df_new, cur_df]'], {'ignore_index': '(True)'}), '([df_new, cur_df], ignore_index=True)\n', (5625, 5662), True, 'import pandas as pd\n'), ((7296, 7321), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7319, 7321), False, 'import torch\n'), ((7272, 7292), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (7284, 7292), False, 'import torch\n'), ((7327, 7346), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7339, 7346), False, 'import torch\n'), ((2091, 2116), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2114, 2116), False, 'import torch\n'), ((2067, 
2087), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (2079, 2087), False, 'import torch\n'), ((2122, 2141), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (2134, 2141), False, 'import torch\n'), ((4832, 4849), 'numpy.round_', 'np.round_', (['bboxes'], {}), '(bboxes)\n', (4841, 4849), True, 'import numpy as np\n'), ((8590, 8643), 'torch.tensor', 'torch.tensor', (["cur_df[['x1', 'y1', 'x2', 'y2']].values"], {}), "(cur_df[['x1', 'y1', 'x2', 'y2']].values)\n", (8602, 8643), False, 'import torch\n'), ((8672, 8709), 'torch.tensor', 'torch.tensor', (["cur_df['scores'].values"], {}), "(cur_df['scores'].values)\n", (8684, 8709), False, 'import torch\n')]
|
"""
Pure Python implementation of the kernel functions
"""
import numpy as np
from scipy.special import erf
from utils import numpy_trans, numpy_trans_idx
s2pi = np.sqrt(2.0 * np.pi)
s2 = np.sqrt(2.0)
@numpy_trans
def norm1d_pdf(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.pdf`
"""
z = np.atleast_1d(z)
if out is None:
out = np.empty(z.shape, dtype=z.dtype)
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= s2pi
return out
@numpy_trans
def norm1d_cdf(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.cdf`
"""
np.divide(z, s2, out)
erf(out, out)
out *= 0.5
out += 0.5
return out
@numpy_trans
def norm1d_pm1(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm1`
"""
np.multiply(z, z, out)
out *= -0.5
np.exp(out, out)
out /= -s2pi
return out
@numpy_trans_idx
def norm1d_pm2(z, out):
"""
Full-python implementation of :py:func:`normal_kernel1d.pm2`
"""
np.divide(z, s2, out)
erf(out, out)
out /= 2
if z.shape:
zz = np.isfinite(z)
sz = z[zz]
out[zz] -= sz * np.exp(-0.5 * sz * sz) / s2pi
elif np.isfinite(z):
out -= z * np.exp(-0.5 * z * z) / s2pi
out += 0.5
return out
tricube_width = np.sqrt(35. / 243)
@numpy_trans_idx
def tricube_pdf(z, out=None):
np.multiply(z, tricube_width, out)
sel = (out > -1) & (out < 1)
out[~sel] = 0
out[sel] = 70. / 81 * (1 - abs(out[sel]) ** 3.) ** 3. * tricube_width
return out
@numpy_trans_idx
def tricube_cdf(z, out=None):
np.multiply(z, tricube_width, out)
sel_down = out <= -1
sel_up = out >= 1
sel_neg = (out < 0) & (~sel_down)
sel_pos = (out >= 0) & (~sel_up)
out[sel_up] = 1
out[sel_down] = 0
out[sel_pos] = 1. / 162 * \
(60 * (out[sel_pos] ** 7) - 7. *
(2 * (out[sel_pos] ** 10) + 15 * (out[sel_pos] ** 4)) +
140 * out[sel_pos] + 81)
out[sel_neg] = 1. / 162 * \
(60 * (out[sel_neg] ** 7) + 7. *
(2 * (out[sel_neg] ** 10) + 15 * (out[sel_neg] ** 4)) +
140 * out[sel_neg] + 81)
return out
@numpy_trans_idx
def tricube_pm1(z, out=None):
np.multiply(z, tricube_width, out)
out[out < 0] = -out[out < 0]
sel = out < 1
out[~sel] = 0
out[sel] = 7 / (3564 * tricube_width) * \
(165 * out[sel] ** 8 - 8 * (5 * out[sel] ** 11 + 33 * out[sel] ** 5) +
220 * out[sel] ** 2 - 81)
return out
@numpy_trans_idx
def tricube_pm2(z, out=None):
np.multiply(z, tricube_width, out)
sel_down = out <= -1
sel_up = out >= 1
sel_neg = (out < 0) & ~sel_down
sel_pos = (out >= 0) & ~sel_up
out[sel_down] = 0
out[sel_up] = 1
out[sel_pos] = 35. / (tricube_width * tricube_width * 486) * \
(4 * out[sel_pos] ** 9 - (out[sel_pos] ** 12 + 6 * out[sel_pos] ** 6) +
4 * out[sel_pos] ** 3 + 1)
out[sel_neg] = 35. / (tricube_width * tricube_width * 486) * \
(4 * out[sel_neg] ** 9 + (out[sel_neg] ** 12 + 6 * out[sel_neg] ** 6) +
4 * out[sel_neg] ** 3 + 1)
return out
epanechnikov_width = 1. / np.sqrt(5.)
@numpy_trans_idx
def epanechnikov_pdf(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel = (out > -1) & (out < 1)
out[~sel] = 0
out[sel] = (.75 * epanechnikov_width) * (1 - out[sel] ** 2)
return out
@numpy_trans_idx
def epanechnikov_cdf(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel_up = out >= 1
sel_down = out <= -1
out[sel_up] = 1
out[sel_down] = 0
sel = ~(sel_up | sel_down)
out[sel] = .25 * (2 + 3 * out[sel] - out[sel] ** 3)
return out
@numpy_trans_idx
def epanechnikov_pm1(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel = (out > -1) & (out < 1)
out[~sel] = 0
out[sel] = -3 / (16 * epanechnikov_width) * \
(1 - 2 * out[sel] ** 2 + out[sel] ** 4)
return out
@numpy_trans_idx
def epanechnikov_pm2(z, out=None):
np.multiply(z, epanechnikov_width, out)
sel_up = out >= 1
sel_down = out <= -1
out[sel_up] = 1
out[sel_down] = 0
sel = ~(sel_up | sel_down)
out[sel] = .25 * (2 + 5 * out[sel] ** 3 - 3 * out[sel] ** 5)
return out
@numpy_trans
def normal_o4_pdf(z, out=None):
norm1d_pdf(z, out)
out *= (3 - z ** 2) / 2
return out
@numpy_trans_idx
def normal_o4_cdf(z, out=None):
norm1d_cdf(z, out)
sel = np.isfinite(z)
out[sel] += z[sel] * norm1d_pdf(z[sel]) / 2
return out
@numpy_trans_idx
def normal_o4_pm1(z, out=None):
norm1d_pdf(z, out)
out -= normal_o4_pdf(z)
out[~np.isfinite(z)] = 0
return out
@numpy_trans_idx
def normal_o4_pm2(z, out=None):
np.power(z, 3, out)
out *= norm1d_pdf(z) / 2
out[~np.isfinite(z)] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_pdf(z, out=None):
np.power(z, 2., out)
out *= -15 / 8.
out += 9. / 8.
out[(z < -1) | (z > 1)] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_cdf(z, out=None):
np.power(z, 3, out)
out *= -5. / 8.
out += (4 + 9 * z) / 8.
out[z > 1] = 1
out[z < -1] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_pm1(z, out=None):
out = np.power(z, 4, out)
out *= -15. / 32.
out += 1. / 32. * (18 * z ** 2 - 3)
out[(z < -1) | (z > 1)] = 0
return out
@numpy_trans_idx
def epanechnikov_o4_pm2(z, out=None):
out = np.power(z, 3, out)
out *= .375
out -= .375 * np.power(z, 5)
out[(z < -1) | (z > 1)] = 0
return out
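

# ---------------------------------------------------------------------------
# Illustrative check, not part of the original module. It assumes the
# `numpy_trans` decorator imported from `utils` simply forwards (z, out) to
# the wrapped function, which is how the kernels above are written; nothing
# runs on import because of the __main__ guard.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    z = np.linspace(-4.0, 4.0, 9)
    out = np.empty_like(z)
    norm1d_pdf(z, out)
    # The Gaussian kernel above is exp(-z**2 / 2) / sqrt(2 * pi).
    assert np.allclose(out, np.exp(-0.5 * z * z) / s2pi)
    norm1d_cdf(z, out)
    # The CDF grows monotonically from ~0 to ~1 over this range.
    assert np.all(np.diff(out) > 0) and out[0] < 1e-3 and out[-1] > 1.0 - 1e-3
    print("norm1d kernel sanity checks passed")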
|
[
"numpy.multiply",
"numpy.sqrt",
"numpy.power",
"numpy.exp",
"scipy.special.erf",
"numpy.isfinite",
"numpy.empty",
"numpy.divide",
"numpy.atleast_1d"
] |
[((165, 185), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (172, 185), True, 'import numpy as np\n'), ((191, 203), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (198, 203), True, 'import numpy as np\n'), ((1354, 1373), 'numpy.sqrt', 'np.sqrt', (['(35.0 / 243)'], {}), '(35.0 / 243)\n', (1361, 1373), True, 'import numpy as np\n'), ((332, 348), 'numpy.atleast_1d', 'np.atleast_1d', (['z'], {}), '(z)\n', (345, 348), True, 'import numpy as np\n'), ((420, 442), 'numpy.multiply', 'np.multiply', (['z', 'z', 'out'], {}), '(z, z, out)\n', (431, 442), True, 'import numpy as np\n'), ((463, 479), 'numpy.exp', 'np.exp', (['out', 'out'], {}), '(out, out)\n', (469, 479), True, 'import numpy as np\n'), ((635, 656), 'numpy.divide', 'np.divide', (['z', 's2', 'out'], {}), '(z, s2, out)\n', (644, 656), True, 'import numpy as np\n'), ((661, 674), 'scipy.special.erf', 'erf', (['out', 'out'], {}), '(out, out)\n', (664, 674), False, 'from scipy.special import erf\n'), ((844, 866), 'numpy.multiply', 'np.multiply', (['z', 'z', 'out'], {}), '(z, z, out)\n', (855, 866), True, 'import numpy as np\n'), ((887, 903), 'numpy.exp', 'np.exp', (['out', 'out'], {}), '(out, out)\n', (893, 903), True, 'import numpy as np\n'), ((1064, 1085), 'numpy.divide', 'np.divide', (['z', 's2', 'out'], {}), '(z, s2, out)\n', (1073, 1085), True, 'import numpy as np\n'), ((1090, 1103), 'scipy.special.erf', 'erf', (['out', 'out'], {}), '(out, out)\n', (1093, 1103), False, 'from scipy.special import erf\n'), ((1426, 1460), 'numpy.multiply', 'np.multiply', (['z', 'tricube_width', 'out'], {}), '(z, tricube_width, out)\n', (1437, 1460), True, 'import numpy as np\n'), ((1654, 1688), 'numpy.multiply', 'np.multiply', (['z', 'tricube_width', 'out'], {}), '(z, tricube_width, out)\n', (1665, 1688), True, 'import numpy as np\n'), ((2265, 2299), 'numpy.multiply', 'np.multiply', (['z', 'tricube_width', 'out'], {}), '(z, tricube_width, out)\n', (2276, 2299), True, 'import numpy as np\n'), ((2597, 2631), 'numpy.multiply', 'np.multiply', (['z', 'tricube_width', 'out'], {}), '(z, tricube_width, out)\n', (2608, 2631), True, 'import numpy as np\n'), ((3200, 3212), 'numpy.sqrt', 'np.sqrt', (['(5.0)'], {}), '(5.0)\n', (3207, 3212), True, 'import numpy as np\n'), ((3269, 3308), 'numpy.multiply', 'np.multiply', (['z', 'epanechnikov_width', 'out'], {}), '(z, epanechnikov_width, out)\n', (3280, 3308), True, 'import numpy as np\n'), ((3497, 3536), 'numpy.multiply', 'np.multiply', (['z', 'epanechnikov_width', 'out'], {}), '(z, epanechnikov_width, out)\n', (3508, 3536), True, 'import numpy as np\n'), ((3786, 3825), 'numpy.multiply', 'np.multiply', (['z', 'epanechnikov_width', 'out'], {}), '(z, epanechnikov_width, out)\n', (3797, 3825), True, 'import numpy as np\n'), ((4048, 4087), 'numpy.multiply', 'np.multiply', (['z', 'epanechnikov_width', 'out'], {}), '(z, epanechnikov_width, out)\n', (4059, 4087), True, 'import numpy as np\n'), ((4485, 4499), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (4496, 4499), True, 'import numpy as np\n'), ((4764, 4783), 'numpy.power', 'np.power', (['z', '(3)', 'out'], {}), '(z, 3, out)\n', (4772, 4783), True, 'import numpy as np\n'), ((4918, 4939), 'numpy.power', 'np.power', (['z', '(2.0)', 'out'], {}), '(z, 2.0, out)\n', (4926, 4939), True, 'import numpy as np\n'), ((5086, 5105), 'numpy.power', 'np.power', (['z', '(3)', 'out'], {}), '(z, 3, out)\n', (5094, 5105), True, 'import numpy as np\n'), ((5275, 5294), 'numpy.power', 'np.power', (['z', '(4)', 'out'], {}), '(z, 4, out)\n', (5283, 5294), True, 
'import numpy as np\n'), ((5471, 5490), 'numpy.power', 'np.power', (['z', '(3)', 'out'], {}), '(z, 3, out)\n', (5479, 5490), True, 'import numpy as np\n'), ((383, 415), 'numpy.empty', 'np.empty', (['z.shape'], {'dtype': 'z.dtype'}), '(z.shape, dtype=z.dtype)\n', (391, 415), True, 'import numpy as np\n'), ((1146, 1160), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (1157, 1160), True, 'import numpy as np\n'), ((1243, 1257), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (1254, 1257), True, 'import numpy as np\n'), ((5525, 5539), 'numpy.power', 'np.power', (['z', '(5)'], {}), '(z, 5)\n', (5533, 5539), True, 'import numpy as np\n'), ((4674, 4688), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (4685, 4688), True, 'import numpy as np\n'), ((4822, 4836), 'numpy.isfinite', 'np.isfinite', (['z'], {}), '(z)\n', (4833, 4836), True, 'import numpy as np\n'), ((1204, 1226), 'numpy.exp', 'np.exp', (['(-0.5 * sz * sz)'], {}), '(-0.5 * sz * sz)\n', (1210, 1226), True, 'import numpy as np\n'), ((1278, 1298), 'numpy.exp', 'np.exp', (['(-0.5 * z * z)'], {}), '(-0.5 * z * z)\n', (1284, 1298), True, 'import numpy as np\n')]
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import numpy as np
__all__ = ['raises', 'assert_equal', 'assert_almost_equal',
'assert_true', 'setup_function', 'teardown_function',
'has_isnan']
CWD = os.getcwd()
TEST_DIR = os.path.dirname(__file__)
has_isnan = True
try:
from math import isnan # noqa
except ImportError:
try:
from numpy import isnan # noqa
except ImportError:
has_isnan = False
print('Tests requiring isnan will fail')
def setup_function(function):
os.chdir(TEST_DIR)
def teardown_function(function):
os.chdir(CWD)
# Compatibility functions to convert from nose to py.test
def assert_equal(a, b):
assert a == b
def assert_almost_equal(a, b, **kwargs):
assert np.allclose(a, b, **kwargs)
def assert_true(a):
assert a
def make_decorator(func):
"""
Wraps a test decorator so as to properly replicate metadata
of the decorated function, including nose's additional stuff
(namely, setup and teardown).
"""
def decorate(newfunc):
if hasattr(func, 'compat_func_name'):
name = func.compat_func_name
else:
name = func.__name__
newfunc.__dict__ = func.__dict__
newfunc.__doc__ = func.__doc__
newfunc.__module__ = func.__module__
if not hasattr(newfunc, 'compat_co_firstlineno'):
try:
newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
except AttributeError:
newfunc.compat_co_firstlineno = func.__code__.co_firstlineno
try:
newfunc.__name__ = name
except TypeError:
# can't set func name in 2.3
newfunc.compat_func_name = name
return newfunc
return decorate
def raises(*exceptions):
"""Test must raise one of expected exceptions to pass.
Example use::
@raises(TypeError, ValueError)
def test_raises_type_error():
raise TypeError("This test passes")
@raises(Exception)
def test_that_fails_by_passing():
pass
If you want to test many assertions about exceptions in a single test,
you may want to use `assert_raises` instead.
"""
valid = ' or '.join([e.__name__ for e in exceptions])
def decorate(func):
name = func.__name__
def newfunc(*arg, **kw):
try:
func(*arg, **kw)
except exceptions:
pass
else:
message = f"{name}() did not raise {valid}"
raise AssertionError(message)
newfunc = make_decorator(func)(newfunc)
return newfunc
return decorate
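

# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original helpers (kept as a
# comment; the import path is hypothetical and depends on where this file
# lives in the test package). It shows how the nose-style shims above are
# meant to be used inside a module collected by py.test.
# ---------------------------------------------------------------------------
# from .helpers import assert_almost_equal, raises
#
# def test_numbers_close():
#     assert_almost_equal(0.1 + 0.2, 0.3)
#
# @raises(ZeroDivisionError)
# def test_division_by_zero():
#     1 / 0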
|
[
"os.chdir",
"os.path.dirname",
"numpy.allclose",
"os.getcwd"
] |
[((255, 266), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (264, 266), False, 'import os\n'), ((278, 303), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (293, 303), False, 'import os\n'), ((566, 584), 'os.chdir', 'os.chdir', (['TEST_DIR'], {}), '(TEST_DIR)\n', (574, 584), False, 'import os\n'), ((624, 637), 'os.chdir', 'os.chdir', (['CWD'], {}), '(CWD)\n', (632, 637), False, 'import os\n'), ((794, 821), 'numpy.allclose', 'np.allclose', (['a', 'b'], {}), '(a, b, **kwargs)\n', (805, 821), True, 'import numpy as np\n')]
|
import platform
import scipy
import numpy as np
import math
from scipy import integrate
from scipy import optimize as opt
from scipy.stats import gamma
from colorama import init, Fore, Back, Style
from cell import Cell
class PopSimulator:
def __init__(self, ncells, gr, sb, steps, CV2div = 0, CV2gr = 0, lamb=1, V0array=None, nu=1):
"""
:param ncells: int
:param gr: float
:param sb: float
:param steps: float
:param CV2div: float
:param CV2gr: float
:param lamb: float
        :param V0array: list
        :param nu: int
"""
#self.__title()
self.__check_errors(ncells, gr, sb, steps, CV2div, CV2gr, lamb, nu)
self.n = ncells # Number of cells to study
self.smplt = 0 # Sampling time
self.gr = gr # Growth rate
self.total_steps = steps # Division steps
self.sb = sb #Initial size
self.l = lamb
self.nu=nu
if lamb ==1:
self.K = self.total_steps *self.gr/(self.sb)
else:
self.K = self.total_steps*self.getk()
self.CV2div = CV2div
self.CV2gr = CV2gr
self.output = "" # string to export data in dynamic simulation
        self.output_size = "" # string to export data in division strategy
self.num_steps = 0 # Initial steps
self.V = self.sb # Cell size
self.time = 0 # Simulation time
self.cells = [] # Array of cells
if hasattr(V0array, "__len__"):
self.V0arr = V0array
else:
self.V0arr = []
self.initialize_cells(V0array=self.V0arr) #Initialize cells
self.DivFile=''
def __title(self):
"""
Initial title with the name of the project
:return: None
"""
if platform.system() == "Windows":
print(" ___ __ __ _______ ______ _____ __ ___ _____")
print("| _ \ \ \ | | | _____| / ____| / ___ \ | | | | | __ \\")
print("| | \ | \ \ | | | | | / | / \ | | | |___| | | \ |")
print("| |_/ / \ \| | | |___ | | | | | | | | ___ | |__/ /")
print("| __/ \__ | | ___| | | | | | | | | | | | __ \\")
print("| | / / | | | | | | | | | | | | | | \ |")
print("| | ___/ / | |_____ | \_____ | \___/ | | |___ | | | |__/ |")
print("|_| |_____/ |_______| \______| \_____/ |______| |___| |______/")
else:
print("\x1b[1,32m"+" ___ __ __ _______ ______ _____ __ ___ _____"+'\033[0m')
print("\x1b[1,32m"+"| _ \ \ \ | | | _____| / ____| / ___ \ | | | | | __ \\"+'\033[0m')
print("\x1b[1,32m"+"| | \ | \ \ | | | | | / | / \ | | | |___| | | \ |"+'\033[0m')
print("\x1b[1,32m"+"| |_/ / \ \| | | |___ | | | | | | | | ___ | |__/ /"+'\033[0m')
print("\x1b[1,32m"+"| __/ \__ | | ___| | | | | | | | | | | | __ \\"+'\033[0m')
print("\x1b[1,32m"+"| | / / | | | | | | | | | | | | | | \ |"+'\033[0m')
print("\x1b[1,32m"+"| | ___/ / | |_____ | \_____ | \___/ | | |___ | | | |__/ |"+'\033[0m')
print("\x1b[1,32m"+"|_| |_____/ |_______| \______| \_____/ |______| |___| |______/"+'\033[0m')
def __check_errors(self, ncells, gr, sb, steps, CV2div, CV2gr, lamb,nu):
"""
        Raise an error if any parameter does not comply with the established constraints
:param ncells: int
:param gr: float
:param sb: float
:param steps: int
:param CV2div: float
:param CV2gr: float
:param lamb: float
:return: None
"""
if not(nu in [1,2]):
raise NameError('nu must be in [1,2]')
elif ncells <= 0:
raise NameError('ncells must be positive')
elif gr < 0:
raise NameError('gr must be positive')
elif sb < 0:
raise NameError('sb must be positive or zero')
elif steps < 0:
raise NameError('steps must be positive or zero')
elif CV2div < 0:
raise NameError('CV2div must be positive or zero')
elif CV2gr < 0:
raise NameError('CV2gr must be positive or zero')
elif lamb < 0.5 or lamb > 2:
raise NameError('lamb must be higher than 0.5 and less than 2')
def newgr(self,CV2):
"""
        Sample a multiplicative growth-rate factor with mean 1 and squared coefficient of variation CV2
:param CV2: float
:return: float
"""
if CV2 ==0:
return 1.
else:
return np.random.gamma(shape=1/CV2,scale=CV2)
def newdivpar(self,CV2):
"""
        Sample a division ratio (partition fraction) with mean 0.5 and squared coefficient of variation CV2
        :param CV2: float
        :return: float
"""
if CV2 ==0:
return 0.5
else:
beta = 0.5*((1/CV2)-1)
return np.random.beta(a=beta,b=beta)
def getsb(self,k):
"""
        Return the mean newborn size consistent with the step-rate constant k
        :param k: float
        :return: float
"""
def root(tt):
return self.multimean(tt,k)-2*tt
def meansb():
return opt.bisect(root,0.00001,100000)
sb = meansb()
return sb
def multimean(self,s,k):
"""
        Mean division size for a cell born at size s, given the step-rate constant k
        :param s: float
        :param k: float
        :return: float
"""
sb=s
def moment(sd):
return self.rhomulti(sb,sd,k)*sd
v=integrate.quad(moment, sb, np.inf)[0]
return v
def rhomulti(self,sb,sd,k):
"""
        Probability density of dividing at size sd given birth size sb (multistep model with rate constant k)
        :param sb: float
        :param sd: float
        :param k: float
        :return: float
"""
n=self.total_steps
lamb=self.l
gr=self.gr
c=n*k/gr
x=c*((sd**lamb-sb**lamb)/lamb)
return gamma.pdf(x, n)*c*sd**(lamb-1)
def opti(self,k):
"""
        Auxiliary function whose root in k reproduces the target birth size sb
:param k: float
:return: float
"""
return self.getsb(k)-self.sb
def getk(self):
"""
        Return k when it cannot be calculated directly as gr/sb (i.e. when lamb != 1)
:return: float
"""
return opt.bisect(self.opti,0.001,1.5)
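
    # Added note (descriptive sketch, not part of the original API): for lamb != 1 there is
    # no closed form K = steps*gr/sb, so getk() searches by bisection for the rate constant
    # k in [0.001, 1.5] at which the self-consistent mean birth size equals the target sb,
    # i.e. opti(k) = getsb(k) - sb = 0. A hypothetical sanity check (illustrative values only):
    #     sim = PopSimulator(ncells=1, gr=1.0, sb=1.0, steps=10, lamb=1.5)
    #     assert abs(sim.getsb(sim.getk()) - sim.sb) < 1e-3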
def initialize_cells(self, V0array):
"""
Give the initial params to the cells
:param V0array: list
:return: None
"""
self.cells=[]
if len(V0array)!=0:
idx = 0
for v in V0array:
gr = self.newgr(self.CV2gr)
divpar = self.newdivpar(self.CV2div)
cell = Cell(idx, v, num_steps=self.total_steps, gr=gr, divpar=divpar, k = gr)
cell.nextt = self.nextt(v,cell.rv,cell)
self.cells.append(cell)
idx += 1
else:
for i in range(self.n):
gr = self.newgr(self.CV2gr)
divpar = self.newdivpar(self.CV2div)
cell = Cell(i, self.sb, num_steps=self.total_steps, gr = gr, divpar = divpar, k = gr)
cell.nextt = self.nextt(self.sb,cell.rv,cell)
self.cells.append(cell)
def open_file(self, FileName="./DataSz.csv",DivEventsFile=None):
"""
        Open the files used to write the .csv outputs
        :param FileName: string
        :param DivEventsFile: string
        :return: None
"""
if hasattr(DivEventsFile, "__len__"):
self.DivFile = open(DivEventsFile, "w")
output="Sample,Cell,Mother,MotherSize,BirthTime,Sb,GrowthRate,DivPar\n"
for m in range(len(self.cells)):
output+=str(m)+','+str(m)+','+str(m)+','+str(np.nan)+',0.000,'+str(np.round(self.cells[m].V,8))\
+','+str(np.round(self.cells[m].gr*self.gr,8))+','+str(self.cells[m].dp)+'\n'
self.DivFile.write(output)
self.output = ""
self.file = open(FileName, "w")
self.output += "Time,Sample,Cell,Size,DivSteps\n"
self.file.write(self.output)
kk=0
for cell in self.cells:
self.output = ""
self.output += "0.00,"+str(kk)+","+str(kk)+","
self.output += str(np.round(cell.get_size(), 4) )+",0\n"
self.file.write(self.output)
kk+=1
def nextt (self,s0,r,cell):
"""
        Sample the waiting time until the next division step for a cell of current size s0, using the uniform random number r
        :param s0: float
        :param r: float
        :param cell: Cell
        :return: float
"""
mu= (self.gr*cell.gr)
k= self.K*cell.k
l=self.l
return (1/(l*mu))*np.log(1-(l*mu*np.log(r)/(k*s0**l)))
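
    # Added derivation sketch for the expression above (standard inverse-transform sampling,
    # spelled out here for reference): between steps the size grows as s(t) = s0*exp(mu*t) and
    # the per-step propensity is k*s(t)**l, so the survival function of the waiting time is
    #     P(T > t) = exp(-(k*s0**l/(l*mu)) * (exp(l*mu*t) - 1)).
    # Setting P(T > t) = r with r uniform in (0, 1) and solving for t gives
    #     t = (1/(l*mu)) * log(1 - l*mu*log(r)/(k*s0**l)),
    # which is exactly what nextt returns.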
def simulate(self,tmax):
"""
        Advance each cell independently over a time window of length tmax (single-lineage mode, nu=1)
:param tmax: int
:return: None
"""
if self.nu==2:
raise NameError('This function was designed for nu=1.')
else:
for cell in self.cells:
t=0
while t<tmax:
tt = cell.nextt
if ((t+tt) <= tmax):
cell.num_steps += 1
Vn=cell.V*np.exp(self.gr*cell.gr*tt)
if cell.num_steps >= cell.total_steps:
dp = self.newdivpar(self.CV2div)
gr = self.newgr(self.CV2gr)
cell.division(Vn,dp,gr,k=gr)
else:
cell.change(Vn)
cell.rv=np.random.rand()
cell.nextt = self.nextt(cell.V,cell.rv,cell)
else:
Vn = cell.V*np.exp(self.gr*cell.gr*(tmax-t))
cell.change(Vn)
cell.nextt = cell.nextt - (tmax-t)
t += tt
def szdyn(self, tmax, sample_time, FileName = "./DataSz.csv", DivEventsFile=None):
        """
        Simulate the size dynamics of the population up to tmax, sampling every sample_time,
        and export the trajectories to FileName (and the division events to DivEventsFile, if given)
        :param tmax: int
        :param sample_time: int
        :param FileName: string
        :param DivEventsFile: string
        :return: None
        """
        self.initialize_cells(self.V0arr) #Initialize cells
        if hasattr(DivEventsFile, "__len__"):
            self.open_file(FileName = FileName, DivEventsFile=DivEventsFile)
        else:
            self.open_file(FileName = FileName)
if self.nu==2:
if tmax>9*np.log(2)/self.gr:
raise NameError('Please select tmax<9*doublingtime')
elif self.n>=2000:
raise NameError('Please select ncells<=2000')
self.smplt = sample_time
self.time = 0
nextarray=np.empty(len(self.cells))
for m in range(len(self.cells)):
nextarray[m]=self.cells[m].nextt
self.cells[m].idx=m
tref=self.smplt
while self.time<tmax:
nexttm=np.min(nextarray)
if nexttm>tref-self.time:
tt=tref-self.time
self.time+=tt
self.output = ""
for ll in range(len(self.cells)):
cell=self.cells[ll]
cell.nextt+=-tt
nextarray[ll]+=-tt
g=cell.gr*self.gr
cell.V=cell.V*np.exp(g*tt)
"Time,Sample,Cell,Size,DivSteps\n"
self.output += str(self.time)+","+str(cell.popidx)+","+str(cell.idx)+","\
+str(np.round(cell.V,8))+","+str(cell.num_steps)+"\n"
self.file.write(self.output)
tref+=self.smplt
else:
m = np.argmin(nextarray)
cell = self.cells[m]
cell.num_steps+=1
for ll in range(len(self.cells)):
self.cells[ll].nextt+=-nexttm
nextarray[ll]+=-nexttm
g=self.cells[ll].gr*self.gr
self.cells[ll].V=self.cells[ll].V*np.exp(g*nexttm)
if cell.num_steps>=self.total_steps:
momsz=cell.V
sz=cell.V*cell.dp
sz2=cell.V*(1-cell.dp)
cell.V=sz
cell.num_steps=0
cell.dp = self.newdivpar(self.CV2div)
cell.gr = self.newgr(self.CV2gr)
gr=cell.gr
dp=cell.dp
cell.nextt = self.nextt(sz,np.random.rand(),cell)
nextarray[m]=cell.nextt
if self.nu==2:
gr2 = self.newgr(self.CV2gr)
dp2 = self.newdivpar(self.CV2div)
idx=len(self.cells)
cell2 = Cell(idx, V0=sz2, num_steps=self.total_steps, gr=gr2, divpar=dp2, k = gr2)
cell2.popidx=cell.popidx
cell2.nextt = self.nextt(sz2,np.random.rand(),cell2)
nextarray=np.concatenate((nextarray, [cell2.nextt]), axis=None)
self.cells.append(cell2)
if hasattr(DivEventsFile, "__len__"):
self.DivFile.write(str(cell.popidx)+","+str(int(cell.idx))+","+str(int(cell.idx))+","+str(momsz)\
+","+str(nexttm+self.time)+","+str(np.round(sz,8))\
+","+str(np.round(gr*self.gr,8))+","+str(dp)+"\n")
if self.nu==2:
self.DivFile.write(str(cell.popidx)+","+str(int(cell2.idx))+","+str(int(cell.idx))+","+str(momsz)\
+","+str(nexttm+self.time)+","+str(np.round(sz2,8))\
+","+str(np.round(gr2*self.gr,8))+","+str(dp2)+"\n")
else:
cell.rv=np.random.rand()
cell.nextt = self.nextt(cell.V, cell.rv, cell)
nextarray[m]=cell.nextt
self.time+=nexttm
self.file.close()
if hasattr(DivEventsFile, "__len__"):
self.DivFile.close()
def divstrat(self, tmax, sample_time, nameDSM = "./dataDSM.csv"):
"""
        Record the division strategy (birth size vs division size) during a simulation of length tmax and export it to nameDSM
:param tmax: int
:param sample_time: int
:param nameDSM: string
:return: None
"""
if self.nu==2:
raise NameError('This function was designed for nu==1.')
else:
self.initialize_cells(self.V0arr) #Initialize cells
self.file_size = open(nameDSM, "w")
self.file_size.write("S_b,S_d,time\n")
self.smplt = sample_time
self.time = 0
self.open_file()
self.time = 0
divarray = np.array([])
tgt = (tmax/10)
cnt = 0
for i in range(len(self.cells)):
divarray = np.concatenate((divarray,[self.get_ndiv(i)]),axis=0)
while self.time<tmax:
self.simulate(self.smplt)
cnt2 = 0
self.time += self.smplt
line = ""
for cell in self.cells:
if self.get_ndiv(i) > divarray[cnt2]:
line+=str(self.truncate(cell.Vb, 4))+","+str(self.truncate(cell.Vd, 4))+","+str(self.truncate(self.time, 4))+"\n "
divarray[cnt2] = self.get_ndiv(i)
cnt2+=1
self.file_size.write(line)
cnt +=self.smplt
if cnt >= tgt:
print(str(np.int(100*self.time/tmax))+"%")
cnt = 0
self.file_size.close()
def du(self,u,sb,t,dt):
"""
        One explicit Euler increment of the finite-state-projection master equation for the division-step distribution u
:param u: array
:param sb: float
:param t: int
:param dt: float
:return: array
"""
mu=self.gr
lamb=self.l
k=self.K
v=np.zeros_like(u)
s=sb*np.exp(mu*t)
for l in range(len(u)):
if l==0:
v[0]=(-k*(s**lamb)*u[0])*dt
elif l==len(u)-1:
v[len(u)-1]=(k*(s**lamb)*u[len(u)-2])*dt
elif l==len(u)-2:
v[len(u)-2]=(-k*(s**lamb)*u[len(u)-2]+k*(s**lamb)*u[len(u)-3])*dt
else:
v[l]=(-k*(s**lamb)*u[l]+k*(s**lamb)*u[l-1])*dt
return v
def SdStat(self,sb):
"""
        Compute the mean added size and its squared coefficient of variation for a cell born at size sb, via the finite state projection
:param sb: float
:return: float, float
"""
mu=self.gr
tmax=5/self.gr
dt=0.001/self.gr
u=np.zeros(self.total_steps+1)
t=0
count=10
plim=[]
tarrayfsp=[]
u[0]=1
while t<tmax:
u+=self.du(u,sb,t,dt)
t+=dt
count+=1
if count>9:
plim.append(u[-1])
tarrayfsp.append(t)
count=0
tt=np.array(tarrayfsp)
h=tt[1]-tt[0]
rhot=np.diff(plim)/h
trho=0.5*(tt[1:] + tt[:-1])
sarray=sb*np.exp(mu*tt)
ds=np.diff(sarray)
ss=0.5*(sarray[1:] + sarray[:-1])
rhos=rhot=np.diff(plim)/ds
mn=np.trapz(rhos*ss,x=ss)
var=np.trapz(rhos*(ss)**2,x=ss)
CV2=(var-mn**2)/(mn-sb)**2
return mn-sb,CV2
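
    # Added usage sketch (illustrative values only):
    #     sim = PopSimulator(ncells=1, gr=1.0, sb=1.0, steps=10)
    #     added, cv2 = sim.SdStat(sb=1.0)
    # `added` is the mean size accumulated between birth and division and `cv2` its squared
    # coefficient of variation, both read off the finite-state-projection density `rhos`.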
def szdynFSP(self, tmax, CV2sz = 0, nameFSP = "./dataFSP.csv"):
"""
        Compute the time course of the mean and variance of cell size with the finite state projection and export it to nameFSP
:param tmax: int
:param CV2sz: float
:param nameFSP: string
:return: None
"""
if self.nu==2:
raise NameError('This function was designed for nu==1.')
else:
file = open(nameFSP, "w")
output = "time,Meansize,VarSize\n"
nsteps=self.total_steps
gr=self.gr
k=self.K
lamb=self.l
tmax=tmax
ndivs=int(1.5*tmax*self.gr/np.log(2))
dt=0.0001*np.log(2)/self.gr
if CV2sz==0:
s0arr=[self.V]
else:
s0arr = np.linspace(gamma.ppf(0.001,a=1/CV2sz,scale=self.V*CV2sz),
gamma.ppf(0.999, a=1/CV2sz,scale=self.V*CV2sz), 30)
dx=(s0arr[1]-s0arr[0])
wgs=[]
for l in s0arr:
wgs.append((gamma.cdf(l+dx/2,a=1/CV2sz,scale=self.V*CV2sz)-gamma.cdf(l-dx/2,a=1/CV2sz,scale=self.V*CV2sz))/dx)
allp=np.zeros([ndivs,len(s0arr),1000])
obj=0
countv0=0
for v0 in s0arr:
if obj%3==2:
print(str(np.int(100*obj/30))+"%")
obj+=1
t=0
steps=int(np.floor(tmax/dt))
u=np.zeros([ndivs,nsteps])#(DIVS,STEPS)
u[0]=np.zeros(nsteps)
u[0][0]=1#P_00
time=[]#time array
count=int(np.floor(tmax/(dt*1000)))-1
count2=0
for l in range(steps):
utemp=u
for n in range(len(utemp)):#n=divs,
for m in range(len(utemp[n])):#m=steps
arg=lamb*(gr*t-n*np.log(2))
if (m==0):#m=steps
if(n==0):#n=divs
dun=-k*v0**lamb*np.exp(lamb*gr*t)*(utemp[0][0])
u[n][m]+=dun*dt
else:
dun=k*v0**lamb*np.exp(arg)*(2**lamb*utemp[n-1][len(utemp[n])-1]-utemp[n][0])
u[n][m]+=dun*dt
elif(m==len(utemp[n])-1):
if(n==len(utemp)-1):
dun=k*v0**lamb*np.exp(arg)*(utemp[n][len(utemp[n])-2])
u[n][m]+=dun*dt
else:
dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
u[n][m]+=dun*dt
else:
dun=k*v0**lamb*np.exp(arg)*(utemp[n][m-1]-utemp[n][m])
u[n][m]+=dun*dt
t+=dt
count=count+1
if count==int(np.floor(tmax/(dt*1000))):
time.append(t)
mean=0
for ii in range(len(allp)):
allp[ii][countv0][count2]=np.sum(u[ii])
count=0
count2+=1
countv0=countv0+1
if CV2sz==0:
fullmeansz=[]
fullvarsz=[]
fulltime=[]
t=0
dt=tmax/1000
for ll in range(len(allp[0][0])):
ms=0
for ctv0 in range(len(s0arr)):
tempms=0
for ii in range(ndivs):
arg=gr*t-np.log(2)*ii
tempms+=np.exp(arg)*allp[ii][ctv0][ll]
ms+=s0arr[ctv0]*tempms
fullmeansz.append(ms)
mvar=0
for ctv0 in range(len(s0arr)):
tempms=0
for ii in range(ndivs):
arg=gr*t-np.log(2)*ii
tempms+=(ms-s0arr[ctv0]*np.exp(arg))**2*allp[ii][ctv0][ll]
mvar+=tempms
fullvarsz.append(mvar)
fulltime.append(t)
t+=dt
else:
fullmeansz=[]
fullvarsz=[]
fulltime=[]
t=0
dt=tmax/1000
for ll in range(len(allp[0][0])):
ms=0
for ctv0 in range(len(s0arr)):
tempms=0
for ii in range(ndivs):
arg=gr*t-np.log(2)*ii
tempms+=np.exp(arg)*allp[ii][ctv0][ll]
ms+=s0arr[ctv0]*tempms*wgs[ctv0]*dx
fullmeansz.append(ms)
mvar=0
for ctv0 in range(len(s0arr)):
tempms=0
for ii in range(ndivs):
arg=gr*t-np.log(2)*ii
tempms+=(ms-s0arr[ctv0]*np.exp(arg))**2*allp[ii][ctv0][ll]
mvar+=tempms*wgs[ctv0]*dx
fullvarsz.append(mvar)
fulltime.append(t)
t+=dt
for m in range(len(fullmeansz)):
output += str(fulltime[m])+","+str(fullmeansz[m])+","+str(fullvarsz[m])+"\n"
file.write(output)
def get_sz(self, n, cells=[]):
"""
Give the size of a cell
:param n: int
:param cells: list
:return: float
"""
if len(cells) > 0:
return cells[n].V
else:
return self.cells[n].V
def get_ndiv(self, n, cells=[]):
if len(cells) > 0:
return cells[n].ndiv
else:
return self.cells[n].ndiv
def get_gr(self, n, cells=[]):
"""
Give the growth rate of a given index cell
:param n: int
:param cells: list
:return: float
"""
if len(cells) > 0:
return cells[n].gr
else:
return self.cells[n].gr
def get_dp(self, n, cells=[]):
"""
        Give the division parameter of a given index cell
:param n: int
:param cells: array
:return: float
"""
if len(cells) > 0:
return cells[n].dp
else:
return self.cells[n].dp
def get_next_t(self, n, cells=[]):
"""
Get the next time
:param n: int
:param cells: array
:return: int
"""
if len(cells) > 0:
return cells[n].nextt
else:
return self.cells[n].nextt
def truncate(self, num, ciphers):
"""
        Return num truncated to the given number of decimal ciphers
:param num: float
:param ciphers: int
:return: float
"""
pos = pow(10.0, ciphers)
return math.trunc(pos * num)/pos
def __str__(self):
out = "Initial Params: {\n tmax: "+str(self.total_time)+", \n sample time: "+str(self.smplt)+", \n ncells: "+str(self.n)+", \n dt: "+str(self.dt)+", \n alpha: "+str(self.alpha)+", \n k: "+str(self.K)+"\n}"
for cell in self.cells:
out+= str(cell)+"\n"
return out
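

# Minimal end-to-end sketch (added for illustration; it is not part of the original module
# and assumes the companion cell.py plus write permission for the .csv outputs):
if __name__ == "__main__":
    sim = PopSimulator(ncells=20, gr=np.log(2), sb=1.0, steps=10, CV2div=0.001, CV2gr=0.01)
    sim.szdyn(tmax=5, sample_time=0.1, FileName="./DataSz.csv")   # size trajectories
    added, cv2 = sim.SdStat(sb=1.0)                               # added-size statistics
    print("mean added size: %.3f, CV2 of added size: %.3f" % (added, cv2))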
|
[
"numpy.random.rand",
"numpy.log",
"numpy.array",
"math.trunc",
"scipy.stats.gamma.pdf",
"numpy.diff",
"numpy.exp",
"platform.system",
"numpy.random.gamma",
"cell.Cell",
"numpy.concatenate",
"numpy.min",
"numpy.argmin",
"numpy.round",
"scipy.stats.gamma.cdf",
"numpy.trapz",
"numpy.random.beta",
"scipy.integrate.quad",
"scipy.stats.gamma.ppf",
"numpy.floor",
"numpy.int",
"scipy.optimize.bisect",
"numpy.sum",
"numpy.zeros",
"numpy.zeros_like"
] |
[((6301, 6334), 'scipy.optimize.bisect', 'opt.bisect', (['self.opti', '(0.001)', '(1.5)'], {}), '(self.opti, 0.001, 1.5)\n', (6311, 6334), True, 'from scipy import optimize as opt\n'), ((15927, 15943), 'numpy.zeros_like', 'np.zeros_like', (['u'], {}), '(u)\n', (15940, 15943), True, 'import numpy as np\n'), ((16557, 16587), 'numpy.zeros', 'np.zeros', (['(self.total_steps + 1)'], {}), '(self.total_steps + 1)\n', (16565, 16587), True, 'import numpy as np\n'), ((16892, 16911), 'numpy.array', 'np.array', (['tarrayfsp'], {}), '(tarrayfsp)\n', (16900, 16911), True, 'import numpy as np\n'), ((17042, 17057), 'numpy.diff', 'np.diff', (['sarray'], {}), '(sarray)\n', (17049, 17057), True, 'import numpy as np\n'), ((17146, 17171), 'numpy.trapz', 'np.trapz', (['(rhos * ss)'], {'x': 'ss'}), '(rhos * ss, x=ss)\n', (17154, 17171), True, 'import numpy as np\n'), ((17181, 17211), 'numpy.trapz', 'np.trapz', (['(rhos * ss ** 2)'], {'x': 'ss'}), '(rhos * ss ** 2, x=ss)\n', (17189, 17211), True, 'import numpy as np\n'), ((1783, 1800), 'platform.system', 'platform.system', ([], {}), '()\n', (1798, 1800), False, 'import platform\n'), ((4813, 4854), 'numpy.random.gamma', 'np.random.gamma', ([], {'shape': '(1 / CV2)', 'scale': 'CV2'}), '(shape=1 / CV2, scale=CV2)\n', (4828, 4854), True, 'import numpy as np\n'), ((5075, 5105), 'numpy.random.beta', 'np.random.beta', ([], {'a': 'beta', 'b': 'beta'}), '(a=beta, b=beta)\n', (5089, 5105), True, 'import numpy as np\n'), ((5320, 5351), 'scipy.optimize.bisect', 'opt.bisect', (['root', '(1e-05)', '(100000)'], {}), '(root, 1e-05, 100000)\n', (5330, 5351), True, 'from scipy import optimize as opt\n'), ((5620, 5654), 'scipy.integrate.quad', 'integrate.quad', (['moment', 'sb', 'np.inf'], {}), '(moment, sb, np.inf)\n', (5634, 5654), False, 'from scipy import integrate\n'), ((10865, 10882), 'numpy.min', 'np.min', (['nextarray'], {}), '(nextarray)\n', (10871, 10882), True, 'import numpy as np\n'), ((14761, 14773), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (14769, 14773), True, 'import numpy as np\n'), ((15957, 15971), 'numpy.exp', 'np.exp', (['(mu * t)'], {}), '(mu * t)\n', (15963, 15971), True, 'import numpy as np\n'), ((16947, 16960), 'numpy.diff', 'np.diff', (['plim'], {}), '(plim)\n', (16954, 16960), True, 'import numpy as np\n'), ((17017, 17032), 'numpy.exp', 'np.exp', (['(mu * tt)'], {}), '(mu * tt)\n', (17023, 17032), True, 'import numpy as np\n'), ((17118, 17131), 'numpy.diff', 'np.diff', (['plim'], {}), '(plim)\n', (17125, 17131), True, 'import numpy as np\n'), ((24288, 24309), 'math.trunc', 'math.trunc', (['(pos * num)'], {}), '(pos * num)\n', (24298, 24309), False, 'import math\n'), ((5976, 5991), 'scipy.stats.gamma.pdf', 'gamma.pdf', (['x', 'n'], {}), '(x, n)\n', (5985, 5991), False, 'from scipy.stats import gamma\n'), ((6719, 6787), 'cell.Cell', 'Cell', (['idx', 'v'], {'num_steps': 'self.total_steps', 'gr': 'gr', 'divpar': 'divpar', 'k': 'gr'}), '(idx, v, num_steps=self.total_steps, gr=gr, divpar=divpar, k=gr)\n', (6723, 6787), False, 'from cell import Cell\n'), ((7081, 7153), 'cell.Cell', 'Cell', (['i', 'self.sb'], {'num_steps': 'self.total_steps', 'gr': 'gr', 'divpar': 'divpar', 'k': 'gr'}), '(i, self.sb, num_steps=self.total_steps, gr=gr, divpar=divpar, k=gr)\n', (7085, 7153), False, 'from cell import Cell\n'), ((11607, 11627), 'numpy.argmin', 'np.argmin', (['nextarray'], {}), '(nextarray)\n', (11616, 11627), True, 'import numpy as np\n'), ((18651, 18676), 'numpy.zeros', 'np.zeros', (['[ndivs, nsteps]'], {}), '([ndivs, nsteps])\n', (18659, 18676), True, 
'import numpy as np\n'), ((18710, 18726), 'numpy.zeros', 'np.zeros', (['nsteps'], {}), '(nsteps)\n', (18718, 18726), True, 'import numpy as np\n'), ((13859, 13875), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (13873, 13875), True, 'import numpy as np\n'), ((17835, 17844), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (17841, 17844), True, 'import numpy as np\n'), ((17868, 17877), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (17874, 17877), True, 'import numpy as np\n'), ((17996, 18047), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['(0.001)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(0.001, a=1 / CV2sz, scale=self.V * CV2sz)\n', (18005, 18047), False, 'from scipy.stats import gamma\n'), ((18063, 18114), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['(0.999)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(0.999, a=1 / CV2sz, scale=self.V * CV2sz)\n', (18072, 18114), False, 'from scipy.stats import gamma\n'), ((18614, 18633), 'numpy.floor', 'np.floor', (['(tmax / dt)'], {}), '(tmax / dt)\n', (18622, 18633), True, 'import numpy as np\n'), ((10394, 10403), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (10400, 10403), True, 'import numpy as np\n'), ((11255, 11269), 'numpy.exp', 'np.exp', (['(g * tt)'], {}), '(g * tt)\n', (11261, 11269), True, 'import numpy as np\n'), ((11944, 11962), 'numpy.exp', 'np.exp', (['(g * nexttm)'], {}), '(g * nexttm)\n', (11950, 11962), True, 'import numpy as np\n'), ((12415, 12431), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12429, 12431), True, 'import numpy as np\n'), ((12704, 12776), 'cell.Cell', 'Cell', (['idx'], {'V0': 'sz2', 'num_steps': 'self.total_steps', 'gr': 'gr2', 'divpar': 'dp2', 'k': 'gr2'}), '(idx, V0=sz2, num_steps=self.total_steps, gr=gr2, divpar=dp2, k=gr2)\n', (12708, 12776), False, 'from cell import Cell\n'), ((12939, 12992), 'numpy.concatenate', 'np.concatenate', (['(nextarray, [cell2.nextt])'], {'axis': 'None'}), '((nextarray, [cell2.nextt]), axis=None)\n', (12953, 12992), True, 'import numpy as np\n'), ((18819, 18847), 'numpy.floor', 'np.floor', (['(tmax / (dt * 1000))'], {}), '(tmax / (dt * 1000))\n', (18827, 18847), True, 'import numpy as np\n'), ((8643, 8652), 'numpy.log', 'np.log', (['r'], {}), '(r)\n', (8649, 8652), True, 'import numpy as np\n'), ((9162, 9192), 'numpy.exp', 'np.exp', (['(self.gr * cell.gr * tt)'], {}), '(self.gr * cell.gr * tt)\n', (9168, 9192), True, 'import numpy as np\n'), ((9536, 9552), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9550, 9552), True, 'import numpy as np\n'), ((9684, 9722), 'numpy.exp', 'np.exp', (['(self.gr * cell.gr * (tmax - t))'], {}), '(self.gr * cell.gr * (tmax - t))\n', (9690, 9722), True, 'import numpy as np\n'), ((12881, 12897), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (12895, 12897), True, 'import numpy as np\n'), ((20243, 20271), 'numpy.floor', 'np.floor', (['(tmax / (dt * 1000))'], {}), '(tmax / (dt * 1000))\n', (20251, 20271), True, 'import numpy as np\n'), ((20446, 20459), 'numpy.sum', 'np.sum', (['u[ii]'], {}), '(u[ii])\n', (20452, 20459), True, 'import numpy as np\n'), ((15574, 15604), 'numpy.int', 'np.int', (['(100 * self.time / tmax)'], {}), '(100 * self.time / tmax)\n', (15580, 15604), True, 'import numpy as np\n'), ((18241, 18297), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['(l + dx / 2)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(l + dx / 2, a=1 / CV2sz, scale=self.V * CV2sz)\n', (18250, 18297), False, 'from scipy.stats import gamma\n'), ((18288, 18344), 'scipy.stats.gamma.cdf', 'gamma.cdf', (['(l - dx / 
2)'], {'a': '(1 / CV2sz)', 'scale': '(self.V * CV2sz)'}), '(l - dx / 2, a=1 / CV2sz, scale=self.V * CV2sz)\n', (18297, 18344), False, 'from scipy.stats import gamma\n'), ((18520, 18542), 'numpy.int', 'np.int', (['(100 * obj / 30)'], {}), '(100 * obj / 30)\n', (18526, 18542), True, 'import numpy as np\n'), ((21014, 21025), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (21020, 21025), True, 'import numpy as np\n'), ((22022, 22033), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (22028, 22033), True, 'import numpy as np\n'), ((7836, 7875), 'numpy.round', 'np.round', (['(self.cells[m].gr * self.gr)', '(8)'], {}), '(self.cells[m].gr * self.gr, 8)\n', (7844, 7875), True, 'import numpy as np\n'), ((20965, 20974), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (20971, 20974), True, 'import numpy as np\n'), ((21330, 21339), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (21336, 21339), True, 'import numpy as np\n'), ((21973, 21982), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (21979, 21982), True, 'import numpy as np\n'), ((22351, 22360), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (22357, 22360), True, 'import numpy as np\n'), ((11442, 11461), 'numpy.round', 'np.round', (['cell.V', '(8)'], {}), '(cell.V, 8)\n', (11450, 11461), True, 'import numpy as np\n'), ((19103, 19112), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (19109, 19112), True, 'import numpy as np\n'), ((7781, 7809), 'numpy.round', 'np.round', (['self.cells[m].V', '(8)'], {}), '(self.cells[m].V, 8)\n', (7789, 7809), True, 'import numpy as np\n'), ((19262, 19283), 'numpy.exp', 'np.exp', (['(lamb * gr * t)'], {}), '(lamb * gr * t)\n', (19268, 19283), True, 'import numpy as np\n'), ((19435, 19446), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (19441, 19446), True, 'import numpy as np\n'), ((20061, 20072), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (20067, 20072), True, 'import numpy as np\n'), ((21395, 21406), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (21401, 21406), True, 'import numpy as np\n'), ((22416, 22427), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (22422, 22427), True, 'import numpy as np\n'), ((13389, 13414), 'numpy.round', 'np.round', (['(gr * self.gr)', '(8)'], {}), '(gr * self.gr, 8)\n', (13397, 13414), True, 'import numpy as np\n'), ((19707, 19718), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (19713, 19718), True, 'import numpy as np\n'), ((19888, 19899), 'numpy.exp', 'np.exp', (['arg'], {}), '(arg)\n', (19894, 19899), True, 'import numpy as np\n'), ((13765, 13791), 'numpy.round', 'np.round', (['(gr2 * self.gr)', '(8)'], {}), '(gr2 * self.gr, 8)\n', (13773, 13791), True, 'import numpy as np\n'), ((13320, 13335), 'numpy.round', 'np.round', (['sz', '(8)'], {}), '(sz, 8)\n', (13328, 13335), True, 'import numpy as np\n'), ((13695, 13711), 'numpy.round', 'np.round', (['sz2', '(8)'], {}), '(sz2, 8)\n', (13703, 13711), True, 'import numpy as np\n')]
|
"""
file: simple_gen.py
author: <NAME>
date: 17 May 2020
notes: a most basic implementation of genetic cross breeding and mutation to attempt to improve
a neural network. Assumes the standard Keras model from the Donkeycar project.
Lower score means less loss = better.
"""
import argparse
import json
import os
import time
import warnings
import numpy as np
from PIL import Image
# noisy, noisy tensorflow. we love you.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=FutureWarning)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
class IAgent:
def begin(self):
pass
def wait(self):
pass
def get_score(self):
pass
def make_new(self, parent1, parent2):
return IAgent()
class GeneticAlg:
def __init__(self, population, conf={}):
self.population = population
self.conf = conf
def finished(self):
return False
def process(self, num_iter):
iIter = 0
while not self.finished() and iIter < num_iter:
print("starting epoch", iIter)
s = time.time()
self.evaluate_agents()
self.on_agents_finished()
e = time.time() - s
self.breed_agents()
iIter += 1
d = time.time() - s
# Time per iteration getting worse?!
print("finish epoch", iIter)
print("Iter %d eval time: %f total time: %f" % (iIter, e, d))
def on_agents_finished(self):
pass
def evaluate_agents(self):
for agent in self.population:
agent.begin()
for agent in self.population:
agent.wait()
self.sort_agents()
# progress
print("scores:", [a.score for a in self.population])
def how_many_to_keep(self):
return round(len(self.population) / 4) + 1
def breed_agents(self):
"""
keep the best N of our population and replace the rest
with versions cross bred from other agents.
"""
keep = self.how_many_to_keep()
num_new = len(self.population) - keep
pop_to_keep = self.population[0:keep]
new_population = []
for _ in range(num_new):
p1, p2 = self.select_parents()
new_agent = p1.make_new(p1, p2)
new_agent.mutate()
new_population.append(new_agent)
self.population = pop_to_keep + new_population
def sort_agents(self):
self.population.sort(key=lambda x: x.get_score(), reverse=False)
def select_pop_index(self):
r = np.random.uniform(low=0.0, high=1.0)
N = len(self.population)
iP = round(r * N) % N
return iP
def select_parents(self):
iP1 = self.select_pop_index()
iP2 = self.select_pop_index()
# hack, always select the best 2
# iP1 = 0
# iP2 = 1
# lets make sure parents are not the same
while iP2 == iP1:
iP2 = self.select_pop_index()
return self.population[iP1], self.population[iP2]
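
    # Added note (descriptive only): parent selection here is uniform over the whole
    # population rather than fitness-proportional; round(r * N) % N maps a uniform draw in
    # [0, 1] to an index in [0, N-1] (both endpoints land on index 0). Selection pressure
    # comes from how_many_to_keep(), which carries roughly the best quarter of the sorted
    # population (plus one) into the next generation unchanged.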
class NNAgent(IAgent):
def __init__(self, model, conf):
self.model = model
self.score = 0.0
self.conf = conf
def begin(self):
self.score = 0.0
def wait(self):
pass
def get_score(self):
return self.score
def mutate(self):
pass
def breed(self, agent1, agent2):
return agent1.model
def make_new(self, parent1, parent2):
new_model = self.breed(parent1, parent2)
agent = NNAgent(new_model, self.conf)
agent.mutate()
return agent
class KerasNNAgent(NNAgent):
def __init__(self, model, conf):
super().__init__(model, conf)
self.mutation_rate = conf["mutation_rate"]
def mutate(self):
layers_to_mutate = self.conf["layers_to_mutate"]
for iLayer in layers_to_mutate:
layer = self.model.get_layer(index=iLayer)
w = layer.get_weights()
self.modify_weights(w)
layer.set_weights(w)
self.decay_mutations()
def rand_float(self, mn, mx):
return float(np.random.uniform(mn, mx, 1)[0])
def modify_weights(self, w):
mx = self.conf["mutation_max"]
mn = self.conf["mutation_min"]
mag = self.rand_float(mn, mx)
for iArr, arr in enumerate(w):
val = self.rand_float(0.0, 1.0)
if val > self.mutation_rate:
continue
random_values = np.random.uniform(-mag, mag, arr.shape)
arr = arr + random_values
w[iArr] = arr
return w
def decay_mutations(self):
self.conf["mutation_max"] *= self.conf["mutation_decay"]
def breed(self, agent1, agent2):
model1, model2 = agent1.model, agent2.model
jsm = model1.to_json()
new_model = tf.keras.models.model_from_json(jsm)
new_model.set_weights(model1.get_weights())
iLayers = self.conf["layers_to_combine"]
for iLayer in iLayers:
layer1 = model1.get_layer(index=iLayer)
layer2 = model2.get_layer(index=iLayer)
final_layer = new_model.get_layer(index=iLayer)
self.merge_layers(final_layer, layer1, layer2)
return new_model
def merge_layers(self, dest_layer, src1_layer, src2_layer):
w1 = src1_layer.get_weights()
w2 = src2_layer.get_weights()
res = w1.copy()
if type(w1) is list:
half = round(len(w1) / 2)
res[half:-1] = w2[half:-1]
else:
l_indices = np.tril_indices_from(w2)
res[l_indices] = w2[l_indices]
dest_layer.set_weights(res)
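
    # Added note on the crossover above (descriptive only, based on what the code does):
    # get_weights() on a Keras Dense layer returns a list such as [kernel, bias]; `res`
    # starts as a copy of parent 1 and the slice res[half:-1] is overwritten from parent 2,
    # so the last element of the list always stays from parent 1 (and with a two-element
    # [kernel, bias] list the copied slice is empty). The np.tril_indices_from branch only
    # applies when a bare array is passed, in which case the lower triangle comes from
    # parent 2 and the remainder from parent 1.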
class KerasNNImageAgent(KerasNNAgent):
"""
Given an image and a target prediction, make an agent that will
optimize for score of target.
"""
def __init__(self, model, conf):
super().__init__(model, conf)
self.image = conf["image"]
self.target = conf["target"]
def begin(self):
pred = self.model.predict(self.image)
self.score = np.sum(np.absolute(pred - self.target))
def make_new(self, parent1, parent2):
new_model = self.breed(parent1, parent2)
agent = KerasNNImageAgent(new_model, self.conf)
agent.mutate()
return agent
def test_image_agent(model_filename, record_filename, num_agents, num_iter):
with open(os.path.expanduser(record_filename), "r") as fp:
record = json.load(fp)
img_filename = os.path.join(os.path.dirname(record_filename), record["cam/image_array"])
img = Image.open(os.path.expanduser(img_filename))
img_arr = np.array(img)
# Our model was trained with this normalization scale on data.
one_byte_scale = 1.0 / 255.0
img_arr = img_arr.astype(np.float32) * one_byte_scale
img_arr = img_arr.reshape((1,) + img_arr.shape)
steering = record["user/angle"]
throttle = record["user/throttle"]
target = np.array([np.array([[steering]]), np.array([[throttle]])])
    # These are the final two dense layers we will mutate. We will use the same two layers when breeding.
to_mutate = [14, 16]
conf = {"layers_to_mutate": to_mutate}
conf["layers_to_combine"] = to_mutate
conf["mutation_rate"] = 1.0
conf["mutation_max"] = 0.3
conf["mutation_min"] = 0.0
conf["mutation_decay"] = 1.0
conf["image"] = img_arr
conf["target"] = target
population = []
for i in range(num_agents):
model = tf.keras.models.load_model(os.path.expanduser(model_filename))
agent = KerasNNImageAgent(model, conf)
if i > 0:
agent.mutate()
population.append(agent)
# Some initial state
print("target: steering: %f throttle: %f" % (target[0][0][0], target[1][0][0]))
agent = population[0]
agent.begin()
print("initial score:", agent.score)
pred = agent.model.predict(img_arr)
print("initial pred", pred[0][0], pred[1][0])
# Try to improve
alg = GeneticAlg(population)
alg.process(num_iter=num_iter)
# Our best agent
agent = alg.population[0]
print("final score:", agent.score)
pred = agent.model.predict(img_arr)
print("final pred", pred[0][0], pred[1][0])
if __name__ == "__main__":
# Example: python ~\projects\gym-donkeycar\examples\genetic_alg\simple_gen.py
# --model models\lane_keeper.h5 --record data\tub_6_20-05-16\record_2000.json
parser = argparse.ArgumentParser(description="simple_gen")
parser.add_argument("--model", type=str, help=".h5 model produced by donkeycar. expects the default linear model type.")
parser.add_argument("--record", type=str, help="donkey json record to use for training")
parser.add_argument("--num_agents", type=int, default=8, help="how many agents in our population")
parser.add_argument("--num_iter", type=int, default=8, help="how many generations before we stop")
args = parser.parse_args()
# only needed if TF==1.13.1
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# sess = tf.Session(config=config)
# K.set_session(sess)
test_image_agent(
model_filename=args.model, record_filename=args.record, num_agents=args.num_agents, num_iter=args.num_iter
)
|
[
"numpy.tril_indices_from",
"argparse.ArgumentParser",
"tensorflow.keras.models.model_from_json",
"numpy.absolute",
"warnings.catch_warnings",
"tensorflow.logging.set_verbosity",
"numpy.array",
"os.path.dirname",
"numpy.random.uniform",
"json.load",
"time.time",
"warnings.filterwarnings",
"os.path.expanduser"
] |
[((600, 642), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (624, 642), True, 'import tensorflow as tf\n'), ((482, 507), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (505, 507), False, 'import warnings\n'), ((513, 570), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'FutureWarning'}), "('ignore', category=FutureWarning)\n", (536, 570), False, 'import warnings\n'), ((6748, 6761), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (6756, 6761), True, 'import numpy as np\n'), ((8536, 8585), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""simple_gen"""'}), "(description='simple_gen')\n", (8559, 8585), False, 'import argparse\n'), ((2670, 2706), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(1.0)'}), '(low=0.0, high=1.0)\n', (2687, 2706), True, 'import numpy as np\n'), ((4953, 4989), 'tensorflow.keras.models.model_from_json', 'tf.keras.models.model_from_json', (['jsm'], {}), '(jsm)\n', (4984, 4989), True, 'import tensorflow as tf\n'), ((6572, 6585), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (6581, 6585), False, 'import json\n'), ((6618, 6650), 'os.path.dirname', 'os.path.dirname', (['record_filename'], {}), '(record_filename)\n', (6633, 6650), False, 'import os\n'), ((6700, 6732), 'os.path.expanduser', 'os.path.expanduser', (['img_filename'], {}), '(img_filename)\n', (6718, 6732), False, 'import os\n'), ((1173, 1184), 'time.time', 'time.time', ([], {}), '()\n', (1182, 1184), False, 'import time\n'), ((4594, 4633), 'numpy.random.uniform', 'np.random.uniform', (['(-mag)', 'mag', 'arr.shape'], {}), '(-mag, mag, arr.shape)\n', (4611, 4633), True, 'import numpy as np\n'), ((5681, 5705), 'numpy.tril_indices_from', 'np.tril_indices_from', (['w2'], {}), '(w2)\n', (5701, 5705), True, 'import numpy as np\n'), ((6188, 6219), 'numpy.absolute', 'np.absolute', (['(pred - self.target)'], {}), '(pred - self.target)\n', (6199, 6219), True, 'import numpy as np\n'), ((6506, 6541), 'os.path.expanduser', 'os.path.expanduser', (['record_filename'], {}), '(record_filename)\n', (6524, 6541), False, 'import os\n'), ((7071, 7093), 'numpy.array', 'np.array', (['[[steering]]'], {}), '([[steering]])\n', (7079, 7093), True, 'import numpy as np\n'), ((7095, 7117), 'numpy.array', 'np.array', (['[[throttle]]'], {}), '([[throttle]])\n', (7103, 7117), True, 'import numpy as np\n'), ((7615, 7649), 'os.path.expanduser', 'os.path.expanduser', (['model_filename'], {}), '(model_filename)\n', (7633, 7649), False, 'import os\n'), ((1275, 1286), 'time.time', 'time.time', ([], {}), '()\n', (1284, 1286), False, 'import time\n'), ((1362, 1373), 'time.time', 'time.time', ([], {}), '()\n', (1371, 1373), False, 'import time\n'), ((4232, 4260), 'numpy.random.uniform', 'np.random.uniform', (['mn', 'mx', '(1)'], {}), '(mn, mx, 1)\n', (4249, 4260), True, 'import numpy as np\n')]
|
import os
import glob
import pickle
import pcl
import torch
import torch.utils.data
import torch.nn as nn
import numpy as np
# global configurations:
from autolab_core import YamlConfig
from dexnet.grasping import GpgGraspSampler
from dexnet.grasping import RobotGripper
home_dir = os.environ['HOME']
yaml_config = YamlConfig(home_dir + "/Projects/PointNetGPD/dex-net/test/config.yaml")
gripper_name = 'robotiq_85'
gripper = RobotGripper.load(gripper_name, home_dir + "/Projects/PointNetGPD/dex-net/data/grippers")
ags = GpgGraspSampler(gripper, yaml_config)
class PointGraspDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
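
    # Added note on the crop above (descriptive sketch): `matrix` stacks the gripper-frame
    # axes (approach, binormal, minor normal) as rows, so pc_p2c expresses the cloud in a
    # frame centred on the grasp. Points are kept when they fall inside an axis-aligned box
    # with half-extents (width/4, width/2, width/4) along (approach, binormal, minor normal),
    # roughly the volume swept between the fingers as the hand closes; grasps whose box
    # contains fewer than min_point_limit points are discarded.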
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
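
    # Added sketch of what cal_projection produces (paraphrasing the code, not official docs):
    # the points in the closing region are binned onto an m_width_of_pic x m_width_of_pic grid
    # whose pixel size is gripper_width/(m_width_of_pic - margin); `occupy_pic` holds the point
    # count per pixel normalised by its maximum, and `norm_pic` the average surface normal of
    # the points landing in each pixel; together they form the GPD-style projection channels.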
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# try:
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]
obj_pc = self.transform[obj_grasp][0]
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
fl_pc = fl_pc[np.random.choice(len(fl_pc), size=self.pc_file_used_num)]
grasp = np.load(f_grasp)[grasp_ind]
pc = np.vstack([np.load(i) for i in fl_pc])
pc = pc[np.random.choice(len(pc), size=self.obj_points_num)]
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 1
else:
return None
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspMultiClassDataset(torch.utils.data.Dataset):
def __init__(self, obj_points_num, grasp_points_num, pc_file_used_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.obj_points_num = obj_points_num
self.grasp_points_num = grasp_points_num
self.pc_file_used_num = pc_file_used_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', '*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
# pc_p2c/left_t/right_t is in local coordinate(with center as origin)
# other(include pc) are in pc coordinate
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
# try:
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
obj_grasp = self.object[obj_ind]
obj_pc = self.transform[obj_grasp][0]
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
fl_pc = fl_pc[np.random.choice(len(fl_pc), size=self.pc_file_used_num)]
grasp = np.load(f_grasp)[grasp_ind]
pc = np.vstack([np.load(i) for i in fl_pc])
pc = pc[np.random.choice(len(pc), size=self.obj_points_num)]
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 2
else:
label = 1
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
class PointGraspOneViewDataset(torch.utils.data.Dataset):
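    """Single-view PointNetGPD dataset.

    Each sample pairs one pre-generated YCB grasp pose (ycb_grasp/<tag>) with a single-view
    point cloud (ycb_rgbd/.../pc_NP3_NP5*.npy), crops the points inside the gripper closing
    region via collect_pc(), and returns (points_or_projection, label), where label 0 is a
    bad grasp and label 1 a good one; borderline scores are skipped.
    """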
def __init__(self, grasp_points_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.grasp_points_num = grasp_points_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
        self.min_point_limit = 150    # minimum number of points required in the gripper closing region
        # projection related parameters
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.minimum_point_amount = 150
        # transformation matrices from the Google scanner (mesh) frame to the point cloud frame
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy')) # grasp pose file
        # only use point clouds captured by camera NP3
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy')) # point cloud file
self.d_pc, self.d_grasp = {}, {}
        for i in fl_pc:  # build the per-object point cloud file lists
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for k in self.d_pc.keys():
self.d_pc[k].sort()
        for i in fl_grasp:  # collect the generated grasp pose files
            grasp_fl_name = i.split('/')[-1].split('.')[0]  # grasp file name without extension
            cnt = grasp_fl_name.split('_')[-1]  # numeric suffix of the grasp file name
            head = grasp_fl_name.split('_')[0]  # leading token of the grasp file name
            k = grasp_fl_name[len(head)+1:-(len(cnt)+1)]  # canonical object name
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys()) # objects to deal with
# print("object1", object1)
object2 = set(self.transform.keys()) # all ycb objects name
# print("object2", object2)
self.object = list(object1)
        # self.object = list(object1.intersection(object2))  # take the intersection of the two sets
print("objects to deal with", self.object)
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
"""
获取手抓闭合区域中的点云
:param grasp: 扫描仪获取的mesh坐标系下抓取姿态 (grasp_center, grasp_axis, grasp_angle, grasp_width, jaw_width)
:param pc: 点云
:param transform: 扫描仪mesh到点云的转换矩阵
:param vis: 可视化选项
:return: 手抓闭合区域中的点云, 或其投影
"""
        # axis-angle representation
        center = grasp[0:3]    # grasp center
        axis = grasp[3:6]   # binormal
        width = grasp[6]    # grasp width
        angle = grasp[7]    # rotation angle
axis = axis/np.linalg.norm(axis) # (3,)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
        R1 = np.c_[[cos_t, 0, sin_t], [0, 1, 0], [-sin_t, 0, cos_t]]  # rotation matrix about the y (binormal) axis
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
        # unit direction vectors of each axis
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
        R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]  # rotation matrix
        approach = R2.dot(R1)[:, 0]
        approach = approach / np.linalg.norm(approach)   # gripper approach direction
        minor_normal = -np.cross(axis, approach)  # minor curvature direction; NOTE: negated to keep a right-handed frame
        # collision check
# grasp_bottom_center = -ags.gripper.hand_depth * approach + center
# hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
# local_hand_points = ags.get_hand_points(np.array([0, 0, 0]), np.array([1, 0, 0]), np.array([0, 1, 0]))
# if_collide = ags.check_collide(grasp_bottom_center, approach,
# binormal, minor_normal, graspable, local_hand_points)
vis = False
        if vis:  # NOTE: the grasp pose obtained here may collide with the point cloud (minor impact)!!! TODO: collision check
mlab.figure(bgcolor=(1, 1, 1), size=(1000, 800))
mlab.pipeline.surface(mlab.pipeline.open("/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/"
"ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"))
            # --- in the scanner coordinate frame ---:
            # world coordinate frame
show_line([0, 0, 0], [0.1, 0, 0], color='r', scale_factor=.0015)
show_line([0, 0, 0], [0, 0.1, 0], color='g', scale_factor=.0015)
show_line([0, 0, 0], [0, 0, 0.1], color='b', scale_factor=.0015)
            show_points(pc, color='b', scale_factor=.002)  # raw point cloud
show_points(center, color='r', scale_factor=.008)
            # show the gripper coordinate frame
show_line(center, (center + binormal * 0.05).reshape(3), color='g', scale_factor=.0015)
show_line(center, (center + approach * 0.05).reshape(3), color='r', scale_factor=.0015)
show_line(center, (center + minor_normal * 0.05).reshape(3), color='b', scale_factor=.0015)
grasp_bottom_center = -ags.gripper.hand_depth * approach + center
hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
ags.show_grasp_3d(hand_points, color=(0.4, 0.6, 0.0))
mlab.title("google", size=0.3, color=(0, 0, 0))
mlab.show()
        left = center - width*axis/2   # leftmost point of the gripper
        right = center + width*axis/2  # rightmost point of the gripper
# bottom = center - width*approach
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
# bottom = (transform @ np.array([bottom[0], bottom[1], bottom[2], 1]))[:3]
# NOTE: m:mesh c:center p:point cloud
        matrix_m2c = np.array([approach, binormal, minor_normal])  # rotation: scanner frame -> grasp center frame
        matrix_p2m = transform[:3, :3]  # rotation: point cloud frame -> scanner frame
        trans_p2m = transform[:, 3:][:3].reshape(3,)  # translation: point cloud frame -> scanner frame
        trans_p2m = np.array([trans_p2m[0], trans_p2m[1], trans_p2m[2] + 0.02])  # small manual adjustment
        pc_p2m = np.dot(matrix_p2m.T, (pc - trans_p2m).T).T  # point cloud registered into the scanner frame
        pc_m2c = (np.dot(matrix_m2c, (pc_p2m-center).T)).T  # scanner-frame points expressed in the grasp center frame
        # pc_c2m = (np.dot(matrix_m2c.T, pc_m2c.T)).T + center  # grasp-center-frame points back in the scanner frame
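        # Frame chain (inferred from the code above): point cloud frame --(matrix_p2m,
        # trans_p2m)--> scanner/mesh frame --(matrix_m2c, centred at `center`)--> grasp
        # center frame; the grasp pose itself is defined in the scanner mesh frame.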
left_t = (-width * np.array([0, 1, 0]) / 2).squeeze()
right_t = (width * np.array([0, 1, 0]) / 2).squeeze()
        # select the points inside the gripper closing region
x_limit = ags.gripper.hand_depth
z_limit = ags.gripper.hand_height
y_limit = width
x1 = pc_m2c[:, 0] > -x_limit
x2 = pc_m2c[:, 0] < 0
y1 = pc_m2c[:, 1] > -y_limit/2
y2 = pc_m2c[:, 1] < y_limit/2
z1 = pc_m2c[:, 2] > -z_limit/2
z2 = pc_m2c[:, 2] < z_limit/2
a = np.vstack([x1, x2, y1, y2, z1, z2])
        self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]  # indices of the points inside the gripper closing region
        if len(self.in_ind) < self.min_point_limit:  # too few points inside the closing region
# print("\033[0;32m%s\033[0m" % "[INFO] points num", len(self.in_ind))
return None
vis = False
        if vis:  # visualize the point cloud inside the gripper closing region
mlab.figure(bgcolor=(1, 1, 1), size=(1000, 800))
mlab.pipeline.surface(mlab.pipeline.open("/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/"
"ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"))
            # world coordinate frame
show_line([0, 0, 0], [0.1, 0, 0], color='r', scale_factor=.0015)
show_line([0, 0, 0], [0, 0.1, 0], color='g', scale_factor=.0015)
show_line([0, 0, 0], [0, 0, 0.1], color='b', scale_factor=.0015)
            # show_points(pc, color='b', scale_factor=.002)  # raw point cloud
            show_points(pc_p2m, color='g', scale_factor=.002)  # point cloud in the scanner frame
            show_points(pc_m2c, color='b', scale_factor=.002)  # point cloud in the grasp center frame
            # show_points(pc_c2m, color='r', scale_factor=.002)  # grasp-center-frame points back in the scanner frame
            # show the gripper in the scanner frame
grasp_bottom_center = -ags.gripper.hand_depth * approach + center
hand_points = ags.get_hand_points(grasp_bottom_center, approach, binormal)
ags.show_grasp_3d(hand_points, color=(0.0, 1.0, 0.0))
            # gripper in the grasp center frame (should sit at the world origin)
            hand_points = (np.dot(matrix_m2c, (hand_points - center).T)).T  # gripper key points in the grasp center frame
            ags.show_grasp_3d(hand_points, color=(0.5, 0.5, 0.5))  # draw the gripper
            # grasp coordinate frame in the scanner frame
            show_points(center, color='r', scale_factor=.008)  # grasp center in the scanner frame
show_line(center, (center + binormal * 0.05).reshape(3), color='g', scale_factor=.0015)
show_line(center, (center + approach * 0.05).reshape(3), color='r', scale_factor=.0015)
show_line(center, (center + minor_normal * 0.05).reshape(3), color='b', scale_factor=.0015)
            show_points(pc_m2c, color='c', scale_factor=.002)  # point cloud in the grasp center frame
            show_points(pc_m2c[self.in_ind], color='b', scale_factor=.002)  # closing-region points in the grasp center frame
            pc_c2m_region = (np.dot(matrix_m2c.T, pc_m2c[self.in_ind].T)).T + center  # closing-region points back in the scanner frame
            show_points(pc_c2m_region, color='r', scale_factor=.002)
            # show the gripper closing region
# x = (np.array([[-1, 1, 1, -1, -1], [-1, 1, 1, -1, -1]]) - 1) * x_limit/2
# y = np.array([[-1, -1, -1, -1, -1], [1, 1, 1, 1, 1]]) * y_limit
# z = np.array([[1, 1, -1, -1, 1], [1, 1, -1, -1, 1]]) * z_limit
# mlab.mesh(x, y, z, color=(1, 0, 0), opacity=0.4)
            # eight vertices of a unit cube
x_arr = np.array([-1, 1, 1, -1, -1, 1, 1, -1])/2
y_arr = np.array([-1, -1, 1, 1, -1, -1, 1, 1])/2
z_arr = np.array([-1, -1, -1, -1, 1, 1, 1, 1])/2
            x = (x_arr - 0.5) * ags.gripper.hand_depth  # shift by half a unit
y = y_arr * (ags.gripper.hand_outer_diameter-2*ags.gripper.finger_width)
z = z_arr * ags.gripper.hand_height
triangles = [(0, 1, 2), (0, 2, 3), (4, 5, 6), (4, 6, 7), (1, 5, 6), (1, 2, 6),
(0, 4, 7), (0, 3, 7), (2, 3, 6), (3, 6, 7), (0, 1, 5), (0, 4, 5)]
mlab.triangular_mesh(x, y, z, triangles, color=(1, 0, 1), opacity=0.2)
mlab.title("cloud", size=0.3, color=(0, 0, 0))
mlab.show()
        if self.projection:
            return self.project_pc(pc_m2c, width)  # return the projected point cloud
        else:
            return pc_m2c[self.in_ind]  # return the points inside the gripper closing region
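    # collect_pc() thus returns an (N, 3) array of closing-region points in the grasp
    # center frame, their image projection when self.projection is True, or None when
    # fewer than self.min_point_limit points fall inside the closing region.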
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
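    # Note: check_square() computes `has_p` but only returns the point indices; it does
    # not appear to be used by the projection pipeline in this class.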
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
"""
计算点云投影
:param point_cloud_voxel:
:param m_width_of_pic:
:param margin:
:param surface_normal:
:param order:
:param gripper_width:
:return:
"""
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
        Project the point cloud inside the gripper closing region.
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check) != 0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
        occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,  # compute the point cloud projection
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
            occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,  # compute the point cloud projection
                                                          order, gripper_width)
            order = np.array([0, 2, 1])
            occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,  # compute the point cloud projection
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
        # get the object index and the grasp pose index
        obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
        obj_grasp = self.object[obj_ind]  # object name, used to look up grasp poses
        obj_pc = self.transform[obj_grasp][0]  # object name, used to look up point clouds
        f_grasp = self.d_grasp[obj_grasp]  # grasp pose file name
        fl_pc = np.array(self.d_pc[obj_pc])  # point cloud file names of all views
        np.random.shuffle(fl_pc)  # shuffle the files
        grasp = np.load(f_grasp)[grasp_ind]  # load the grasp pose
        pc = np.load(fl_pc[-1])  # load a randomly chosen point cloud (last element after shuffling)
        t = self.transform[obj_grasp][1]   # scanner(mesh)-to-point-cloud transform; grasp poses were generated on the scanner mesh and must be transformed into the point cloud frame
# debug
# level_score_, refine_score_ = grasp[-2:]
# score_ = level_score_ + refine_score_ * 0.01
# if score_ >= self.thresh_bad:
# print("label: 0")
# elif score_ <= self.thresh_good:
# print("label: 1")
        grasp_pc = self.collect_pc(grasp, pc, t)  # collect the point cloud inside the gripper closing region
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
            # sample with replacement if there are too few points, subsample randomly if too many
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
            grasp_pc = grasp_pc.transpose((2, 1, 0))  # adjust the channel order
        # classify according to the score
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 1
else:
return None
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
# print("grasp_pc", grasp_pc, grasp_pc.shape, label) # (3, 750)
return grasp_pc, label
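    # Labelling in this two-class variant: score = level_score + 0.01 * refine_score;
    # score >= thresh_bad gives label 0 (bad), score <= thresh_good gives label 1 (good),
    # and anything in between returns None so it is dropped by the collate function.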
def __len__(self):
return self.amount
class PointGraspOneViewMultiClassDataset(torch.utils.data.Dataset):
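    """Multi-class variant of the one-view dataset: labels are 0 (bad), 2 (good) and 1 (in
    between), derived from the same score = level_score + 0.01 * refine_score thresholds."""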
def __init__(self, grasp_points_num, grasp_amount_per_file, thresh_good,
thresh_bad, path, tag, with_obj=False, projection=False, project_chann=3, project_size=60):
self.grasp_points_num = grasp_points_num
self.grasp_amount_per_file = grasp_amount_per_file
self.path = path
self.tag = tag
self.thresh_good = thresh_good
self.thresh_bad = thresh_bad
self.with_obj = with_obj
self.min_point_limit = 50
# projection related
self.projection = projection
self.project_chann = project_chann
if self.project_chann not in [3, 12]:
raise NotImplementedError
self.project_size = project_size
if self.project_size != 60:
raise NotImplementedError
self.normal_K = 10
self.voxel_point_num = 50
self.projection_margin = 1
self.minimum_point_amount = 150
self.transform = pickle.load(open(os.path.join(self.path, 'google2cloud.pkl'), 'rb'))
fl_grasp = glob.glob(os.path.join(path, 'ycb_grasp', self.tag, '*.npy'))
fl_pc = glob.glob(os.path.join(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy'))
self.d_pc, self.d_grasp = {}, {}
for i in fl_pc:
k = i.split('/')[-3]
if k in self.d_pc.keys():
self.d_pc[k].append(i)
else:
self.d_pc[k] = [i]
for k in self.d_pc.keys():
self.d_pc[k].sort()
for i in fl_grasp:
k = i.split('/')[-1].split('.')[0]
self.d_grasp[k] = i
object1 = set(self.d_grasp.keys())
object2 = set(self.transform.keys())
self.object = list(object1.intersection(object2))
self.amount = len(self.object) * self.grasp_amount_per_file
def collect_pc(self, grasp, pc, transform):
center = grasp[0:3]
axis = grasp[3:6] # binormal
width = grasp[6]
angle = grasp[7]
axis = axis/np.linalg.norm(axis)
binormal = axis
# cal approach
cos_t = np.cos(angle)
sin_t = np.sin(angle)
R1 = np.c_[[cos_t, 0, sin_t],[0, 1, 0],[-sin_t, 0, cos_t]]
axis_y = axis
axis_x = np.array([axis_y[1], -axis_y[0], 0])
if np.linalg.norm(axis_x) == 0:
axis_x = np.array([1, 0, 0])
axis_x = axis_x / np.linalg.norm(axis_x)
axis_y = axis_y / np.linalg.norm(axis_y)
axis_z = np.cross(axis_x, axis_y)
R2 = np.c_[axis_x, np.c_[axis_y, axis_z]]
approach = R2.dot(R1)[:, 0]
approach = approach / np.linalg.norm(approach)
minor_normal = np.cross(axis, approach)
left = center - width*axis/2
right = center + width*axis/2
left = (np.dot(transform, np.array([left[0], left[1], left[2], 1])))[:3]
right = (np.dot(transform, np.array([right[0], right[1], right[2], 1])))[:3]
center = (np.dot(transform, np.array([center[0], center[1], center[2], 1])))[:3]
binormal = (np.dot(transform, np.array([binormal[0], binormal[1], binormal[2], 1])))[:3].reshape(3, 1)
approach = (np.dot(transform, np.array([approach[0], approach[1], approach[2], 1])))[:3].reshape(3, 1)
minor_normal = (np.dot(transform, np.array([minor_normal[0], minor_normal[1], minor_normal[2], 1])))[:3].reshape(3, 1)
matrix = np.hstack([approach, binormal, minor_normal]).T
pc_p2c = (np.dot(matrix, (pc-center).T)).T
left_t = (-width * np.array([0,1,0]) / 2).squeeze()
right_t = (width * np.array([0,1,0]) / 2).squeeze()
x_limit = width/4
z_limit = width/4
y_limit = width/2
x1 = pc_p2c[:, 0] > -x_limit
x2 = pc_p2c[:, 0] < x_limit
y1 = pc_p2c[:, 1] > -y_limit
y2 = pc_p2c[:, 1] < y_limit
z1 = pc_p2c[:, 2] > -z_limit
z2 = pc_p2c[:, 2] < z_limit
a = np.vstack([x1, x2, y1, y2, z1, z2])
self.in_ind = np.where(np.sum(a, axis=0) == len(a))[0]
if len(self.in_ind) < self.min_point_limit:
return None
if self.projection:
return self.project_pc(pc_p2c, width)
else:
return pc_p2c[self.in_ind]
def check_square(self, point, points_g):
dirs = np.array([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1],
[-1, 1, -1], [1, 1, -1], [-1, -1, -1], [1, -1, -1]])
p = dirs * 0.5 + point # here res * 0.5 means get half of a pixel width
a1 = p[2][1] < points_g[:, 1]
a2 = p[0][1] > points_g[:, 1]
a3 = p[0][2] > points_g[:, 2]
a4 = p[4][2] < points_g[:, 2]
a5 = p[1][0] > points_g[:, 0]
a6 = p[0][0] < points_g[:, 0]
a = np.vstack([a1, a2, a3, a4, a5, a6])
points_in_area = np.where(np.sum(a, axis=0) == len(a))[0]
if len(points_in_area) == 0:
has_p = False
else:
has_p = True
return points_in_area
def cal_projection(self, point_cloud_voxel, m_width_of_pic, margin, surface_normal, order, gripper_width):
occupy_pic = np.zeros([m_width_of_pic, m_width_of_pic, 1])
norm_pic = np.zeros([m_width_of_pic, m_width_of_pic, 3])
norm_pic_num = np.zeros([m_width_of_pic, m_width_of_pic, 1])
max_x = point_cloud_voxel[:, order[0]].max()
min_x = point_cloud_voxel[:, order[0]].min()
max_y = point_cloud_voxel[:, order[1]].max()
min_y = point_cloud_voxel[:, order[1]].min()
min_z = point_cloud_voxel[:, order[2]].min()
tmp = max((max_x - min_x), (max_y - min_y))
if tmp == 0:
print("WARNING : the num of input points seems only have one, no possilbe to do learning on"
"such data, please throw it away. -- Hongzhuo")
return occupy_pic, norm_pic
# Here, we use the gripper width to cal the res:
res = gripper_width / (m_width_of_pic-margin)
voxel_points_square_norm = []
x_coord_r = ((point_cloud_voxel[:, order[0]]) / res + m_width_of_pic / 2)
y_coord_r = ((point_cloud_voxel[:, order[1]]) / res + m_width_of_pic / 2)
z_coord_r = ((point_cloud_voxel[:, order[2]]) / res + m_width_of_pic / 2)
x_coord_r = np.floor(x_coord_r).astype(int)
y_coord_r = np.floor(y_coord_r).astype(int)
z_coord_r = np.floor(z_coord_r).astype(int)
voxel_index = np.array([x_coord_r, y_coord_r, z_coord_r]).T # all point in grid
coordinate_buffer = np.unique(voxel_index, axis=0) # get a list of points without duplication
K = len(coordinate_buffer)
# [K, 1] store number of points in each voxel grid
number_buffer = np.zeros(shape=K, dtype=np.int64)
feature_buffer = np.zeros(shape=(K, self.voxel_point_num, 6), dtype=np.float32)
index_buffer = {}
for i in range(K):
index_buffer[tuple(coordinate_buffer[i])] = i # got index of coordinate
for voxel, point, normal in zip(voxel_index, point_cloud_voxel, surface_normal):
index = index_buffer[tuple(voxel)]
number = number_buffer[index]
if number < self.voxel_point_num:
feature_buffer[index, number, :3] = point
feature_buffer[index, number, 3:6] = normal
number_buffer[index] += 1
voxel_points_square_norm = np.sum(feature_buffer[..., -3:], axis=1)/number_buffer[:, np.newaxis]
voxel_points_square = coordinate_buffer
if len(voxel_points_square) == 0:
return occupy_pic, norm_pic
x_coord_square = voxel_points_square[:, 0]
y_coord_square = voxel_points_square[:, 1]
norm_pic[x_coord_square, y_coord_square, :] = voxel_points_square_norm
occupy_pic[x_coord_square, y_coord_square] = number_buffer[:, np.newaxis]
occupy_max = occupy_pic.max()
assert(occupy_max > 0)
occupy_pic = occupy_pic / occupy_max
return occupy_pic, norm_pic
def project_pc(self, pc, gripper_width):
"""
for gpd baseline, only support input_chann == [3, 12]
"""
pc = pc.astype(np.float32)
pc = pcl.PointCloud(pc)
norm = pc.make_NormalEstimation()
norm.set_KSearch(self.normal_K)
normals = norm.compute()
surface_normal = normals.to_array()
surface_normal = surface_normal[:, 0:3]
pc = pc.to_array()
grasp_pc = pc[self.in_ind]
grasp_pc_norm = surface_normal[self.in_ind]
bad_check = (grasp_pc_norm != grasp_pc_norm)
if np.sum(bad_check)!=0:
bad_ind = np.where(bad_check == True)
grasp_pc = np.delete(grasp_pc, bad_ind[0], axis=0)
grasp_pc_norm = np.delete(grasp_pc_norm, bad_ind[0], axis=0)
assert(np.sum(grasp_pc_norm != grasp_pc_norm) == 0)
m_width_of_pic = self.project_size
margin = self.projection_margin
order = np.array([0, 1, 2])
occupy_pic1, norm_pic1 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
if self.project_chann == 3:
output = norm_pic1
elif self.project_chann == 12:
order = np.array([1, 2, 0])
occupy_pic2, norm_pic2 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
order = np.array([0, 2, 1])
occupy_pic3, norm_pic3 = self.cal_projection(grasp_pc, m_width_of_pic, margin, grasp_pc_norm,
order, gripper_width)
output = np.dstack([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3])
else:
raise NotImplementedError
return output
def __getitem__(self, index):
obj_ind, grasp_ind = np.unravel_index(index, (len(self.object), self.grasp_amount_per_file))
        obj_grasp = self.object[obj_ind]  # object name used to index the grasp poses
        obj_pc = self.transform[obj_grasp][0]  # object name used to index the point clouds
f_grasp = self.d_grasp[obj_grasp]
fl_pc = np.array(self.d_pc[obj_pc])
np.random.shuffle(fl_pc)
grasp = np.load(f_grasp)[grasp_ind]
pc = np.load(fl_pc[-1])
t = self.transform[obj_grasp][1]
grasp_pc = self.collect_pc(grasp, pc, t)
if grasp_pc is None:
return None
level_score, refine_score = grasp[-2:]
if not self.projection:
if len(grasp_pc) > self.grasp_points_num:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=False)].T
else:
grasp_pc = grasp_pc[np.random.choice(len(grasp_pc), size=self.grasp_points_num,
replace=True)].T
else:
grasp_pc = grasp_pc.transpose((2, 1, 0))
score = level_score + refine_score*0.01
if score >= self.thresh_bad:
label = 0
elif score <= self.thresh_good:
label = 2
else:
label = 1
if self.with_obj:
return grasp_pc, label, obj_grasp
else:
return grasp_pc, label
def __len__(self):
return self.amount
if __name__ == '__main__':
try:
from mayavi import mlab
except ImportError:
print("Can not import mayavi")
mlab = None
def worker_init_fn(pid): # After creating the workers, each worker has an independent seed
np.random.seed(torch.initial_seed() % (2 ** 31 - 1))
def my_collate(batch):
batch = list(filter(lambda x: x is not None, batch))
return torch.utils.data.dataloader.default_collate(batch)
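    # my_collate filters out the None samples produced when collect_pc() finds too few
    # points in the gripper closing region, so only valid samples reach the default collate.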
def show_points(point, color='lb', scale_factor=.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
elif color == 'lb': # light blue
color_f = (0.22, 1, 1)
else:
color_f = (1, 1, 1)
if point.size == 3: # vis for only one point, shape must be (3,), for shape (1, 3) is not work
point = point.reshape(3, )
mlab.points3d(point[0], point[1], point[2], color=color_f, scale_factor=scale_factor)
else: # vis for multiple points
mlab.points3d(point[:, 0], point[:, 1], point[:, 2], color=color_f, scale_factor=scale_factor)
def show_line(un1, un2, color='g', scale_factor=0.0005):
if color == 'b':
color_f = (0, 0, 1)
elif color == 'r':
color_f = (1, 0, 0)
elif color == 'g':
color_f = (0, 1, 0)
else:
color_f = (1, 1, 1)
mlab.plot3d([un1[0], un2[0]], [un1[1], un2[1]], [un1[2], un2[2]], color=color_f, tube_radius=scale_factor)
grasp_points_num = 1000
obj_points_num = 50000
pc_file_used_num = 20
thresh_good = 0.6
thresh_bad = 0.6
input_size = 60
input_chann = 12 # 12
# a = PointGraspDataset(
# obj_points_num=obj_points_num,
# grasp_points_num=grasp_points_num,
# pc_file_used_num=pc_file_used_num,
# path="../data",
# tag='train',
# grasp_amount_per_file=2000,
# thresh_good=thresh_good,
# thresh_bad=thresh_bad,
# projection=True,
# project_chann=input_chann,
# project_size=input_size,
# )
# c, d = a.__getitem__(0)
b = PointGraspOneViewDataset(
grasp_points_num=grasp_points_num,
path="../data",
tag='train',
grasp_amount_per_file=2100, # 6500
thresh_good=thresh_good,
thresh_bad=thresh_bad,
)
cnt = 0
for i in range(b.__len__()):
try:
grasp_pc, label = b.__getitem__(i)
cnt += 1
except (RuntimeError, TypeError, NameError):
print("[INFO] don't have valid points!")
else:
print("[INFO] get points success!")
# print("grasp_pc:", grasp_pc[0], grasp_pc[0].shape, grasp_pc.shape, "\nlable:", label)
# break
# pass
print("[INFO] have {} valid grasp in the dataset.".format(cnt))
# train_loader = torch.utils.data.DataLoader(
# PointGraspOneViewDataset(
# grasp_points_num=grasp_points_num,
# path="../data",
# tag='train',
# grasp_amount_per_file=2100, # 6500
# thresh_good=thresh_good,
# thresh_bad=thresh_bad,
# ),
# batch_size=64,
# num_workers=32,
# pin_memory=True,
# shuffle=True,
# worker_init_fn=worker_init_fn,
# collate_fn=my_collate,
# drop_last=True, # fix bug: ValueError: Expected more than 1 value per channel when training
# )
#
# for batch_idx, (data, target) in enumerate(train_loader):
# # print("data", data, data.shape, "target", target)
# pass
|
[
"dexnet.grasping.GpgGraspSampler",
"numpy.hstack",
"torch.initial_seed",
"numpy.array",
"numpy.linalg.norm",
"numpy.sin",
"pcl.PointCloud",
"autolab_core.YamlConfig",
"mayavi.mlab.points3d",
"mayavi.mlab.pipeline.open",
"numpy.cross",
"numpy.where",
"numpy.delete",
"numpy.dot",
"numpy.vstack",
"dexnet.grasping.RobotGripper.load",
"torch.utils.data.dataloader.default_collate",
"numpy.floor",
"numpy.cos",
"mayavi.mlab.triangular_mesh",
"numpy.dstack",
"numpy.unique",
"mayavi.mlab.show",
"os.path.join",
"mayavi.mlab.figure",
"numpy.sum",
"numpy.zeros",
"mayavi.mlab.plot3d",
"mayavi.mlab.title",
"numpy.load",
"numpy.random.shuffle"
] |
[((318, 389), 'autolab_core.YamlConfig', 'YamlConfig', (["(home_dir + '/Projects/PointNetGPD/dex-net/test/config.yaml')"], {}), "(home_dir + '/Projects/PointNetGPD/dex-net/test/config.yaml')\n", (328, 389), False, 'from autolab_core import YamlConfig\n'), ((428, 521), 'dexnet.grasping.RobotGripper.load', 'RobotGripper.load', (['gripper_name', "(home_dir + '/Projects/PointNetGPD/dex-net/data/grippers')"], {}), "(gripper_name, home_dir +\n '/Projects/PointNetGPD/dex-net/data/grippers')\n", (445, 521), False, 'from dexnet.grasping import RobotGripper\n'), ((524, 561), 'dexnet.grasping.GpgGraspSampler', 'GpgGraspSampler', (['gripper', 'yaml_config'], {}), '(gripper, yaml_config)\n', (539, 561), False, 'from dexnet.grasping import GpgGraspSampler\n'), ((2711, 2724), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (2717, 2724), True, 'import numpy as np\n'), ((2741, 2754), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (2747, 2754), True, 'import numpy as np\n'), ((2861, 2897), 'numpy.array', 'np.array', (['[axis_y[1], -axis_y[0], 0]'], {}), '([axis_y[1], -axis_y[0], 0])\n', (2869, 2897), True, 'import numpy as np\n'), ((3094, 3118), 'numpy.cross', 'np.cross', (['axis_x', 'axis_y'], {}), '(axis_x, axis_y)\n', (3102, 3118), True, 'import numpy as np\n'), ((3283, 3307), 'numpy.cross', 'np.cross', (['axis', 'approach'], {}), '(axis, approach)\n', (3291, 3307), True, 'import numpy as np\n'), ((4790, 4825), 'numpy.vstack', 'np.vstack', (['[x1, x2, y1, y2, z1, z2]'], {}), '([x1, x2, y1, y2, z1, z2])\n', (4799, 4825), True, 'import numpy as np\n'), ((5158, 5273), 'numpy.array', 'np.array', (['[[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, 1, -1], [\n -1, -1, -1], [1, -1, -1]]'], {}), '([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, \n 1, -1], [-1, -1, -1], [1, -1, -1]])\n', (5166, 5273), True, 'import numpy as np\n'), ((5615, 5650), 'numpy.vstack', 'np.vstack', (['[a1, a2, a3, a4, a5, a6]'], {}), '([a1, a2, a3, a4, a5, a6])\n', (5624, 5650), True, 'import numpy as np\n'), ((5983, 6028), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (5991, 6028), True, 'import numpy as np\n'), ((6048, 6093), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 3]'], {}), '([m_width_of_pic, m_width_of_pic, 3])\n', (6056, 6093), True, 'import numpy as np\n'), ((6117, 6162), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (6125, 6162), True, 'import numpy as np\n'), ((7384, 7414), 'numpy.unique', 'np.unique', (['voxel_index'], {'axis': '(0)'}), '(voxel_index, axis=0)\n', (7393, 7414), True, 'import numpy as np\n'), ((7577, 7610), 'numpy.zeros', 'np.zeros', ([], {'shape': 'K', 'dtype': 'np.int64'}), '(shape=K, dtype=np.int64)\n', (7585, 7610), True, 'import numpy as np\n'), ((7636, 7698), 'numpy.zeros', 'np.zeros', ([], {'shape': '(K, self.voxel_point_num, 6)', 'dtype': 'np.float32'}), '(shape=(K, self.voxel_point_num, 6), dtype=np.float32)\n', (7644, 7698), True, 'import numpy as np\n'), ((9052, 9070), 'pcl.PointCloud', 'pcl.PointCloud', (['pc'], {}), '(pc)\n', (9066, 9070), False, 'import pcl\n'), ((9823, 9842), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (9831, 9842), True, 'import numpy as np\n'), ((11048, 11075), 'numpy.array', 'np.array', (['self.d_pc[obj_pc]'], {}), '(self.d_pc[obj_pc])\n', (11056, 11075), True, 'import numpy as np\n'), ((14557, 14570), 'numpy.cos', 'np.cos', (['angle'], {}), 
'(angle)\n', (14563, 14570), True, 'import numpy as np\n'), ((14587, 14600), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (14593, 14600), True, 'import numpy as np\n'), ((14707, 14743), 'numpy.array', 'np.array', (['[axis_y[1], -axis_y[0], 0]'], {}), '([axis_y[1], -axis_y[0], 0])\n', (14715, 14743), True, 'import numpy as np\n'), ((14940, 14964), 'numpy.cross', 'np.cross', (['axis_x', 'axis_y'], {}), '(axis_x, axis_y)\n', (14948, 14964), True, 'import numpy as np\n'), ((15129, 15153), 'numpy.cross', 'np.cross', (['axis', 'approach'], {}), '(axis, approach)\n', (15137, 15153), True, 'import numpy as np\n'), ((16636, 16671), 'numpy.vstack', 'np.vstack', (['[x1, x2, y1, y2, z1, z2]'], {}), '([x1, x2, y1, y2, z1, z2])\n', (16645, 16671), True, 'import numpy as np\n'), ((17004, 17119), 'numpy.array', 'np.array', (['[[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, 1, -1], [\n -1, -1, -1], [1, -1, -1]]'], {}), '([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, \n 1, -1], [-1, -1, -1], [1, -1, -1]])\n', (17012, 17119), True, 'import numpy as np\n'), ((17461, 17496), 'numpy.vstack', 'np.vstack', (['[a1, a2, a3, a4, a5, a6]'], {}), '([a1, a2, a3, a4, a5, a6])\n', (17470, 17496), True, 'import numpy as np\n'), ((17829, 17874), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (17837, 17874), True, 'import numpy as np\n'), ((17894, 17939), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 3]'], {}), '([m_width_of_pic, m_width_of_pic, 3])\n', (17902, 17939), True, 'import numpy as np\n'), ((17963, 18008), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (17971, 18008), True, 'import numpy as np\n'), ((19230, 19260), 'numpy.unique', 'np.unique', (['voxel_index'], {'axis': '(0)'}), '(voxel_index, axis=0)\n', (19239, 19260), True, 'import numpy as np\n'), ((19423, 19456), 'numpy.zeros', 'np.zeros', ([], {'shape': 'K', 'dtype': 'np.int64'}), '(shape=K, dtype=np.int64)\n', (19431, 19456), True, 'import numpy as np\n'), ((19482, 19544), 'numpy.zeros', 'np.zeros', ([], {'shape': '(K, self.voxel_point_num, 6)', 'dtype': 'np.float32'}), '(shape=(K, self.voxel_point_num, 6), dtype=np.float32)\n', (19490, 19544), True, 'import numpy as np\n'), ((20898, 20916), 'pcl.PointCloud', 'pcl.PointCloud', (['pc'], {}), '(pc)\n', (20912, 20916), False, 'import pcl\n'), ((21669, 21688), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (21677, 21688), True, 'import numpy as np\n'), ((22894, 22921), 'numpy.array', 'np.array', (['self.d_pc[obj_pc]'], {}), '(self.d_pc[obj_pc])\n', (22902, 22921), True, 'import numpy as np\n'), ((27295, 27308), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (27301, 27308), True, 'import numpy as np\n'), ((27325, 27338), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (27331, 27338), True, 'import numpy as np\n'), ((27455, 27491), 'numpy.array', 'np.array', (['[axis_y[1], -axis_y[0], 0]'], {}), '([axis_y[1], -axis_y[0], 0])\n', (27463, 27491), True, 'import numpy as np\n'), ((27708, 27732), 'numpy.cross', 'np.cross', (['axis_x', 'axis_y'], {}), '(axis_x, axis_y)\n', (27716, 27732), True, 'import numpy as np\n'), ((30303, 30347), 'numpy.array', 'np.array', (['[approach, binormal, minor_normal]'], {}), '([approach, binormal, minor_normal])\n', (30311, 30347), True, 'import numpy as np\n'), ((30539, 30598), 'numpy.array', 'np.array', (['[trans_p2m[0], trans_p2m[1], trans_p2m[2] + 
0.02]'], {}), '([trans_p2m[0], trans_p2m[1], trans_p2m[2] + 0.02])\n', (30547, 30598), True, 'import numpy as np\n'), ((31341, 31376), 'numpy.vstack', 'np.vstack', (['[x1, x2, y1, y2, z1, z2]'], {}), '([x1, x2, y1, y2, z1, z2])\n', (31350, 31376), True, 'import numpy as np\n'), ((35055, 35170), 'numpy.array', 'np.array', (['[[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, 1, -1], [\n -1, -1, -1], [1, -1, -1]]'], {}), '([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, \n 1, -1], [-1, -1, -1], [1, -1, -1]])\n', (35063, 35170), True, 'import numpy as np\n'), ((35512, 35547), 'numpy.vstack', 'np.vstack', (['[a1, a2, a3, a4, a5, a6]'], {}), '([a1, a2, a3, a4, a5, a6])\n', (35521, 35547), True, 'import numpy as np\n'), ((36107, 36152), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (36115, 36152), True, 'import numpy as np\n'), ((36172, 36217), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 3]'], {}), '([m_width_of_pic, m_width_of_pic, 3])\n', (36180, 36217), True, 'import numpy as np\n'), ((36241, 36286), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (36249, 36286), True, 'import numpy as np\n'), ((37508, 37538), 'numpy.unique', 'np.unique', (['voxel_index'], {'axis': '(0)'}), '(voxel_index, axis=0)\n', (37517, 37538), True, 'import numpy as np\n'), ((37701, 37734), 'numpy.zeros', 'np.zeros', ([], {'shape': 'K', 'dtype': 'np.int64'}), '(shape=K, dtype=np.int64)\n', (37709, 37734), True, 'import numpy as np\n'), ((37760, 37822), 'numpy.zeros', 'np.zeros', ([], {'shape': '(K, self.voxel_point_num, 6)', 'dtype': 'np.float32'}), '(shape=(K, self.voxel_point_num, 6), dtype=np.float32)\n', (37768, 37822), True, 'import numpy as np\n'), ((39199, 39217), 'pcl.PointCloud', 'pcl.PointCloud', (['pc'], {}), '(pc)\n', (39213, 39217), False, 'import pcl\n'), ((39972, 39991), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (39980, 39991), True, 'import numpy as np\n'), ((41285, 41312), 'numpy.array', 'np.array', (['self.d_pc[obj_pc]'], {}), '(self.d_pc[obj_pc])\n', (41293, 41312), True, 'import numpy as np\n'), ((41333, 41357), 'numpy.random.shuffle', 'np.random.shuffle', (['fl_pc'], {}), '(fl_pc)\n', (41350, 41357), True, 'import numpy as np\n'), ((41434, 41452), 'numpy.load', 'np.load', (['fl_pc[-1]'], {}), '(fl_pc[-1])\n', (41441, 41452), True, 'import numpy as np\n'), ((45170, 45183), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (45176, 45183), True, 'import numpy as np\n'), ((45200, 45213), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (45206, 45213), True, 'import numpy as np\n'), ((45320, 45356), 'numpy.array', 'np.array', (['[axis_y[1], -axis_y[0], 0]'], {}), '([axis_y[1], -axis_y[0], 0])\n', (45328, 45356), True, 'import numpy as np\n'), ((45553, 45577), 'numpy.cross', 'np.cross', (['axis_x', 'axis_y'], {}), '(axis_x, axis_y)\n', (45561, 45577), True, 'import numpy as np\n'), ((45742, 45766), 'numpy.cross', 'np.cross', (['axis', 'approach'], {}), '(axis, approach)\n', (45750, 45766), True, 'import numpy as np\n'), ((46995, 47030), 'numpy.vstack', 'np.vstack', (['[x1, x2, y1, y2, z1, z2]'], {}), '([x1, x2, y1, y2, z1, z2])\n', (47004, 47030), True, 'import numpy as np\n'), ((47363, 47478), 'numpy.array', 'np.array', (['[[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], [-1, 1, -1], [1, 1, -1], [\n -1, -1, -1], [1, -1, -1]]'], {}), '([[-1, 1, 1], [1, 1, 1], [-1, -1, 1], [1, -1, 1], 
[-1, 1, -1], [1, \n 1, -1], [-1, -1, -1], [1, -1, -1]])\n', (47371, 47478), True, 'import numpy as np\n'), ((47820, 47855), 'numpy.vstack', 'np.vstack', (['[a1, a2, a3, a4, a5, a6]'], {}), '([a1, a2, a3, a4, a5, a6])\n', (47829, 47855), True, 'import numpy as np\n'), ((48188, 48233), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (48196, 48233), True, 'import numpy as np\n'), ((48253, 48298), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 3]'], {}), '([m_width_of_pic, m_width_of_pic, 3])\n', (48261, 48298), True, 'import numpy as np\n'), ((48322, 48367), 'numpy.zeros', 'np.zeros', (['[m_width_of_pic, m_width_of_pic, 1]'], {}), '([m_width_of_pic, m_width_of_pic, 1])\n', (48330, 48367), True, 'import numpy as np\n'), ((49589, 49619), 'numpy.unique', 'np.unique', (['voxel_index'], {'axis': '(0)'}), '(voxel_index, axis=0)\n', (49598, 49619), True, 'import numpy as np\n'), ((49782, 49815), 'numpy.zeros', 'np.zeros', ([], {'shape': 'K', 'dtype': 'np.int64'}), '(shape=K, dtype=np.int64)\n', (49790, 49815), True, 'import numpy as np\n'), ((49841, 49903), 'numpy.zeros', 'np.zeros', ([], {'shape': '(K, self.voxel_point_num, 6)', 'dtype': 'np.float32'}), '(shape=(K, self.voxel_point_num, 6), dtype=np.float32)\n', (49849, 49903), True, 'import numpy as np\n'), ((51257, 51275), 'pcl.PointCloud', 'pcl.PointCloud', (['pc'], {}), '(pc)\n', (51271, 51275), False, 'import pcl\n'), ((52028, 52047), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (52036, 52047), True, 'import numpy as np\n'), ((53255, 53282), 'numpy.array', 'np.array', (['self.d_pc[obj_pc]'], {}), '(self.d_pc[obj_pc])\n', (53263, 53282), True, 'import numpy as np\n'), ((53291, 53315), 'numpy.random.shuffle', 'np.random.shuffle', (['fl_pc'], {}), '(fl_pc)\n', (53308, 53315), True, 'import numpy as np\n'), ((53374, 53392), 'numpy.load', 'np.load', (['fl_pc[-1]'], {}), '(fl_pc[-1])\n', (53381, 53392), True, 'import numpy as np\n'), ((54884, 54934), 'torch.utils.data.dataloader.default_collate', 'torch.utils.data.dataloader.default_collate', (['batch'], {}), '(batch)\n', (54927, 54934), False, 'import torch\n'), ((55976, 56087), 'mayavi.mlab.plot3d', 'mlab.plot3d', (['[un1[0], un2[0]]', '[un1[1], un2[1]]', '[un1[2], un2[2]]'], {'color': 'color_f', 'tube_radius': 'scale_factor'}), '([un1[0], un2[0]], [un1[1], un2[1]], [un1[2], un2[2]], color=\n color_f, tube_radius=scale_factor)\n', (55987, 56087), False, 'from mayavi import mlab\n'), ((1758, 1808), 'os.path.join', 'os.path.join', (['path', '"""ycb_grasp"""', 'self.tag', '"""*.npy"""'], {}), "(path, 'ycb_grasp', self.tag, '*.npy')\n", (1770, 1808), False, 'import os\n'), ((1836, 1890), 'os.path.join', 'os.path.join', (['path', '"""ycb_rgbd"""', '"""*"""', '"""clouds"""', '"""*.npy"""'], {}), "(path, 'ycb_rgbd', '*', 'clouds', '*.npy')\n", (1848, 1890), False, 'import os\n'), ((2627, 2647), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (2641, 2647), True, 'import numpy as np\n'), ((2909, 2931), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (2923, 2931), True, 'import numpy as np\n'), ((2959, 2978), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2967, 2978), True, 'import numpy as np\n'), ((3005, 3027), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (3019, 3027), True, 'import numpy as np\n'), ((3054, 3076), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_y'], {}), '(axis_y)\n', (3068, 3076), True, 'import numpy 
as np\n'), ((3235, 3259), 'numpy.linalg.norm', 'np.linalg.norm', (['approach'], {}), '(approach)\n', (3249, 3259), True, 'import numpy as np\n'), ((4132, 4177), 'numpy.hstack', 'np.hstack', (['[approach, binormal, minor_normal]'], {}), '([approach, binormal, minor_normal])\n', (4141, 4177), True, 'import numpy as np\n'), ((4325, 4356), 'numpy.dot', 'np.dot', (['matrix', '(pc - center).T'], {}), '(matrix, (pc - center).T)\n', (4331, 4356), True, 'import numpy as np\n'), ((7289, 7332), 'numpy.array', 'np.array', (['[x_coord_r, y_coord_r, z_coord_r]'], {}), '([x_coord_r, y_coord_r, z_coord_r])\n', (7297, 7332), True, 'import numpy as np\n'), ((8258, 8298), 'numpy.sum', 'np.sum', (['feature_buffer[..., -3:]'], {'axis': '(1)'}), '(feature_buffer[..., -3:], axis=1)\n', (8264, 8298), True, 'import numpy as np\n'), ((9456, 9473), 'numpy.sum', 'np.sum', (['bad_check'], {}), '(bad_check)\n', (9462, 9473), True, 'import numpy as np\n'), ((9500, 9527), 'numpy.where', 'np.where', (['(bad_check == True)'], {}), '(bad_check == True)\n', (9508, 9527), True, 'import numpy as np\n'), ((9551, 9590), 'numpy.delete', 'np.delete', (['grasp_pc', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc, bad_ind[0], axis=0)\n', (9560, 9590), True, 'import numpy as np\n'), ((9619, 9663), 'numpy.delete', 'np.delete', (['grasp_pc_norm', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc_norm, bad_ind[0], axis=0)\n', (9628, 9663), True, 'import numpy as np\n'), ((9679, 9717), 'numpy.sum', 'np.sum', (['(grasp_pc_norm != grasp_pc_norm)'], {}), '(grasp_pc_norm != grasp_pc_norm)\n', (9685, 9717), True, 'import numpy as np\n'), ((11173, 11189), 'numpy.load', 'np.load', (['f_grasp'], {}), '(f_grasp)\n', (11180, 11189), True, 'import numpy as np\n'), ((13604, 13654), 'os.path.join', 'os.path.join', (['path', '"""ycb_grasp"""', 'self.tag', '"""*.npy"""'], {}), "(path, 'ycb_grasp', self.tag, '*.npy')\n", (13616, 13654), False, 'import os\n'), ((13682, 13736), 'os.path.join', 'os.path.join', (['path', '"""ycb_rgbd"""', '"""*"""', '"""clouds"""', '"""*.npy"""'], {}), "(path, 'ycb_rgbd', '*', 'clouds', '*.npy')\n", (13694, 13736), False, 'import os\n'), ((14473, 14493), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (14487, 14493), True, 'import numpy as np\n'), ((14755, 14777), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (14769, 14777), True, 'import numpy as np\n'), ((14805, 14824), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (14813, 14824), True, 'import numpy as np\n'), ((14851, 14873), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (14865, 14873), True, 'import numpy as np\n'), ((14900, 14922), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_y'], {}), '(axis_y)\n', (14914, 14922), True, 'import numpy as np\n'), ((15081, 15105), 'numpy.linalg.norm', 'np.linalg.norm', (['approach'], {}), '(approach)\n', (15095, 15105), True, 'import numpy as np\n'), ((15978, 16023), 'numpy.hstack', 'np.hstack', (['[approach, binormal, minor_normal]'], {}), '([approach, binormal, minor_normal])\n', (15987, 16023), True, 'import numpy as np\n'), ((16171, 16202), 'numpy.dot', 'np.dot', (['matrix', '(pc - center).T'], {}), '(matrix, (pc - center).T)\n', (16177, 16202), True, 'import numpy as np\n'), ((19135, 19178), 'numpy.array', 'np.array', (['[x_coord_r, y_coord_r, z_coord_r]'], {}), '([x_coord_r, y_coord_r, z_coord_r])\n', (19143, 19178), True, 'import numpy as np\n'), ((20104, 20144), 'numpy.sum', 'np.sum', (['feature_buffer[..., -3:]'], {'axis': '(1)'}), 
'(feature_buffer[..., -3:], axis=1)\n', (20110, 20144), True, 'import numpy as np\n'), ((21302, 21319), 'numpy.sum', 'np.sum', (['bad_check'], {}), '(bad_check)\n', (21308, 21319), True, 'import numpy as np\n'), ((21346, 21373), 'numpy.where', 'np.where', (['(bad_check == True)'], {}), '(bad_check == True)\n', (21354, 21373), True, 'import numpy as np\n'), ((21397, 21436), 'numpy.delete', 'np.delete', (['grasp_pc', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc, bad_ind[0], axis=0)\n', (21406, 21436), True, 'import numpy as np\n'), ((21465, 21509), 'numpy.delete', 'np.delete', (['grasp_pc_norm', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc_norm, bad_ind[0], axis=0)\n', (21474, 21509), True, 'import numpy as np\n'), ((21525, 21563), 'numpy.sum', 'np.sum', (['(grasp_pc_norm != grasp_pc_norm)'], {}), '(grasp_pc_norm != grasp_pc_norm)\n', (21531, 21563), True, 'import numpy as np\n'), ((23019, 23035), 'numpy.load', 'np.load', (['f_grasp'], {}), '(f_grasp)\n', (23026, 23035), True, 'import numpy as np\n'), ((25402, 25452), 'os.path.join', 'os.path.join', (['path', '"""ycb_grasp"""', 'self.tag', '"""*.npy"""'], {}), "(path, 'ycb_grasp', self.tag, '*.npy')\n", (25414, 25452), False, 'import os\n'), ((25534, 25598), 'os.path.join', 'os.path.join', (['path', '"""ycb_rgbd"""', '"""*"""', '"""clouds"""', '"""pc_NP3_NP5*.npy"""'], {}), "(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy')\n", (25546, 25598), False, 'import os\n'), ((27203, 27223), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (27217, 27223), True, 'import numpy as np\n'), ((27503, 27525), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (27517, 27525), True, 'import numpy as np\n'), ((27553, 27572), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (27561, 27572), True, 'import numpy as np\n'), ((27619, 27641), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (27633, 27641), True, 'import numpy as np\n'), ((27668, 27690), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_y'], {}), '(axis_y)\n', (27682, 27690), True, 'import numpy as np\n'), ((27857, 27881), 'numpy.linalg.norm', 'np.linalg.norm', (['approach'], {}), '(approach)\n', (27871, 27881), True, 'import numpy as np\n'), ((27914, 27938), 'numpy.cross', 'np.cross', (['axis', 'approach'], {}), '(axis, approach)\n', (27922, 27938), True, 'import numpy as np\n'), ((28522, 28570), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(1, 1, 1)', 'size': '(1000, 800)'}), '(bgcolor=(1, 1, 1), size=(1000, 800))\n', (28533, 28570), False, 'from mayavi import mlab\n'), ((29774, 29821), 'mayavi.mlab.title', 'mlab.title', (['"""google"""'], {'size': '(0.3)', 'color': '(0, 0, 0)'}), "('google', size=0.3, color=(0, 0, 0))\n", (29784, 29821), False, 'from mayavi import mlab\n'), ((29834, 29845), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (29843, 29845), False, 'from mayavi import mlab\n'), ((30623, 30663), 'numpy.dot', 'np.dot', (['matrix_p2m.T', '(pc - trans_p2m).T'], {}), '(matrix_p2m.T, (pc - trans_p2m).T)\n', (30629, 30663), True, 'import numpy as np\n'), ((30701, 30740), 'numpy.dot', 'np.dot', (['matrix_m2c', '(pc_p2m - center).T'], {}), '(matrix_m2c, (pc_p2m - center).T)\n', (30707, 30740), True, 'import numpy as np\n'), ((31694, 31742), 'mayavi.mlab.figure', 'mlab.figure', ([], {'bgcolor': '(1, 1, 1)', 'size': '(1000, 800)'}), '(bgcolor=(1, 1, 1), size=(1000, 800))\n', (31705, 31742), False, 'from mayavi import mlab\n'), ((34679, 34749), 'mayavi.mlab.triangular_mesh', 'mlab.triangular_mesh', (['x', 'y', 
'z', 'triangles'], {'color': '(1, 0, 1)', 'opacity': '(0.2)'}), '(x, y, z, triangles, color=(1, 0, 1), opacity=0.2)\n', (34699, 34749), False, 'from mayavi import mlab\n'), ((34763, 34809), 'mayavi.mlab.title', 'mlab.title', (['"""cloud"""'], {'size': '(0.3)', 'color': '(0, 0, 0)'}), "('cloud', size=0.3, color=(0, 0, 0))\n", (34773, 34809), False, 'from mayavi import mlab\n'), ((34822, 34833), 'mayavi.mlab.show', 'mlab.show', ([], {}), '()\n', (34831, 34833), False, 'from mayavi import mlab\n'), ((37413, 37456), 'numpy.array', 'np.array', (['[x_coord_r, y_coord_r, z_coord_r]'], {}), '([x_coord_r, y_coord_r, z_coord_r])\n', (37421, 37456), True, 'import numpy as np\n'), ((38382, 38422), 'numpy.sum', 'np.sum', (['feature_buffer[..., -3:]'], {'axis': '(1)'}), '(feature_buffer[..., -3:], axis=1)\n', (38388, 38422), True, 'import numpy as np\n'), ((39603, 39620), 'numpy.sum', 'np.sum', (['bad_check'], {}), '(bad_check)\n', (39609, 39620), True, 'import numpy as np\n'), ((39649, 39676), 'numpy.where', 'np.where', (['(bad_check == True)'], {}), '(bad_check == True)\n', (39657, 39676), True, 'import numpy as np\n'), ((39700, 39739), 'numpy.delete', 'np.delete', (['grasp_pc', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc, bad_ind[0], axis=0)\n', (39709, 39739), True, 'import numpy as np\n'), ((39768, 39812), 'numpy.delete', 'np.delete', (['grasp_pc_norm', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc_norm, bad_ind[0], axis=0)\n', (39777, 39812), True, 'import numpy as np\n'), ((39828, 39866), 'numpy.sum', 'np.sum', (['(grasp_pc_norm != grasp_pc_norm)'], {}), '(grasp_pc_norm != grasp_pc_norm)\n', (39834, 39866), True, 'import numpy as np\n'), ((41383, 41399), 'numpy.load', 'np.load', (['f_grasp'], {}), '(f_grasp)\n', (41390, 41399), True, 'import numpy as np\n'), ((44140, 44190), 'os.path.join', 'os.path.join', (['path', '"""ycb_grasp"""', 'self.tag', '"""*.npy"""'], {}), "(path, 'ycb_grasp', self.tag, '*.npy')\n", (44152, 44190), False, 'import os\n'), ((44218, 44282), 'os.path.join', 'os.path.join', (['path', '"""ycb_rgbd"""', '"""*"""', '"""clouds"""', '"""pc_NP3_NP5*.npy"""'], {}), "(path, 'ycb_rgbd', '*', 'clouds', 'pc_NP3_NP5*.npy')\n", (44230, 44282), False, 'import os\n'), ((45086, 45106), 'numpy.linalg.norm', 'np.linalg.norm', (['axis'], {}), '(axis)\n', (45100, 45106), True, 'import numpy as np\n'), ((45368, 45390), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (45382, 45390), True, 'import numpy as np\n'), ((45418, 45437), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (45426, 45437), True, 'import numpy as np\n'), ((45464, 45486), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_x'], {}), '(axis_x)\n', (45478, 45486), True, 'import numpy as np\n'), ((45513, 45535), 'numpy.linalg.norm', 'np.linalg.norm', (['axis_y'], {}), '(axis_y)\n', (45527, 45535), True, 'import numpy as np\n'), ((45694, 45718), 'numpy.linalg.norm', 'np.linalg.norm', (['approach'], {}), '(approach)\n', (45708, 45718), True, 'import numpy as np\n'), ((46464, 46509), 'numpy.hstack', 'np.hstack', (['[approach, binormal, minor_normal]'], {}), '([approach, binormal, minor_normal])\n', (46473, 46509), True, 'import numpy as np\n'), ((46530, 46561), 'numpy.dot', 'np.dot', (['matrix', '(pc - center).T'], {}), '(matrix, (pc - center).T)\n', (46536, 46561), True, 'import numpy as np\n'), ((49494, 49537), 'numpy.array', 'np.array', (['[x_coord_r, y_coord_r, z_coord_r]'], {}), '([x_coord_r, y_coord_r, z_coord_r])\n', (49502, 49537), True, 'import numpy as np\n'), ((50463, 50503), 'numpy.sum', 
'np.sum', (['feature_buffer[..., -3:]'], {'axis': '(1)'}), '(feature_buffer[..., -3:], axis=1)\n', (50469, 50503), True, 'import numpy as np\n'), ((51661, 51678), 'numpy.sum', 'np.sum', (['bad_check'], {}), '(bad_check)\n', (51667, 51678), True, 'import numpy as np\n'), ((51705, 51732), 'numpy.where', 'np.where', (['(bad_check == True)'], {}), '(bad_check == True)\n', (51713, 51732), True, 'import numpy as np\n'), ((51756, 51795), 'numpy.delete', 'np.delete', (['grasp_pc', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc, bad_ind[0], axis=0)\n', (51765, 51795), True, 'import numpy as np\n'), ((51824, 51868), 'numpy.delete', 'np.delete', (['grasp_pc_norm', 'bad_ind[0]'], {'axis': '(0)'}), '(grasp_pc_norm, bad_ind[0], axis=0)\n', (51833, 51868), True, 'import numpy as np\n'), ((51884, 51922), 'numpy.sum', 'np.sum', (['(grasp_pc_norm != grasp_pc_norm)'], {}), '(grasp_pc_norm != grasp_pc_norm)\n', (51890, 51922), True, 'import numpy as np\n'), ((53333, 53349), 'numpy.load', 'np.load', (['f_grasp'], {}), '(f_grasp)\n', (53340, 53349), True, 'import numpy as np\n'), ((55450, 55540), 'mayavi.mlab.points3d', 'mlab.points3d', (['point[0]', 'point[1]', 'point[2]'], {'color': 'color_f', 'scale_factor': 'scale_factor'}), '(point[0], point[1], point[2], color=color_f, scale_factor=\n scale_factor)\n', (55463, 55540), False, 'from mayavi import mlab\n'), ((55589, 55687), 'mayavi.mlab.points3d', 'mlab.points3d', (['point[:, 0]', 'point[:, 1]', 'point[:, 2]'], {'color': 'color_f', 'scale_factor': 'scale_factor'}), '(point[:, 0], point[:, 1], point[:, 2], color=color_f,\n scale_factor=scale_factor)\n', (55602, 55687), False, 'from mayavi import mlab\n'), ((1677, 1720), 'os.path.join', 'os.path.join', (['self.path', '"""google2cloud.pkl"""'], {}), "(self.path, 'google2cloud.pkl')\n", (1689, 1720), False, 'import os\n'), ((3461, 3501), 'numpy.array', 'np.array', (['[left[0], left[1], left[2], 1]'], {}), '([left[0], left[1], left[2], 1])\n', (3469, 3501), True, 'import numpy as np\n'), ((3543, 3586), 'numpy.array', 'np.array', (['[right[0], right[1], right[2], 1]'], {}), '([right[0], right[1], right[2], 1])\n', (3551, 3586), True, 'import numpy as np\n'), ((3713, 3759), 'numpy.array', 'np.array', (['[center[0], center[1], center[2], 1]'], {}), '([center[0], center[1], center[2], 1])\n', (3721, 3759), True, 'import numpy as np\n'), ((7131, 7150), 'numpy.floor', 'np.floor', (['x_coord_r'], {}), '(x_coord_r)\n', (7139, 7150), True, 'import numpy as np\n'), ((7183, 7202), 'numpy.floor', 'np.floor', (['y_coord_r'], {}), '(y_coord_r)\n', (7191, 7202), True, 'import numpy as np\n'), ((7235, 7254), 'numpy.floor', 'np.floor', (['z_coord_r'], {}), '(z_coord_r)\n', (7243, 7254), True, 'import numpy as np\n'), ((10146, 10165), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (10154, 10165), True, 'import numpy as np\n'), ((10371, 10390), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (10379, 10390), True, 'import numpy as np\n'), ((10593, 10680), 'numpy.dstack', 'np.dstack', (['[occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3]'], {}), '([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3,\n norm_pic3])\n', (10602, 10680), True, 'import numpy as np\n'), ((11225, 11235), 'numpy.load', 'np.load', (['i'], {}), '(i)\n', (11232, 11235), True, 'import numpy as np\n'), ((13523, 13566), 'os.path.join', 'os.path.join', (['self.path', '"""google2cloud.pkl"""'], {}), "(self.path, 'google2cloud.pkl')\n", (13535, 13566), False, 'import os\n'), ((15307, 15347), 'numpy.array', 
'np.array', (['[left[0], left[1], left[2], 1]'], {}), '([left[0], left[1], left[2], 1])\n', (15315, 15347), True, 'import numpy as np\n'), ((15389, 15432), 'numpy.array', 'np.array', (['[right[0], right[1], right[2], 1]'], {}), '([right[0], right[1], right[2], 1])\n', (15397, 15432), True, 'import numpy as np\n'), ((15559, 15605), 'numpy.array', 'np.array', (['[center[0], center[1], center[2], 1]'], {}), '([center[0], center[1], center[2], 1])\n', (15567, 15605), True, 'import numpy as np\n'), ((18977, 18996), 'numpy.floor', 'np.floor', (['x_coord_r'], {}), '(x_coord_r)\n', (18985, 18996), True, 'import numpy as np\n'), ((19029, 19048), 'numpy.floor', 'np.floor', (['y_coord_r'], {}), '(y_coord_r)\n', (19037, 19048), True, 'import numpy as np\n'), ((19081, 19100), 'numpy.floor', 'np.floor', (['z_coord_r'], {}), '(z_coord_r)\n', (19089, 19100), True, 'import numpy as np\n'), ((21992, 22011), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (22000, 22011), True, 'import numpy as np\n'), ((22217, 22236), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (22225, 22236), True, 'import numpy as np\n'), ((22439, 22526), 'numpy.dstack', 'np.dstack', (['[occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3]'], {}), '([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3,\n norm_pic3])\n', (22448, 22526), True, 'import numpy as np\n'), ((23071, 23081), 'numpy.load', 'np.load', (['i'], {}), '(i)\n', (23078, 23081), True, 'import numpy as np\n'), ((25321, 25364), 'os.path.join', 'os.path.join', (['self.path', '"""google2cloud.pkl"""'], {}), "(self.path, 'google2cloud.pkl')\n", (25333, 25364), False, 'import os\n'), ((28605, 28747), 'mayavi.mlab.pipeline.open', 'mlab.pipeline.open', (['"""/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"""'], {}), "(\n '/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply'\n )\n", (28623, 28747), False, 'from mayavi import mlab\n'), ((30019, 30059), 'numpy.array', 'np.array', (['[left[0], left[1], left[2], 1]'], {}), '([left[0], left[1], left[2], 1])\n', (30027, 30059), True, 'import numpy as np\n'), ((30101, 30144), 'numpy.array', 'np.array', (['[right[0], right[1], right[2], 1]'], {}), '([right[0], right[1], right[2], 1])\n', (30109, 30144), True, 'import numpy as np\n'), ((31777, 31919), 'mayavi.mlab.pipeline.open', 'mlab.pipeline.open', (['"""/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply"""'], {}), "(\n '/home/sdhm/Projects/PointNetGPD/PointNetGPD/data/ycb_meshes_google/003_cracker_box/google_512k/nontextured.ply'\n )\n", (31795, 31919), False, 'from mayavi import mlab\n'), ((32849, 32893), 'numpy.dot', 'np.dot', (['matrix_m2c', '(hand_points - center).T'], {}), '(matrix_m2c, (hand_points - center).T)\n', (32855, 32893), True, 'import numpy as np\n'), ((34124, 34162), 'numpy.array', 'np.array', (['[-1, 1, 1, -1, -1, 1, 1, -1]'], {}), '([-1, 1, 1, -1, -1, 1, 1, -1])\n', (34132, 34162), True, 'import numpy as np\n'), ((34185, 34223), 'numpy.array', 'np.array', (['[-1, -1, 1, 1, -1, -1, 1, 1]'], {}), '([-1, -1, 1, 1, -1, -1, 1, 1])\n', (34193, 34223), True, 'import numpy as np\n'), ((34246, 34284), 'numpy.array', 'np.array', (['[-1, -1, -1, -1, 1, 1, 1, 1]'], {}), '([-1, -1, -1, -1, 1, 1, 1, 1])\n', (34254, 34284), True, 'import numpy as np\n'), ((37255, 37274), 'numpy.floor', 'np.floor', (['x_coord_r'], {}), '(x_coord_r)\n', 
(37263, 37274), True, 'import numpy as np\n'), ((37307, 37326), 'numpy.floor', 'np.floor', (['y_coord_r'], {}), '(y_coord_r)\n', (37315, 37326), True, 'import numpy as np\n'), ((37359, 37378), 'numpy.floor', 'np.floor', (['z_coord_r'], {}), '(z_coord_r)\n', (37367, 37378), True, 'import numpy as np\n'), ((40309, 40328), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (40317, 40328), True, 'import numpy as np\n'), ((40544, 40563), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (40552, 40563), True, 'import numpy as np\n'), ((40776, 40863), 'numpy.dstack', 'np.dstack', (['[occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3]'], {}), '([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3,\n norm_pic3])\n', (40785, 40863), True, 'import numpy as np\n'), ((44059, 44102), 'os.path.join', 'os.path.join', (['self.path', '"""google2cloud.pkl"""'], {}), "(self.path, 'google2cloud.pkl')\n", (44071, 44102), False, 'import os\n'), ((45877, 45917), 'numpy.array', 'np.array', (['[left[0], left[1], left[2], 1]'], {}), '([left[0], left[1], left[2], 1])\n', (45885, 45917), True, 'import numpy as np\n'), ((45959, 46002), 'numpy.array', 'np.array', (['[right[0], right[1], right[2], 1]'], {}), '([right[0], right[1], right[2], 1])\n', (45967, 46002), True, 'import numpy as np\n'), ((46045, 46091), 'numpy.array', 'np.array', (['[center[0], center[1], center[2], 1]'], {}), '([center[0], center[1], center[2], 1])\n', (46053, 46091), True, 'import numpy as np\n'), ((49336, 49355), 'numpy.floor', 'np.floor', (['x_coord_r'], {}), '(x_coord_r)\n', (49344, 49355), True, 'import numpy as np\n'), ((49388, 49407), 'numpy.floor', 'np.floor', (['y_coord_r'], {}), '(y_coord_r)\n', (49396, 49407), True, 'import numpy as np\n'), ((49440, 49459), 'numpy.floor', 'np.floor', (['z_coord_r'], {}), '(z_coord_r)\n', (49448, 49459), True, 'import numpy as np\n'), ((52351, 52370), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (52359, 52370), True, 'import numpy as np\n'), ((52576, 52595), 'numpy.array', 'np.array', (['[0, 2, 1]'], {}), '([0, 2, 1])\n', (52584, 52595), True, 'import numpy as np\n'), ((52798, 52885), 'numpy.dstack', 'np.dstack', (['[occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3, norm_pic3]'], {}), '([occupy_pic1, norm_pic1, occupy_pic2, norm_pic2, occupy_pic3,\n norm_pic3])\n', (52807, 52885), True, 'import numpy as np\n'), ((54741, 54761), 'torch.initial_seed', 'torch.initial_seed', ([], {}), '()\n', (54759, 54761), False, 'import torch\n'), ((4857, 4874), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (4863, 4874), True, 'import numpy as np\n'), ((5685, 5702), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (5691, 5702), True, 'import numpy as np\n'), ((16703, 16720), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (16709, 16720), True, 'import numpy as np\n'), ((17531, 17548), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (17537, 17548), True, 'import numpy as np\n'), ((31408, 31425), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (31414, 31425), True, 'import numpy as np\n'), ((33600, 33643), 'numpy.dot', 'np.dot', (['matrix_m2c.T', 'pc_m2c[self.in_ind].T'], {}), '(matrix_m2c.T, pc_m2c[self.in_ind].T)\n', (33606, 33643), True, 'import numpy as np\n'), ((35582, 35599), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (35588, 35599), True, 'import numpy as np\n'), ((47062, 47079), 'numpy.sum', 'np.sum', (['a'], 
{'axis': '(0)'}), '(a, axis=0)\n', (47068, 47079), True, 'import numpy as np\n'), ((47890, 47907), 'numpy.sum', 'np.sum', (['a'], {'axis': '(0)'}), '(a, axis=0)\n', (47896, 47907), True, 'import numpy as np\n'), ((3804, 3856), 'numpy.array', 'np.array', (['[binormal[0], binormal[1], binormal[2], 1]'], {}), '([binormal[0], binormal[1], binormal[2], 1])\n', (3812, 3856), True, 'import numpy as np\n'), ((3915, 3967), 'numpy.array', 'np.array', (['[approach[0], approach[1], approach[2], 1]'], {}), '([approach[0], approach[1], approach[2], 1])\n', (3923, 3967), True, 'import numpy as np\n'), ((4030, 4094), 'numpy.array', 'np.array', (['[minor_normal[0], minor_normal[1], minor_normal[2], 1]'], {}), '([minor_normal[0], minor_normal[1], minor_normal[2], 1])\n', (4038, 4094), True, 'import numpy as np\n'), ((4385, 4404), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (4393, 4404), True, 'import numpy as np\n'), ((4445, 4464), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (4453, 4464), True, 'import numpy as np\n'), ((15650, 15702), 'numpy.array', 'np.array', (['[binormal[0], binormal[1], binormal[2], 1]'], {}), '([binormal[0], binormal[1], binormal[2], 1])\n', (15658, 15702), True, 'import numpy as np\n'), ((15761, 15813), 'numpy.array', 'np.array', (['[approach[0], approach[1], approach[2], 1]'], {}), '([approach[0], approach[1], approach[2], 1])\n', (15769, 15813), True, 'import numpy as np\n'), ((15876, 15940), 'numpy.array', 'np.array', (['[minor_normal[0], minor_normal[1], minor_normal[2], 1]'], {}), '([minor_normal[0], minor_normal[1], minor_normal[2], 1])\n', (15884, 15940), True, 'import numpy as np\n'), ((16231, 16250), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (16239, 16250), True, 'import numpy as np\n'), ((16291, 16310), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (16299, 16310), True, 'import numpy as np\n'), ((30879, 30898), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (30887, 30898), True, 'import numpy as np\n'), ((30941, 30960), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (30949, 30960), True, 'import numpy as np\n'), ((46136, 46188), 'numpy.array', 'np.array', (['[binormal[0], binormal[1], binormal[2], 1]'], {}), '([binormal[0], binormal[1], binormal[2], 1])\n', (46144, 46188), True, 'import numpy as np\n'), ((46247, 46299), 'numpy.array', 'np.array', (['[approach[0], approach[1], approach[2], 1]'], {}), '([approach[0], approach[1], approach[2], 1])\n', (46255, 46299), True, 'import numpy as np\n'), ((46362, 46426), 'numpy.array', 'np.array', (['[minor_normal[0], minor_normal[1], minor_normal[2], 1]'], {}), '([minor_normal[0], minor_normal[1], minor_normal[2], 1])\n', (46370, 46426), True, 'import numpy as np\n'), ((46590, 46609), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (46598, 46609), True, 'import numpy as np\n'), ((46650, 46669), 'numpy.array', 'np.array', (['[0, 1, 0]'], {}), '([0, 1, 0])\n', (46658, 46669), True, 'import numpy as np\n')]
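The rows above are the tail of a single extract_api value. Judging only from the entries themselves, each element appears to be an 8-tuple: a character span for the call, the fully qualified API name, the name as written in the source, a (positional args, keyword args) pair, the call text, a span for the call arguments, a boolean flag, and the import statement that provides the API. The parsing sketch below is a minimal illustration under that assumption; the record is copied from the data above, and the field names (and the meaning of the boolean flag) are guesses for readability, not part of the dataset.

import ast

# One record copied from the extract_api data above. Assumption: the field
# is the textual repr of a Python list of tuples, so ast.literal_eval can
# read it back into Python objects.
record_text = (
    "[((54741, 54761), 'torch.initial_seed', 'torch.initial_seed', ([], {}), "
    "'()\\n', (54759, 54761), False, 'import torch\\n')]"
)

records = ast.literal_eval(record_text)
for record in records:
    # Illustrative field names; the exact semantics of each position
    # (especially the boolean flag) are an assumption, not documented here.
    span, api_path, source_name, (args, kwargs), call_text, args_span, flag, import_stmt = record
    print(api_path, span, import_stmt.strip())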