code (string, 31-1.05M chars) | apis (list) | extract_api (string, 97-1.91M chars)
---|---|---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 21:57:41 2020
@author: inderpreet
calculate statistics for MWHS point estimates; the results are given in LaTeX format
results from the scattering index and Buehler et al. criteria
"""
import netCDF4
import os
import matplotlib.pyplot as plt
import numpy as np
import ICI.stats as S
from read_qrnn import read_qrnn
plt.rcParams.update({'font.size': 26})
from tabulate import tabulate
from mwhs import mwhsData
from scipy.stats import skew
#%%
def get_SI_land(y_ob, y_fg, i89, i150):
"""
compute scattering index over land
"""
SI_ob = y_ob[i89, :] - y_ob[i150, :]
SI_fg = y_fg[i89, :] - y_fg[i150, :]
return (SI_ob + SI_fg)/2
def get_SI_ocean(y_ob, y_fg, y_cl, i89, i150):
"""
compute scattering index over ocean
"""
SI_ob = y_ob[i89, :] - y_ob[i150, :] -(y_cl[i89, :] - y_cl[i150, :])
SI_fg = y_fg[i89, :] - y_fg[i150, :] - (y_cl[i89, :] - y_cl[i150, :])
return (SI_ob + SI_fg)/2
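# Worked example (synthetic numbers for illustration only): if the observed
# 89-150 GHz depression is 12 K and the first-guess depression is 8 K,
# get_SI_land returns (12 + 8) / 2 = 10 K, the mean of the two scattering indices.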
def bias(y, y0):
    return np.mean(y - y0)
def std(y, y0):
    return np.std(y - y0)
def mae(y, y0):
    return np.mean(np.abs(y - y0))
def filter_buehler_19(TB18, TB19):
    """
    Filtering with Buehler et al. criteria
    Parameters
    ----------
    TB18, TB19 : brightness temperatures of the two 183 GHz channels used by the test
    Returns
    -------
    im : logical array flagging the cloud-affected (filtered) data
    """
im1 = TB18 < 240.0
dtb = TB19 - TB18
im2 = dtb < 0
im = np.logical_or(im1, im2)
print (np.sum(im1), np.sum(im2))
return im
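# Sanity check for the filter (synthetic brightness temperatures, hypothetical
# values, not MWHS data): pixel 2 fails the 240 K test, pixel 3 the TB19 < TB18 test.
_tb18_demo = np.array([250.0, 235.0, 250.0])
_tb19_demo = np.array([255.0, 250.0, 245.0])
assert filter_buehler_19(_tb18_demo, _tb19_demo).tolist() == [False, True, True]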
if __name__ == "__main__":
#%% input parameters
depth = 3
width = 128
quantiles = np.array([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])
batchSize = 128
    targets = [11, 12, 13, 14, 15]
    targets = [15]  # restrict this run to channel 15; drop this line to loop over all targets above
test_file = os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/MWHS/data/TB_MWHS_test.nc")
iq = np.argwhere(quantiles == 0.5)[0,0]
qrnn_dir = "C89+150"
#qrnn_dir = "C150"
#qrnn_dir = "C150+118"
d = {"C89+150" : [1, 10],
"C89+150+118" : [1, 10, 6, 7 ],
"C150" : [10],
"C89+150+183" : [1, 10, 11, 12, 13, 14, 15]
}
Channels = [[1, 10], [1, 6, 7, 10]]
qrnn_dirs = ["C89+150"]
#qrnn_dirs = ["C89+150", "C89+150+118", "C150" ]
path = os.path.expanduser('~/Dendrite/Projects/AWS-325GHz/MWHS/data/')
allChannels = np.arange(1, 16, 1)
#%%
if __name__ == "__main__":
#%%
TB_ob = np.load(os.path.join(path, 'TB_obs.npy'))
TB_fg = np.load(os.path.join(path, 'TB_fg.npy'))
TB_cl = np.load(os.path.join(path, 'TB_cl.npy'))
i89, = np.argwhere(allChannels == 1)[0]
i150, = np.argwhere(allChannels == 10)[0]
for qrnn_dir, target in zip(qrnn_dirs, targets) :
qrnn_path = os.path.expanduser("~/Dendrite/Projects/AWS-325GHz/MWHS/qrnn_output/all_with_flag/%s/"%(qrnn_dir))
channels = np.array(d[qrnn_dir])
if target not in channels:
inChannels = np.concatenate([[target], channels])
else:
inChannels = channels
print(qrnn_dir, channels, inChannels)
qrnn_file = os.path.join(qrnn_path, "qrnn_mwhs_%s.nc"%(target))
i183, = np.argwhere(inChannels == target)[0]
y_pre, y_prior, y0, y, y_pos_mean = read_qrnn(qrnn_file, test_file, inChannels, target)
        im1 = np.abs(y_pre[:, 3] - y_prior[:, i183]) < 5
data = mwhsData(test_file,
inChannels, target, ocean = False, test_data = True)
#%% SI approach
SI_land = get_SI_land(TB_ob, TB_fg, i89, i150)
SI_ocean = get_SI_ocean(TB_ob, TB_fg, TB_cl, i89, i150)
SI_land = SI_land[data.im]
SI_ocean = SI_ocean[data.im]
iocean = np.squeeze(data.lsm[:] == 0)
iland = ~iocean
SI_land[iocean] = SI_ocean[iocean]
SI = SI_land.copy()
im = np.abs(SI) <= 5
y_fil = y_prior[im, i183]
y0_fil = y0[im]
#%% Buehler et al approach
test_file_noise = os.path.join(path, "TB_MWHS_test_noisy_allsky.nc")
file = netCDF4.Dataset(test_file_noise, mode = "r")
TB_var = file.variables["TB"]
TB_noise = TB_var[:]
i18, = np.where(allChannels == 11)[0]
i19, = np.where(allChannels == 13)[0]
TB18 = TB_noise[1, i18, data.im].data
TB19 = TB_noise[1, i19, data.im].data
im18 = np.isfinite(TB18)
im19 = np.isfinite(TB19)
        im18 = np.logical_and(im18, im19)  # valid where both channels are finite
im_183 = filter_buehler_19(TB18, TB19)
# im_183 = im_183[data.im]
#%%
print ("-----------------channel %s-------------------------"%str(target))
# print ("bias uncorr", bias(y_prior[:, i183], y0))
print ("bias SI", bias(y_fil, y0[im]))
print ("bias B183", bias(y_prior[~im_183, i183], y0[~im_183]))
# print ("bias QRNN", bias(y_prior[im1, i183], y0[im1]))
# print ("bias QRNN_corr", bias(y_pre[im1, 3], y0[im1]))
# print ("std uncorr", std(y_prior[:, i183], y0))
print ("std SI", std(y_fil, y0[im]))
print ("std B183", std(y_prior[~im_183, i183], y0[~im_183]))
# print ("std QRNN", std(y_prior[im1, i183], y0[im1]))
# print ("std QRNN_corr", std(y_pre[im1, 3], y0[im1]))
# print ("mae uncorr", mae(y_prior[:, i183], y0))
print ("mae SI", mae(y_fil, y0[im]))
print ("mae B183", mae(y_prior[~im_183, i183], y0[~im_183]))
# print ("mae QRNN", mae(y_prior[im1, i183], y0[im1]))
# print ("mae QRNN_corr", mae(y_pre[im1, 3], y0[im1]))
print ("skew SI", skew(y_fil-y0[im]))
print ("skew B183", skew(y_prior[~im_183, i183]- y0[~im_183]))
print ("skew all", skew(y_prior[:, i183]- y0[:]))
print ("% rejected SI", np.sum(~im)/im.shape)
print ("% rejected B183", np.sum(im_183)/im.shape)
#%%
bins = np.arange(-30, 20, 0.5)
hist = np.histogram(y_fil - y0_fil, bins)
fig, ax = plt.subplots(1, 1)
# ax.plot(bins[:-1], hist[0], 'k')
ax.set_yscale('log')
hist = np.histogram(y_prior[:, i183]- y0 , bins)
ax.plot(bins[:-1], hist[0], 'b')
y_pre_fil = y_prior[~im, i183]
hist = np.histogram(y_pre_fil - y0[~im] , bins)
ax.plot(bins[:-1], hist[0], 'r')
hist = np.histogram(y_pre[im1, 3]- y0[im1], bins)
ax.plot(bins[:-1], hist[0], 'g')
TB_15 = TB_ob[14, data.im]
hist = np.histogram(y_prior[im_183, i183] - y0[im_183], bins)
ax.plot(bins[:-1], hist[0], 'y')
|
[
"numpy.abs",
"numpy.sum",
"numpy.histogram",
"numpy.mean",
"numpy.arange",
"os.path.join",
"read_qrnn.read_qrnn",
"netCDF4.Dataset",
"numpy.std",
"numpy.isfinite",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.subplots",
"mwhs.mwhsData",
"numpy.argwhere",
"numpy.squeeze",
"numpy.concatenate",
"numpy.logical_and",
"scipy.stats.skew",
"numpy.where",
"numpy.array",
"numpy.logical_or",
"os.path.expanduser"
] |
[((377, 415), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': 26}"], {}), "({'font.size': 26})\n", (396, 415), True, 'import matplotlib.pyplot as plt\n'), ((1027, 1042), 'numpy.mean', 'np.mean', (['(y - y0)'], {}), '(y - y0)\n', (1034, 1042), True, 'import numpy as np\n'), ((1070, 1084), 'numpy.std', 'np.std', (['(y - y0)'], {}), '(y - y0)\n', (1076, 1084), True, 'import numpy as np\n'), ((1502, 1525), 'numpy.logical_or', 'np.logical_or', (['im1', 'im2'], {}), '(im1, im2)\n', (1515, 1525), True, 'import numpy as np\n'), ((1680, 1733), 'numpy.array', 'np.array', (['[0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998]'], {}), '([0.002, 0.03, 0.16, 0.5, 0.84, 0.97, 0.998])\n', (1688, 1733), True, 'import numpy as np\n'), ((1829, 1907), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Dendrite/Projects/AWS-325GHz/MWHS/data/TB_MWHS_test.nc"""'], {}), "('~/Dendrite/Projects/AWS-325GHz/MWHS/data/TB_MWHS_test.nc')\n", (1847, 1907), False, 'import os\n'), ((2372, 2435), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Dendrite/Projects/AWS-325GHz/MWHS/data/"""'], {}), "('~/Dendrite/Projects/AWS-325GHz/MWHS/data/')\n", (2390, 2435), False, 'import os\n'), ((2459, 2478), 'numpy.arange', 'np.arange', (['(1)', '(16)', '(1)'], {}), '(1, 16, 1)\n', (2468, 2478), True, 'import numpy as np\n'), ((1119, 1133), 'numpy.abs', 'np.abs', (['(y - y0)'], {}), '(y - y0)\n', (1125, 1133), True, 'import numpy as np\n'), ((1537, 1548), 'numpy.sum', 'np.sum', (['im1'], {}), '(im1)\n', (1543, 1548), True, 'import numpy as np\n'), ((1550, 1561), 'numpy.sum', 'np.sum', (['im2'], {}), '(im2)\n', (1556, 1561), True, 'import numpy as np\n'), ((1922, 1951), 'numpy.argwhere', 'np.argwhere', (['(quantiles == 0.5)'], {}), '(quantiles == 0.5)\n', (1933, 1951), True, 'import numpy as np\n'), ((2560, 2592), 'os.path.join', 'os.path.join', (['path', '"""TB_obs.npy"""'], {}), "(path, 'TB_obs.npy')\n", (2572, 2592), False, 'import os\n'), ((2618, 2649), 'os.path.join', 'os.path.join', (['path', '"""TB_fg.npy"""'], {}), "(path, 'TB_fg.npy')\n", (2630, 2649), False, 'import os\n'), ((2675, 2706), 'os.path.join', 'os.path.join', (['path', '"""TB_cl.npy"""'], {}), "(path, 'TB_cl.npy')\n", (2687, 2706), False, 'import os\n'), ((2723, 2752), 'numpy.argwhere', 'np.argwhere', (['(allChannels == 1)'], {}), '(allChannels == 1)\n', (2734, 2752), True, 'import numpy as np\n'), ((2772, 2802), 'numpy.argwhere', 'np.argwhere', (['(allChannels == 10)'], {}), '(allChannels == 10)\n', (2783, 2802), True, 'import numpy as np\n'), ((2947, 3054), 'os.path.expanduser', 'os.path.expanduser', (["('~/Dendrite/Projects/AWS-325GHz/MWHS/qrnn_output/all_with_flag/%s/' % qrnn_dir\n )"], {}), "(\n '~/Dendrite/Projects/AWS-325GHz/MWHS/qrnn_output/all_with_flag/%s/' %\n qrnn_dir)\n", (2965, 3054), False, 'import os\n'), ((3095, 3116), 'numpy.array', 'np.array', (['d[qrnn_dir]'], {}), '(d[qrnn_dir])\n', (3103, 3116), True, 'import numpy as np\n'), ((3408, 3459), 'os.path.join', 'os.path.join', (['qrnn_path', "('qrnn_mwhs_%s.nc' % target)"], {}), "(qrnn_path, 'qrnn_mwhs_%s.nc' % target)\n", (3420, 3459), False, 'import os\n'), ((3591, 3642), 'read_qrnn.read_qrnn', 'read_qrnn', (['qrnn_file', 'test_file', 'inChannels', 'target'], {}), '(qrnn_file, test_file, inChannels, target)\n', (3600, 3642), False, 'from read_qrnn import read_qrnn\n'), ((3739, 3807), 'mwhs.mwhsData', 'mwhsData', (['test_file', 'inChannels', 'target'], {'ocean': '(False)', 'test_data': '(True)'}), '(test_file, inChannels, target, ocean=False, test_data=True)\n', (3747, 
3807), False, 'from mwhs import mwhsData\n'), ((4123, 4151), 'numpy.squeeze', 'np.squeeze', (['(data.lsm[:] == 0)'], {}), '(data.lsm[:] == 0)\n', (4133, 4151), True, 'import numpy as np\n'), ((4512, 4562), 'os.path.join', 'os.path.join', (['path', '"""TB_MWHS_test_noisy_allsky.nc"""'], {}), "(path, 'TB_MWHS_test_noisy_allsky.nc')\n", (4524, 4562), False, 'import os\n'), ((4591, 4633), 'netCDF4.Dataset', 'netCDF4.Dataset', (['test_file_noise'], {'mode': '"""r"""'}), "(test_file_noise, mode='r')\n", (4606, 4633), False, 'import netCDF4\n'), ((4943, 4960), 'numpy.isfinite', 'np.isfinite', (['TB18'], {}), '(TB18)\n', (4954, 4960), True, 'import numpy as np\n'), ((4980, 4997), 'numpy.isfinite', 'np.isfinite', (['TB19'], {}), '(TB19)\n', (4991, 4997), True, 'import numpy as np\n'), ((5017, 5043), 'numpy.logical_and', 'np.logical_and', (['TB18', 'TB19'], {}), '(TB18, TB19)\n', (5031, 5043), True, 'import numpy as np\n'), ((6737, 6760), 'numpy.arange', 'np.arange', (['(-30)', '(20)', '(0.5)'], {}), '(-30, 20, 0.5)\n', (6746, 6760), True, 'import numpy as np\n'), ((6780, 6814), 'numpy.histogram', 'np.histogram', (['(y_fil - y0_fil)', 'bins'], {}), '(y_fil - y0_fil, bins)\n', (6792, 6814), True, 'import numpy as np\n'), ((6837, 6855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (6849, 6855), True, 'import matplotlib.pyplot as plt\n'), ((6967, 7008), 'numpy.histogram', 'np.histogram', (['(y_prior[:, i183] - y0)', 'bins'], {}), '(y_prior[:, i183] - y0, bins)\n', (6979, 7008), True, 'import numpy as np\n'), ((7132, 7171), 'numpy.histogram', 'np.histogram', (['(y_pre_fil - y0[~im])', 'bins'], {}), '(y_pre_fil - y0[~im], bins)\n', (7144, 7171), True, 'import numpy as np\n'), ((7263, 7306), 'numpy.histogram', 'np.histogram', (['(y_pre[im1, 3] - y0[im1])', 'bins'], {}), '(y_pre[im1, 3] - y0[im1], bins)\n', (7275, 7306), True, 'import numpy as np\n'), ((7435, 7489), 'numpy.histogram', 'np.histogram', (['(y_prior[im_183, i183] - y0[im_183])', 'bins'], {}), '(y_prior[im_183, i183] - y0[im_183], bins)\n', (7447, 7489), True, 'import numpy as np\n'), ((3211, 3247), 'numpy.concatenate', 'np.concatenate', (['[[target], channels]'], {}), '([[target], channels])\n', (3225, 3247), True, 'import numpy as np\n'), ((3493, 3526), 'numpy.argwhere', 'np.argwhere', (['(inChannels == target)'], {}), '(inChannels == target)\n', (3504, 3526), True, 'import numpy as np\n'), ((3662, 3700), 'numpy.abs', 'np.abs', (['(y_pre[:, 3] - y_prior[:, i183])'], {}), '(y_pre[:, 3] - y_prior[:, i183])\n', (3668, 3700), True, 'import numpy as np\n'), ((4302, 4312), 'numpy.abs', 'np.abs', (['SI'], {}), '(SI)\n', (4308, 4312), True, 'import numpy as np\n'), ((4730, 4757), 'numpy.where', 'np.where', (['(allChannels == 11)'], {}), '(allChannels == 11)\n', (4738, 4757), True, 'import numpy as np\n'), ((4780, 4807), 'numpy.where', 'np.where', (['(allChannels == 13)'], {}), '(allChannels == 13)\n', (4788, 4807), True, 'import numpy as np\n'), ((6398, 6418), 'scipy.stats.skew', 'skew', (['(y_fil - y0[im])'], {}), '(y_fil - y0[im])\n', (6402, 6418), False, 'from scipy.stats import skew\n'), ((6450, 6492), 'scipy.stats.skew', 'skew', (['(y_prior[~im_183, i183] - y0[~im_183])'], {}), '(y_prior[~im_183, i183] - y0[~im_183])\n', (6454, 6492), False, 'from scipy.stats import skew\n'), ((6524, 6554), 'scipy.stats.skew', 'skew', (['(y_prior[:, i183] - y0[:])'], {}), '(y_prior[:, i183] - y0[:])\n', (6528, 6554), False, 'from scipy.stats import skew\n'), ((6604, 6615), 'numpy.sum', 'np.sum', (['(~im)'], {}), '(~im)\n', 
(6610, 6615), True, 'import numpy as np\n'), ((6664, 6678), 'numpy.sum', 'np.sum', (['im_183'], {}), '(im_183)\n', (6670, 6678), True, 'import numpy as np\n')]
|
from sklearn.datasets import load_iris
from sklearn.cluster import DBSCAN
import numpy
dbscan = DBSCAN(eps=0.2, metric='euclidean', min_samples=5)
# DBSCAN(eps=0.5, metric='euclidean', min_samples=5, random_state=111)
iris = load_iris()
print (iris.feature_names)
X, y = load_iris(return_X_y=True)
X = numpy.delete(X, 1, 0)  # axis=0 drops sample index 1 (a row), not a feature column
dbscan.fit(X)
# from sklearn.decomposition import PCA
# import matplotlib.pyplot as pl
# pca = PCA(n_components=2).fit(X)
# pca_2d = pca.transform(X)
# for i in range(0, pca_2d.shape[0]):
# if dbscan.labels_[i] == 0:
# c1 = pl.scatter(pca_2d[i,0],pca_2d[i,1],c='r',marker='+')
# elif dbscan.labels_[i] == 1:
# c2 = pl.scatter(pca_2d[i,0],pca_2d[i,1],c='g', marker='o')
# elif dbscan.labels_[i] == -1:
# c3 = pl.scatter(pca_2d[i,0],pca_2d[i,1],c='b', marker='*')
# pl.legend([c1, c2, c3], ['Cluster 1', 'Cluster 2', 'Noise'])
# pl.title('DBSCAN finds 2 clusters and noise')
# pl.show()
from sklearn.metrics import silhouette_score
res_sil = silhouette_score(X, dbscan.labels_)
print (res_sil)
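# Hedged note: silhouette_score counts DBSCAN's noise label (-1) as a cluster
# of its own. To score only the clustered points, mask the noise first:
mask = dbscan.labels_ != -1
if mask.any() and len(set(dbscan.labels_[mask])) > 1:
    print(silhouette_score(X[mask], dbscan.labels_[mask]))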
|
[
"sklearn.metrics.silhouette_score",
"sklearn.datasets.load_iris",
"numpy.delete",
"sklearn.cluster.DBSCAN"
] |
[((46, 57), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (55, 57), False, 'from sklearn.datasets import load_iris\n'), ((103, 153), 'sklearn.cluster.DBSCAN', 'DBSCAN', ([], {'eps': '(0.2)', 'metric': '"""euclidean"""', 'min_samples': '(5)'}), "(eps=0.2, metric='euclidean', min_samples=5)\n", (109, 153), False, 'from sklearn.cluster import DBSCAN\n'), ((248, 259), 'sklearn.datasets.load_iris', 'load_iris', ([], {}), '()\n', (257, 259), False, 'from sklearn.datasets import load_iris\n'), ((295, 321), 'sklearn.datasets.load_iris', 'load_iris', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (304, 321), False, 'from sklearn.datasets import load_iris\n'), ((356, 377), 'numpy.delete', 'numpy.delete', (['X', '(1)', '(0)'], {}), '(X, 1, 0)\n', (368, 377), False, 'import numpy\n'), ((1833, 1868), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['X', 'dbscan.labels_'], {}), '(X, dbscan.labels_)\n', (1849, 1868), False, 'from sklearn.metrics import silhouette_score\n')]
|
"""
Script to evaluate the activation functions for the selected network + grid.
"""
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import subprocess
import itertools
import imageio
import json
import torch
import io
import shutil
import matplotlib.pyplot as plt
import matplotlib.ticker
from collections import defaultdict
from typing import Tuple
# From eval_VolumetricFeatures.py / Section 5.2
BEST_NETWORK = (32,4)
GRID_RESOLUTION = 32
GRID_CHANNELS = 16
activationX = ["ReLU", "Sine", "Snake:2", "SnakeAlt:1"]
BASE_PATH = 'volnet/results/eval_ActivationFunctions'
configX = [
("plume100", "config-files/plume100-v2-dvr.json", "Plume"),
("ejecta70", "config-files/ejecta70-v6-dvr.json", "Ejecta"),
("RM60", "config-files/RichtmyerMeshkov-t60-v1-dvr.json", "RM"),
#("Skull5", "neuraltextures/config-files/skull-v5-dvr.json"),
]
def main():
train()
statistics_file = eval()
make_plots(statistics_file)
def get_args_and_hdf5_file(activation, config: Tuple[str, str, str]):
    """
    Assembles the command line arguments for training and the filename for the hdf5-file
    with the results
    :param activation: the activation function name
    :param config: tuple of (config name, config-file path, human-readable name)
    :return: args, hdf5 filename, output name
    """
config_name, config_settings, human_name = config
output_name = "run_%s_%s"%(config_name, activation.replace(':','-'))
parameters = [
sys.executable, "volnet/train_volnet.py",
config_settings,
"--train:mode", "world",
"--train:samples", "256**3",
"--train:batchsize", "64*64*128",
"--train:sampler_importance", "0.01",
"--val:copy_and_split",
"--outputmode", "density:direct",
"--lossmode", "density",
"-l1", "1",
"--lr_step", "50",
"-i", "200",
'--fouriercount', str((BEST_NETWORK[0]-4)//2), '--fourierstd', '1.0',
"--activation", activation,
"--layers", ':'.join([str(BEST_NETWORK[0])]*(BEST_NETWORK[1]-1)),
"--volumetric_features_resolution", str(GRID_RESOLUTION),
"--volumetric_features_channels", str(GRID_CHANNELS),
"--logdir", BASE_PATH+'/log',
"--modeldir", BASE_PATH+'/model',
"--hdf5dir", BASE_PATH+'/hdf5',
'--name', output_name,
'--save_frequency', '50'
]
hdf5_file = BASE_PATH+'/hdf5/' + output_name + ".hdf5"
return parameters, hdf5_file, output_name
def train():
print("Configurations:", len(activationX) * len(configX))
for config in configX:
for activation in activationX:
args, filename, _ = get_args_and_hdf5_file(activation, config)
if os.path.exists(filename):
print("Skipping test", filename)
else:
print("\n=====================================\nRun", filename)
subprocess.run(args, check=True)
print("\n===========================================\nDONE!")
def eval():
print("Evaluate")
statistics_file = os.path.join(BASE_PATH, 'stats.json')
if os.path.exists(statistics_file):
print("Statistics file already exists!")
return statistics_file
import common.utils as utils
import pyrenderer
from volnet.inference import LoadedModel
from losses.lossbuilder import LossBuilder
num_cameras = 64
width = 512
height = 512
STEPSIZE = 1 / 512
timer = pyrenderer.GPUTimer()
if os.name != 'nt':
rendering_mode = LoadedModel.EvaluationMode.PYTORCH16
else:
rendering_mode = LoadedModel.EvaluationMode.TENSORCORES_MIXED
enable_preintegration = True
device = torch.device('cuda')
ssim_loss = LossBuilder(device).ssim_loss(4)
lpips_loss = LossBuilder(device).lpips_loss(4, 0.0, 1.0)
def compute_stats(ln: LoadedModel, mode, reference_images, stepsize, filename_template=None,
do_ssim=False, do_lpips=False):
timingsX = []
ssimX = []
lpipsX = []
for i in range(num_cameras):
if enable_preintegration:
ln.enable_preintegration(True, convert_to_texture=True)
else:
ln.enable_preintegration(False)
current_image = ln.render_network(
cameras[i], width, height, mode,
stepsize, timer=timer)
if i > 0:
timingsX.append(timer.elapsed_milliseconds())
if filename_template is not None:
imageio.imwrite(
filename_template % i,
LoadedModel.convert_image(current_image))
if do_ssim:
ssimX.append(ssim_loss(current_image, reference_images[i]).item())
if do_lpips:
lpipsX.append(lpips_loss(current_image, reference_images[i]).item())
return \
(np.mean(timingsX), np.std(timingsX)), \
(np.mean(ssimX), np.std(ssimX)) if do_ssim else (np.NaN, np.NaN), \
(np.mean(lpipsX), np.std(lpipsX)) if do_lpips else (np.NaN, np.NaN)
# load networks
def load_and_save(activation, config):
_, filename, output_name = get_args_and_hdf5_file(activation, config)
filename = os.path.abspath(filename)
if not os.path.exists(filename):
print("File not found:", filename, file=sys.stderr)
return None, None
try:
ln = LoadedModel(filename)
# if enable_preintegration:
# ln.enable_preintegration(True)
ln.save_compiled_network(filename.replace('.hdf5', '.volnet'))
return ln, output_name
except Exception as e:
print("Unable to load '%s':" % filename, e)
return None, None
output_stats = {}
for cfg_index, config in enumerate(configX):
image_folder = os.path.join(BASE_PATH, "images_" + config[0])
local_stats = {
'cfg_index': cfg_index,
'cfg': config[1]}
reference_images = None
# collect models
lns = dict()
base_ln = None
for activation in activationX:
ln, name = load_and_save(activation, config)
lns[activation] = (ln, name)
if base_ln is None: base_ln = ln
# render reference
image_folder_reference = os.path.join(image_folder, "reference")
os.makedirs(image_folder_reference, exist_ok=True)
print("\n===================================== Render reference", cfg_index)
cameras = base_ln.get_rotation_cameras(num_cameras)
reference_images = [None] * num_cameras
for i in range(num_cameras):
reference_images[i] = base_ln.render_reference(cameras[i], width, height)
imageio.imwrite(
os.path.join(image_folder_reference, 'reference%03d.png' % i),
LoadedModel.convert_image(reference_images[i]))
# render networks
for activation in activationX:
ln, name = lns[activation]
if ln is None:
print("Skip", name, ", network is None")
continue
print("Render", name)
image_folder_screen = os.path.join(image_folder, "%s" % name)
os.makedirs(image_folder_screen, exist_ok=True)
time, ssim, lpips = compute_stats(
ln, rendering_mode, reference_images, STEPSIZE,
os.path.join(image_folder_screen, 'img%03d.png'),
True, True)
local_stats[name] = {
'time': time,
'ssim': ssim,
'lpips': lpips,
}
output_stats[config[0]] = local_stats
# save statistics
print("\n===================================== Done, save statistics")
with open(statistics_file, "w") as f:
json.dump(output_stats, f)
return statistics_file
def make_plots(statistics_file):
print("\n===================================== Make Plots")
with open(statistics_file, "r") as f:
stats = json.load(f)
output_folder = os.path.split(statistics_file)[0]
statNames = ['SSIM $\\uparrow$', 'LPIPS $\\downarrow$']
statTags = ["ssim", "lpips"]
statAggregation = [max, min]
latex = io.StringIO()
# latex header
latex.write("\\begin{tabular}{r%s}\n" % ("cc" * (len(configX))))
latex.write("\\toprule\n")
latex.write("\\multirow{2}{*}{Activation}")
for config in configX:
latex.write(" & \\multicolumn{2}{c}{%s}"%config[2])
latex.write("\\\\\n")
for config in configX:
latex.write(" & %s & %s" % tuple(statNames))
latex.write("\\\\\n")
latex.write("\n\\midrule\n")
best_per_dataset = dict()
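    # Note: the "%.4f" strings compared below all share the same width, so the
    # lexicographic max/min used here coincides with numeric order for these
    # non-negative SSIM/LPIPS values.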
for config in configX:
cfg_index = stats[config[0]]['cfg_index']
for tag, aggr in zip(statTags, statAggregation):
values = []
for activation in activationX:
_, _, n = get_args_and_hdf5_file(activation, configX[cfg_index])
v = "%.4f" % stats[config[0]][n][tag][0]
values.append(v)
best_per_dataset[(cfg_index, tag)] = aggr(values)
# main content
for activation in activationX:
latex.write(activation.split(':')[0])
for config in configX:
cfg_index = stats[config[0]]['cfg_index']
_, _, n = get_args_and_hdf5_file(activation, configX[cfg_index])
for tag in statTags:
v = "%.4f"%stats[config[0]][n][tag][0]
if v == best_per_dataset[(cfg_index, tag)]:
latex.write(" & $\\bm{%s}$"%v)
else:
latex.write(" & $%s$" % v)
latex.write("\\\\\n")
#footer
latex.write("\n\\bottomrule\n")
latex.write("\\end{tabular}\n")
latex = latex.getvalue()
with open(os.path.join(output_folder, "ActivationFunctions.tex"), 'w') as f:
f.write(latex)
print(latex)
print("Done")
if __name__ == '__main__':
main()
|
[
"pyrenderer.GPUTimer",
"io.StringIO",
"os.path.abspath",
"json.dump",
"os.makedirs",
"json.load",
"volnet.inference.LoadedModel",
"os.getcwd",
"subprocess.run",
"numpy.std",
"os.path.exists",
"losses.lossbuilder.LossBuilder",
"volnet.inference.LoadedModel.convert_image",
"numpy.mean",
"torch.device",
"os.path.split",
"os.path.join"
] |
[((134, 145), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (143, 145), False, 'import os\n'), ((3188, 3225), 'os.path.join', 'os.path.join', (['BASE_PATH', '"""stats.json"""'], {}), "(BASE_PATH, 'stats.json')\n", (3200, 3225), False, 'import os\n'), ((3234, 3265), 'os.path.exists', 'os.path.exists', (['statistics_file'], {}), '(statistics_file)\n', (3248, 3265), False, 'import os\n'), ((3598, 3619), 'pyrenderer.GPUTimer', 'pyrenderer.GPUTimer', ([], {}), '()\n', (3617, 3619), False, 'import pyrenderer\n'), ((3842, 3862), 'torch.device', 'torch.device', (['"""cuda"""'], {}), "('cuda')\n", (3854, 3862), False, 'import torch\n'), ((8557, 8570), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (8568, 8570), False, 'import io\n'), ((5445, 5470), 'os.path.abspath', 'os.path.abspath', (['filename'], {}), '(filename)\n', (5460, 5470), False, 'import os\n'), ((6086, 6132), 'os.path.join', 'os.path.join', (['BASE_PATH', "('images_' + config[0])"], {}), "(BASE_PATH, 'images_' + config[0])\n", (6098, 6132), False, 'import os\n'), ((6583, 6622), 'os.path.join', 'os.path.join', (['image_folder', '"""reference"""'], {}), "(image_folder, 'reference')\n", (6595, 6622), False, 'import os\n'), ((6632, 6682), 'os.makedirs', 'os.makedirs', (['image_folder_reference'], {'exist_ok': '(True)'}), '(image_folder_reference, exist_ok=True)\n', (6643, 6682), False, 'import os\n'), ((8127, 8153), 'json.dump', 'json.dump', (['output_stats', 'f'], {}), '(output_stats, f)\n', (8136, 8153), False, 'import json\n'), ((8343, 8355), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8352, 8355), False, 'import json\n'), ((8377, 8407), 'os.path.split', 'os.path.split', (['statistics_file'], {}), '(statistics_file)\n', (8390, 8407), False, 'import os\n'), ((2834, 2858), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2848, 2858), False, 'import os\n'), ((3880, 3899), 'losses.lossbuilder.LossBuilder', 'LossBuilder', (['device'], {}), '(device)\n', (3891, 3899), False, 'from losses.lossbuilder import LossBuilder\n'), ((3931, 3950), 'losses.lossbuilder.LossBuilder', 'LossBuilder', (['device'], {}), '(device)\n', (3942, 3950), False, 'from losses.lossbuilder import LossBuilder\n'), ((5487, 5511), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (5501, 5511), False, 'import os\n'), ((5641, 5662), 'volnet.inference.LoadedModel', 'LoadedModel', (['filename'], {}), '(filename)\n', (5652, 5662), False, 'from volnet.inference import LoadedModel\n'), ((7470, 7509), 'os.path.join', 'os.path.join', (['image_folder', "('%s' % name)"], {}), "(image_folder, '%s' % name)\n", (7482, 7509), False, 'import os\n'), ((7523, 7570), 'os.makedirs', 'os.makedirs', (['image_folder_screen'], {'exist_ok': '(True)'}), '(image_folder_screen, exist_ok=True)\n', (7534, 7570), False, 'import os\n'), ((10190, 10244), 'os.path.join', 'os.path.join', (['output_folder', '"""ActivationFunctions.tex"""'], {}), "(output_folder, 'ActivationFunctions.tex')\n", (10202, 10244), False, 'import os\n'), ((3027, 3059), 'subprocess.run', 'subprocess.run', (['args'], {'check': '(True)'}), '(args, check=True)\n', (3041, 3059), False, 'import subprocess\n'), ((5077, 5094), 'numpy.mean', 'np.mean', (['timingsX'], {}), '(timingsX)\n', (5084, 5094), True, 'import numpy as np\n'), ((5096, 5112), 'numpy.std', 'np.std', (['timingsX'], {}), '(timingsX)\n', (5102, 5112), True, 'import numpy as np\n'), ((7051, 7112), 'os.path.join', 'os.path.join', (['image_folder_reference', "('reference%03d.png' % i)"], {}), "(image_folder_reference, 
'reference%03d.png' % i)\n", (7063, 7112), False, 'import os\n'), ((7131, 7177), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['reference_images[i]'], {}), '(reference_images[i])\n', (7156, 7177), False, 'from volnet.inference import LoadedModel\n'), ((7701, 7749), 'os.path.join', 'os.path.join', (['image_folder_screen', '"""img%03d.png"""'], {}), "(image_folder_screen, 'img%03d.png')\n", (7713, 7749), False, 'import os\n'), ((4782, 4822), 'volnet.inference.LoadedModel.convert_image', 'LoadedModel.convert_image', (['current_image'], {}), '(current_image)\n', (4807, 4822), False, 'from volnet.inference import LoadedModel\n'), ((5131, 5145), 'numpy.mean', 'np.mean', (['ssimX'], {}), '(ssimX)\n', (5138, 5145), True, 'import numpy as np\n'), ((5147, 5160), 'numpy.std', 'np.std', (['ssimX'], {}), '(ssimX)\n', (5153, 5160), True, 'import numpy as np\n'), ((5212, 5227), 'numpy.mean', 'np.mean', (['lpipsX'], {}), '(lpipsX)\n', (5219, 5227), True, 'import numpy as np\n'), ((5229, 5243), 'numpy.std', 'np.std', (['lpipsX'], {}), '(lpipsX)\n', (5235, 5243), True, 'import numpy as np\n')]
|
import numpy as np
import pickle
from IPython import embed
class RunningMeanStd(object):
# https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
def __init__(self, shape=()):
self.mean = np.zeros(shape, np.float32)
self.var = np.ones(shape, np.float32)
self.count = 1e-4
self.epsilon = 1e-8
self.clip = 10
# update mean and var with current input
def update(self, x):
# embed()
# exit(0)
if np.any(np.isnan(x)):
return
x_ = x.reshape(-1, len(self.mean))
batch_mean = np.mean(x_, axis=0)
batch_var = np.var(x_, axis=0)
batch_count = x_.shape[0]
# embed()
# exit(0)
self.update_from_moments(batch_mean, batch_var, batch_count)
# get value from normalized output
def apply(self, x):
# return x
# embed()
# exit(0)
self.update(x)
x = np.clip((x - self.mean) / np.sqrt(self.var + self.epsilon), -self.clip, self.clip)
return x
def applyOnly(self, x):
# return x
# embed()
# exit(0)
x = np.clip((x - self.mean) / np.sqrt(self.var + self.epsilon), -self.clip, self.clip)
return x
# rms_x = np.zeros(shape=x.shape(), np.float32)
# for i in range(len(rms_x)):
# rms_x[i] = np.clip((x[i] - self.mean) / np.sqrt(self.var + self.epsilon), -self.clip, self.clip)
def update_from_moments(self, batch_mean, batch_var, batch_count):
self.mean, self.var, self.count = update_mean_var_count_from_moments(
self.mean, self.var, self.count, batch_mean, batch_var, batch_count)
def save(self, path):
data = {'mean':self.mean, 'var':self.var, 'count':self.count}
with open(path, 'wb') as f:
pickle.dump(data, f)
def load(self, path):
with open(path, 'rb') as f:
data = pickle.load(f)
self.mean = data['mean']
self.var = data['var']
self.count = data['count']
def update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):
delta = batch_mean - mean
tot_count = count + batch_count
new_mean = mean + delta * batch_count / tot_count
m_a = var * count
m_b = batch_var * batch_count
M2 = m_a + m_b + np.square(delta) * count * batch_count / tot_count
new_var = M2 / tot_count
new_count = tot_count
return new_mean, new_var, new_count
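# Consistency sketch (hypothetical data, not part of the original module):
# merging batch moments through RunningMeanStd should track the moments of the
# concatenated data, up to the tiny 1e-4 prior count used at initialisation.
if __name__ == '__main__':
    rms = RunningMeanStd(shape=(3,))
    a = np.random.randn(100, 3)
    b = np.random.randn(50, 3) + 2.0
    rms.update(a)
    rms.update(b)
    both = np.concatenate([a, b], axis=0)
    assert np.allclose(rms.mean, both.mean(axis=0), atol=0.01)
    assert np.allclose(rms.var, both.var(axis=0), atol=0.01)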
|
[
"pickle.dump",
"numpy.square",
"numpy.zeros",
"numpy.ones",
"numpy.isnan",
"numpy.mean",
"pickle.load",
"numpy.var",
"numpy.sqrt"
] |
[((235, 262), 'numpy.zeros', 'np.zeros', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (243, 262), True, 'import numpy as np\n'), ((282, 308), 'numpy.ones', 'np.ones', (['shape', 'np.float32'], {}), '(shape, np.float32)\n', (289, 308), True, 'import numpy as np\n'), ((609, 628), 'numpy.mean', 'np.mean', (['x_'], {'axis': '(0)'}), '(x_, axis=0)\n', (616, 628), True, 'import numpy as np\n'), ((649, 667), 'numpy.var', 'np.var', (['x_'], {'axis': '(0)'}), '(x_, axis=0)\n', (655, 667), True, 'import numpy as np\n'), ((511, 522), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (519, 522), True, 'import numpy as np\n'), ((1840, 1860), 'pickle.dump', 'pickle.dump', (['data', 'f'], {}), '(data, f)\n', (1851, 1860), False, 'import pickle\n'), ((1947, 1961), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1958, 1961), False, 'import pickle\n'), ((987, 1019), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.epsilon)'], {}), '(self.var + self.epsilon)\n', (994, 1019), True, 'import numpy as np\n'), ((1183, 1215), 'numpy.sqrt', 'np.sqrt', (['(self.var + self.epsilon)'], {}), '(self.var + self.epsilon)\n', (1190, 1215), True, 'import numpy as np\n'), ((2378, 2394), 'numpy.square', 'np.square', (['delta'], {}), '(delta)\n', (2387, 2394), True, 'import numpy as np\n')]
|
import matrices_new_extended as mne
import numpy as np
import sympy as sp
from equality_check import Point
x, y, z = sp.symbols("x y z")
Point.base_point = np.array([x, y, z, 1])
class Test_Axis_2_x0x:
def test_matrix_2_x0x(self):
expected = Point([ z, -y, x, 1])
calculated = Point.calculate(mne._matrix_2_x0x)
assert calculated == expected
def test_matrix_2_1_mqx0x_q0q(self):
expected = Point([ z, -y, 1+x, 1])
calculated = Point.calculate(mne._matrix_2_1_mqx0x_q0q)
assert calculated == expected
def test_matrix_2_xqx(self):
expected = Point([ z, 1-y, x, 1])
calculated = Point.calculate(mne._matrix_2_xqx)
assert calculated == expected
def test_matrix_2_1_qx0x_q0mq(self):
expected = Point([ 1+z, -y, x, 1])
calculated = Point.calculate(mne._matrix_2_1_qx0x_q0mq)
assert calculated == expected
def test_matrix_2_1_xqx_q0q(self):
expected = Point([ 1+z, 1-y, 1+x, 1])
calculated = Point.calculate(mne._matrix_2_1_xqx_q0q)
assert calculated == expected
def test_matrix_2_1_qx3ox_h0h(self):
expected = Point([ 1.5+z, 1.5-y, 0.5+x, 1])
calculated = Point.calculate(mne._matrix_2_1_qx3ox_h0h)
assert calculated == expected
def test_matrix_2_1_mqxox_h0h(self):
expected = Point([ 0.5+z, 0.5-y, 1.5+x, 1])
calculated = Point.calculate(mne._matrix_2_1_mqxox_h0h)
assert calculated == expected
def test_matrix_2_1_xox_q0q(self):
expected = Point([ 0.5+z, 0.5-y, 0.5+x, 1])
calculated = Point.calculate(mne._matrix_2_1_xox_q0q)
assert calculated == expected
def test_matrix_2_1_mqx3ox_h0h(self):
expected = Point([ 0.5+z, 1.5-y, 1.5+x, 1])
calculated = Point.calculate(mne._matrix_2_1_mqx3ox_h0h)
assert calculated == expected
def test_matrix_2_1_xox_3q03q(self):
expected = Point([ 1.5+z, 0.5-y, 1.5+x, 1])
calculated = Point.calculate(mne._matrix_2_1_xox_3q03q)
assert calculated == expected
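# Context (a hedged sketch; Point.calculate presumably applies the 4x4 matrix to
# the homogeneous base point). For the two-fold x0x axis, for example:
#   m = np.array([[0, 0, 1, 0], [0, -1, 0, 0], [1, 0, 0, 0], [0, 0, 0, 1]])
#   m @ np.array([x, y, z, 1])  ->  array([z, -y, x, 1], dtype=object)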
|
[
"sympy.symbols",
"numpy.array",
"equality_check.Point.calculate",
"equality_check.Point"
] |
[((118, 137), 'sympy.symbols', 'sp.symbols', (['"""x y z"""'], {}), "('x y z')\n", (128, 137), True, 'import sympy as sp\n'), ((157, 179), 'numpy.array', 'np.array', (['[x, y, z, 1]'], {}), '([x, y, z, 1])\n', (165, 179), True, 'import numpy as np\n'), ((258, 278), 'equality_check.Point', 'Point', (['[z, -y, x, 1]'], {}), '([z, -y, x, 1])\n', (263, 278), False, 'from equality_check import Point\n'), ((301, 335), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_x0x'], {}), '(mne._matrix_2_x0x)\n', (316, 335), False, 'from equality_check import Point\n'), ((435, 459), 'equality_check.Point', 'Point', (['[z, -y, 1 + x, 1]'], {}), '([z, -y, 1 + x, 1])\n', (440, 459), False, 'from equality_check import Point\n'), ((480, 522), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_mqx0x_q0q'], {}), '(mne._matrix_2_1_mqx0x_q0q)\n', (495, 522), False, 'from equality_check import Point\n'), ((614, 637), 'equality_check.Point', 'Point', (['[z, 1 - y, x, 1]'], {}), '([z, 1 - y, x, 1])\n', (619, 637), False, 'from equality_check import Point\n'), ((658, 692), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_xqx'], {}), '(mne._matrix_2_xqx)\n', (673, 692), False, 'from equality_check import Point\n'), ((792, 816), 'equality_check.Point', 'Point', (['[1 + z, -y, x, 1]'], {}), '([1 + z, -y, x, 1])\n', (797, 816), False, 'from equality_check import Point\n'), ((837, 879), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_qx0x_q0mq'], {}), '(mne._matrix_2_1_qx0x_q0mq)\n', (852, 879), False, 'from equality_check import Point\n'), ((977, 1008), 'equality_check.Point', 'Point', (['[1 + z, 1 - y, 1 + x, 1]'], {}), '([1 + z, 1 - y, 1 + x, 1])\n', (982, 1008), False, 'from equality_check import Point\n'), ((1025, 1065), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_xqx_q0q'], {}), '(mne._matrix_2_1_xqx_q0q)\n', (1040, 1065), False, 'from equality_check import Point\n'), ((1165, 1202), 'equality_check.Point', 'Point', (['[1.5 + z, 1.5 - y, 0.5 + x, 1]'], {}), '([1.5 + z, 1.5 - y, 0.5 + x, 1])\n', (1170, 1202), False, 'from equality_check import Point\n'), ((1219, 1261), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_qx3ox_h0h'], {}), '(mne._matrix_2_1_qx3ox_h0h)\n', (1234, 1261), False, 'from equality_check import Point\n'), ((1361, 1398), 'equality_check.Point', 'Point', (['[0.5 + z, 0.5 - y, 1.5 + x, 1]'], {}), '([0.5 + z, 0.5 - y, 1.5 + x, 1])\n', (1366, 1398), False, 'from equality_check import Point\n'), ((1415, 1457), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_mqxox_h0h'], {}), '(mne._matrix_2_1_mqxox_h0h)\n', (1430, 1457), False, 'from equality_check import Point\n'), ((1555, 1592), 'equality_check.Point', 'Point', (['[0.5 + z, 0.5 - y, 0.5 + x, 1]'], {}), '([0.5 + z, 0.5 - y, 0.5 + x, 1])\n', (1560, 1592), False, 'from equality_check import Point\n'), ((1609, 1649), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_xox_q0q'], {}), '(mne._matrix_2_1_xox_q0q)\n', (1624, 1649), False, 'from equality_check import Point\n'), ((1750, 1787), 'equality_check.Point', 'Point', (['[0.5 + z, 1.5 - y, 1.5 + x, 1]'], {}), '([0.5 + z, 1.5 - y, 1.5 + x, 1])\n', (1755, 1787), False, 'from equality_check import Point\n'), ((1804, 1847), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_mqx3ox_h0h'], {}), '(mne._matrix_2_1_mqx3ox_h0h)\n', (1819, 1847), False, 'from equality_check import Point\n'), ((1947, 1984), 
'equality_check.Point', 'Point', (['[1.5 + z, 0.5 - y, 1.5 + x, 1]'], {}), '([1.5 + z, 0.5 - y, 1.5 + x, 1])\n', (1952, 1984), False, 'from equality_check import Point\n'), ((2001, 2043), 'equality_check.Point.calculate', 'Point.calculate', (['mne._matrix_2_1_xox_3q03q'], {}), '(mne._matrix_2_1_xox_3q03q)\n', (2016, 2043), False, 'from equality_check import Point\n')]
|
from CNF_Creator import *
import numpy as np
import time
import timeit
#---Parameters-----------------------------------------------
num_of_literals = 50 # Number of literals
pop_size = 10 # Population size of each generation
time_limit = 45
p_mutate = 0.9 # Probability of Mutation
p_mutate_literal = 0.1 # Probability of Mutating each literal
p_tournament_sel = 0.9 # Initial probability that random child is chosen over a fit child
beta = 100.0 #Parameter in exponential decay
max_stagnate_cnt = 3000 #Maximum number of epochs for which max_fitness is allowed to stagnate
#=============================================================
class CNF_Model:
'''
Implements a single instance of Valuation that maps literals to True/False
'''
def __init__(self, num_of_literals, arr=None):
'''
Initializes the CNF model
Arguments: num_of_literals -> Number of literals in the model
arr -> Inital values of the Valuation
if None: randomly initialized
'''
self.num_of_literals = num_of_literals
if (arr is None):
self.truth_vals = np.random.randint(0,2, size=num_of_literals)
else:
self.truth_vals = arr
self.fitness_score = -1
def fitness_eval(self, cnf_statement):
'''
Calculates the fitness score of the current valuation over a CNF_Statement
Arguments: cnf_statement -> CNF statement over which fitness is evaluated
Return: fitness_score -> Calculated Fitness score
'''
score = 0.0
for row in cnf_statement:
valid = False
for i in row:
if ((i>0 and self.truth_vals[abs(i)-1]==1) or (i<0 and self.truth_vals[abs(i)-1]==0)):
valid = True
break
if (valid):
score+=1.0
self.fitness_score = float(score)/float(len(cnf_statement))*100.0
return self.fitness_score
def get_fitness_score(self):
'''
Returns the last calculated fitness score of the model
'''
return self.fitness_score
def get_truth_values(self):
'''
Returns Representation of the truth value of the CNF_Model Found
'''
result = []
for i in range(len(self.truth_vals)):
if (self.truth_vals[i]==1):
result.append(i+1)
else:
result.append(-i-1)
return result
class Genetic_Algorithm:
'''
Class that implements the Genetic Algorithm
'''
def __init__(self, num_of_clauses, population_size = 10, num_of_literals = 50):
'''
Initializes the algorithm parameters
Arguments: num_of_clauses -> Number of clauses in the CNF statement
population_size -> Population size of each generation of models
num_of_literals -> Number of literals used in the CNF Statement
'''
self.mutate_p = p_mutate
self.max_fitness_scores = []
        self.num_of_clauses = num_of_clauses
        self.population_size = population_size
        self.num_of_literals = num_of_literals
def init_population(self, cnf_statement):
'''
Creates intial population of CNF Models that is used by the algorithm
        Arguments: cnf_statement -> CNF Statement being evaluated by the class
Returns: population -> Population of CNF Models
'''
population = []
for i in range(self.population_size):
population.append(CNF_Model(self.num_of_literals))
for i in range(self.population_size):
population[i].fitness_eval(cnf_statement)
return population
def Weights(self, models):
'''
Assigns a weight to each CNF model in the population that represent
it's preference to be selected for reproduction by using fitness scores
Arguments: models -> population of models
        Returns: weights -> An array of selection weights, one per model
'''
weights = np.zeros(self.population_size)
for i in range(self.population_size):
weights[i] = models[i].get_fitness_score()
sum = weights.sum()
weights = weights/sum
return weights
def reproduce(self, parent_1, parent_2):
'''
Function to perform the Reproduction task by performing Crossover
over a random pivot
Arguments: parent_1, parent_2 -> parent models
Returns: child -> Child model
'''
length = self.num_of_literals
pivot = np.random.randint(length)
child_arr = parent_1.truth_vals[:pivot]
child_arr = np.append(child_arr,parent_2.truth_vals[pivot:])
child = CNF_Model(length, child_arr)
return child
def Mutate(self, child):
'''
Performs Mutation task
Arguments: child -> CNF model to be mutated
'''
for i in range(self.num_of_literals):
if (np.random.random() < p_mutate_literal):
child.truth_vals[i] = 1-child.truth_vals[i]
return
def Tournament_Selection(self, population, pop_size, epoch):
'''
Performs Tournament Selection of the best fit models with a
probability that increases with epoch
Argument: population-> Population in which the best fit are chosen
pop_size-> Size of the final population after best fit
epoch -> Current epoch
Returns: population2 -> Population generated after selection
'''
population2 = []
population.sort(key = lambda x: x.fitness_score, reverse = True)
p = float(p_tournament_sel)**(epoch/beta)
for i in range(0,pop_size):
if (np.random.random() > p ):
population2.append(population[i])
else:
population2.append(population[np.random.randint(pop_size,len(population))])
return population2
def Max_fitness(self, population):
'''
Finds the fitness of the most fit model in the population
Argument: population -> Most fit population
Returns: max_fitness -> Max fitness value
'''
max_fitness = 0
for k in population:
max_fitness = max(max_fitness, k.get_fitness_score())
return max_fitness
def run_algorithm(self, cnf_statement, debug_stmt = False):
'''
Function that performs the Genetic Algorithm on the CNF statement
Arguments: cnf_statement -> CNF statement whose solution has to be generated
debug_stmt -> If True, prints a more verbose info about the algo run
'''
start_time = time.time()
max_fitness = 0
population = self.init_population(cnf_statement)
epoch = 0
time_taken = 0.0
prev_fitness = 0.0
stagnate_cnt = 0
while(max_fitness<100.0):
weights = self.Weights(population)
population2 = population.copy()
for i in range(self.population_size):
parent1, parent2 = np.random.choice(population,2, p=weights)
child = self.reproduce(parent1, parent2)
child.fitness_eval(cnf_statement)
if(np.random.random() < self.mutate_p):
self.Mutate(child)
population2.append(child)
            population = self.Tournament_Selection(population2, self.population_size, epoch)
max_fitness = self.Max_fitness(population)
self.max_fitness_scores.append(max_fitness)
epoch+=1
if(epoch%1000 == 1 and debug_stmt):
print(f"{epoch} epoch: Fitness score {max_fitness}%\n")
if(abs(prev_fitness - max_fitness)<0.01):
stagnate_cnt+=1
else:
stagnate_cnt =0
prev_fitness = max_fitness
time_taken = time.time() - start_time
if (time_taken> time_limit-0.01):
if (debug_stmt):
print("\nTime limit exceeded, couldn't find a solution\n")
break
if (stagnate_cnt==max_stagnate_cnt):
if (debug_stmt):
print("\nFitness Score stagnated for too long\n")
break
for p in population:
if p.get_fitness_score()==max_fitness:
return p,time_taken
return None,time_taken
def main():
cnfC = CNF_Creator(n=50) # n is number of symbols in the 3-CNF sentence
#sentence = cnfC.CreateRandomSentence(m=120) # m is number of clauses in the 3-CNF sentence
#print('Random sentence : ',sentence)
sentence = cnfC.ReadCNFfromCSVfile()
#print('\nSentence from CSV file : ',sentence)
ga = Genetic_Algorithm(len(sentence))
best_model,time_taken = ga.run_algorithm(sentence)
print('\n\n')
print('Roll No : 2019A7PS0033G')
print('Number of clauses in CSV file : ',len(sentence))
print('Best model : ', best_model.get_truth_values())
print(f'Fitness value of best model : {best_model.get_fitness_score()}%')
print(f'Time taken : {time_taken}')
print('\n\n')
if __name__=='__main__':
main()
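    # Demo of the crossover operator alone (toy bit-strings, hypothetical inputs):
    _p1 = CNF_Model(6, np.array([1, 1, 1, 0, 0, 0]))
    _p2 = CNF_Model(6, np.array([0, 0, 0, 1, 1, 1]))
    _child = Genetic_Algorithm(num_of_clauses=1, num_of_literals=6).reproduce(_p1, _p2)
    print('Crossover demo child:', _child.truth_vals)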
|
[
"numpy.zeros",
"time.time",
"numpy.append",
"numpy.random.random",
"numpy.random.randint",
"numpy.random.choice"
] |
[((4268, 4298), 'numpy.zeros', 'np.zeros', (['self.population_size'], {}), '(self.population_size)\n', (4276, 4298), True, 'import numpy as np\n'), ((4827, 4852), 'numpy.random.randint', 'np.random.randint', (['length'], {}), '(length)\n', (4844, 4852), True, 'import numpy as np\n'), ((4921, 4970), 'numpy.append', 'np.append', (['child_arr', 'parent_2.truth_vals[pivot:]'], {}), '(child_arr, parent_2.truth_vals[pivot:])\n', (4930, 4970), True, 'import numpy as np\n'), ((7022, 7033), 'time.time', 'time.time', ([], {}), '()\n', (7031, 7033), False, 'import time\n'), ((1211, 1256), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)'], {'size': 'num_of_literals'}), '(0, 2, size=num_of_literals)\n', (1228, 1256), True, 'import numpy as np\n'), ((5243, 5261), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (5259, 5261), True, 'import numpy as np\n'), ((6043, 6061), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (6059, 6061), True, 'import numpy as np\n'), ((7422, 7464), 'numpy.random.choice', 'np.random.choice', (['population', '(2)'], {'p': 'weights'}), '(population, 2, p=weights)\n', (7438, 7464), True, 'import numpy as np\n'), ((8300, 8311), 'time.time', 'time.time', ([], {}), '()\n', (8309, 8311), False, 'import time\n'), ((7591, 7609), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (7607, 7609), True, 'import numpy as np\n')]
|
import time
from collections import defaultdict
from typing import List, Dict
import numpy as np
def timed(callback, *args, **kwargs):
start = time.time()
result = callback(*args, **kwargs)
return result, time.time() - start
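# Usage sketch (hypothetical call): result, seconds = timed(sum, range(10_000))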
class Timer:
def __init__(self):
self.start = 0.
self.result = 0.
def __enter__(self):
self.start = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.result = time.time() - self.start
def reset(self):
r = self.result
self.start = 0.
self.result = 0.
return r
class MultiTimer:
def __init__(self):
        self._results: Dict[str, List[float]] = defaultdict(list)
self._currently_measuring = None
def time(self, fieldname):
"""
Starts measuring.
"""
        if self._currently_measuring is not None:
            raise RuntimeError(f"I was already measuring {self._currently_measuring}")
        self._currently_measuring = fieldname
self._results[fieldname].append(time.time())
return self
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._results[self._currently_measuring][-1] = time.time() - self._results[self._currently_measuring][-1]
self._currently_measuring = None
def get_results(self, reset=False, reduce=True):
result = self._results
if reduce:
result = {k: np.mean(v) for k, v in result.items()}
if reset:
self.reset()
return result
def reset(self):
self._results = defaultdict(list)
self._currently_measuring = None
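# Usage sketch (hypothetical timings):
if __name__ == '__main__':
    mt = MultiTimer()
    for _ in range(3):
        with mt.time('sleep'):
            time.sleep(0.01)
    print(mt.get_results(reduce=True))  # e.g. {'sleep': ~0.01}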
|
[
"collections.defaultdict",
"numpy.mean",
"time.time"
] |
[((150, 161), 'time.time', 'time.time', ([], {}), '()\n', (159, 161), False, 'import time\n'), ((376, 387), 'time.time', 'time.time', ([], {}), '()\n', (385, 387), False, 'import time\n'), ((707, 724), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (718, 724), False, 'from collections import defaultdict\n'), ((1610, 1627), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1621, 1627), False, 'from collections import defaultdict\n'), ((220, 231), 'time.time', 'time.time', ([], {}), '()\n', (229, 231), False, 'import time\n'), ((482, 493), 'time.time', 'time.time', ([], {}), '()\n', (491, 493), False, 'import time\n'), ((1045, 1056), 'time.time', 'time.time', ([], {}), '()\n', (1054, 1056), False, 'import time\n'), ((1231, 1242), 'time.time', 'time.time', ([], {}), '()\n', (1240, 1242), False, 'import time\n'), ((1460, 1470), 'numpy.mean', 'np.mean', (['v'], {}), '(v)\n', (1467, 1470), True, 'import numpy as np\n')]
|
import matplotlib.pyplot as plt
from matplotlib import animation
import matplotlib.gridspec as gridspec
from IPython.core.display import HTML
import numpy as np
import math
def animate(sequences, interval=100, blit=True, fig_size=(14, 10), get_fig=False):
if isinstance(sequences, list) or isinstance(sequences, np.ndarray):
fig, ax = plt.subplots(1, 1)
animate = [[ax.imshow(np.squeeze(_), cmap='gray')] for _ in sequences]
elif isinstance(sequences, zip):
animate = []
for i, el in enumerate(sequences):
seq = []
if i == 0:
nb_el = len(el)
nb_col = 2
nb_row = math.ceil(nb_el / nb_col)
fig, ax = plt.subplots(nb_row, nb_col)
for j in range(len(el)):
col = int(j % 2 != 0)
row = j // nb_col
if nb_row == 1:
seq.append(ax[col].imshow(np.squeeze(el[j]), cmap='gray'))
else:
seq.append(ax[row, col].imshow(np.squeeze(el[j]), cmap='gray'))
animate.append(seq)
else:
raise ValueError("Expected type is zip, list or numpy.ndarray, got ", type(sequences))
fig.set_size_inches(*fig_size)
anim = animation.ArtistAnimation(fig, animate, interval=interval, blit=blit)
if not get_fig:
return anim
else:
return anim, fig
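# Usage sketch (illustrative shapes, hypothetical variable names):
#   frames = np.random.rand(10, 64, 64)
#   anim = animate(frames, interval=200)   # matplotlib ArtistAnimation
#   anim_html = html_animation(frames)     # HTML5 video for notebooks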
def html_animation(sequences, interval=100, blit=True, fig_size=(14, 10)):
anim = animate(sequences, interval, blit, fig_size)
return HTML(anim.to_html5_video())
def plot_results(batch, fig_size=(14, 10)):
if batch.shape[0] == 1 or batch.ndim in [2, 3]:
fig, ax = plt.subplots(1, 1)
ax.imshow(np.squeeze(batch), cmap='gray')
else:
nb_el = batch.shape[0]
nb_col = 2
nb_row = math.ceil(nb_el / nb_col)
fig, ax = plt.subplots(nb_row, nb_col)
for j in range(nb_el):
col = int(j % 2 != 0)
row = j // nb_col
if nb_row == 1:
ax[col].imshow(np.squeeze(batch[j]), cmap='gray')
else:
ax[row, col].imshow(np.squeeze(batch[j]), cmap='gray')
fig.set_size_inches(*fig_size)
fig.show()
import matplotlib
import matplotlib.cm
class VisuResultsClassification:
def __init__(self,
x,
sequences=None,
bar=None,
graph=None,
fill=None,
interval=50,
figsize=(14, 12),
sequences_titles=None,
graph_titles=None,
fill_titles=None):
"""
Sequence
:param x:
:param sequences: List of arrays to be plotted as animated sequences
:param bar: List of tuple of arrays to be plotted as bar
:param graph: List of arrays to be plotted as curves
:param fill: List of arrays to be plotted as filled curves
For graph and fill arguments, instead of a list of lists, you can provide a list of dict, each dict being plot on
the same graph, with the key being the label legend
For bar argument, each tuple should be organized as: (yProba, yGroundtruth, nb_class, *labels[optional])
"""
        assert not (bar is None and graph is None and fill is None), \
            "You have to provide at least one of the following arguments: bar, graph, fill"
bar = self.init_arg(bar)
graph = self.init_arg(graph)
sequences = self.init_arg(sequences)
sequences_titles = self.init_arg(sequences_titles)
fill = self.init_arg(fill)
graph_titles = self.init_arg(graph_titles)
fill_titles = self.init_arg(fill_titles)
self.x = x
self.length = len(x)
self.sequences = [self.normalize_array(np.squeeze(array)) for array in sequences]
self.fig = plt.figure(figsize=figsize)
norm = matplotlib.colors.Normalize(vmin=0, vmax=20)
cmap = matplotlib.cm.get_cmap('tab20')
self.colors = [cmap(norm(_)) for _ in np.arange(0, 20, 1)]
nb_plots = len(sequences) + len(graph) + len(bar) + len(fill)
self.outer = gridspec.GridSpec(math.ceil(nb_plots / 2), 2, wspace=0.1, hspace=0.25)
iter_subplot = 0
# Images
self.array_axs = []
self.image_plts = []
for seq in self.sequences:
self.array_axs.append(self.fig.add_subplot(self.outer[iter_subplot]))
self.image_plts.append(self.array_axs[-1].imshow(seq[0], cmap='gray'))
iter_subplot += 1
plt.axis('off')
for j, title in enumerate(sequences_titles):
self.array_axs[j].title.set_text(title)
# Curves
self.graph_axs = []
self.graph_vertical_lines = []
for arrays in graph:
graph_ax = self.fig.add_subplot(self.outer[iter_subplot])
if isinstance(arrays, dict):
for key in arrays:
graph_ax.plot(self.x, self.pad_missing_value(arrays[key]), label=key, color=self._get_new_color())
else:
graph_ax.plot(self.x, self.pad_missing_value(arrays), color=self._get_new_color())
iter_subplot += 1
plt.xticks(rotation=25)
graph_ax.legend()
self.graph_vertical_lines.append(graph_ax.axvline(x=self.x[0], color='k', linestyle=':'))
self.graph_axs.append(graph_ax)
for j, title in enumerate(graph_titles):
self.graph_axs[j].title.set_text(title)
self.fill_axs = []
self.fill_vertical_lines = []
for arrays in fill:
fill_ax = self.fig.add_subplot(self.outer[iter_subplot])
if isinstance(arrays, dict):
for key in arrays:
color = self._get_new_color()
fill_ax.plot(self.x, self.pad_missing_value(arrays[key]), label=key, color=color)
color[-1] = 0.5
filledArray = arrays[key]
filledArray[0] = 0
filledArray[-1] = 0
fill_ax.fill(self.x, self.pad_missing_value(filledArray), color=color)
else:
color = self._get_new_color()
fill_ax.plot(self.x, self.pad_missing_value(arrays), color=color)
color[-1] = 0.5
arrays[0] = 0
arrays[-1] = 0
fill_ax.fill(self.x, self.pad_missing_value(arrays), color=color)
plt.xticks(rotation=25)
fill_ax.legend()
self.fill_vertical_lines.append(fill_ax.axvline(x=self.x[0], color='k', linestyle=':'))
self.fill_axs.append(fill_ax)
iter_subplot += 1
for j, title in enumerate(fill_titles):
self.fill_axs[j].title.set_text(title)
# bar
self.bars = bar
self.bar_axs = []
for arrays in self.bars:
bar_ax = self.fig.add_subplot(self.outer[iter_subplot])
self.fill_bar(bar_ax, arrays, 0)
self.bar_axs.append(bar_ax)
iter_subplot += 1
self.anim = animation.FuncAnimation(self.fig, self._animate,
frames=np.arange(self.length),
interval=interval)
def init_arg(self, arg):
if arg is None:
arg = []
elif not isinstance(arg, list):
arg = [arg]
return arg
def pad_missing_value(self, array):
missing_value = max(0, self.length-len(array))
return np.pad(array, (0, missing_value), 'constant')
def fill_bar(self, bar_ax, tuple_array, timestamp):
labels = None
nb_class = tuple_array[2]
if len(tuple_array) == 4:
labels = tuple_array[-1]
tuple_array = tuple_array[:2]
proba = tuple_array[0]
gt = tuple_array[1]
if timestamp < len(gt):
pred = np.argmax(proba, axis=1)[timestamp]
gt = gt[timestamp]
color = self._get_bar_colors(pred, gt, nb_class)
bar_ax.bar(np.arange(nb_class), proba[timestamp], color=color, width=0.5)
bar_ax.set_xticks(np.arange(nb_class))
if labels is not None:
bar_ax.set_xticklabels(labels)
else:
color = [.7, .7, .7]
bar_ax.bar(np.arange(nb_class), np.zeros(nb_class), color=color, width=0.5)
bar_ax.set_xticks(np.arange(nb_class))
if labels is not None:
bar_ax.set_xticklabels(labels)
    def normalize_array(self, x):
        x = np.array(x, dtype=np.float64)  # work on a float copy so the in-place ops below don't mutate the caller's data
        x -= x.min()
        x /= x.max()
        return x
def _get_new_color(self):
self.colors = np.roll(self.colors, -1)
return self.colors[0]
def _get_bar_colors(self, prediction, ground_truth, nb_class):
neutral = [.7, .7, .7]
incorrect = [.7, 0, 0]
correct = [0, .8, 0]
colors = [neutral] * nb_class
prediction = int(round(prediction))
if prediction == ground_truth:
colors[prediction] = correct
else:
colors[prediction] = incorrect
return colors
def _animate(self, i):
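        """Advance images, vertical time markers, and bar charts to frame i."""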
for j, plot in enumerate(self.image_plts):
plot.set_data(self.sequences[j][i])
for verticalLine in self.graph_vertical_lines:
verticalLine.set_data([self.x[i], self.x[i]], [0, 1])
for verticalLine in self.fill_vertical_lines:
verticalLine.set_data([self.x[i], self.x[i]], [0, 1])
plt.axis('off')
for bar_ax, bar_array in zip(self.bar_axs, self.bars):
bar_ax.clear()
self.fill_bar(bar_ax, bar_array, i)
def html_anim(self):
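        """Render the animation as an HTML5 video for notebook display."""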
return HTML(self.anim.to_html5_video())
|
[
"numpy.pad",
"matplotlib.colors.Normalize",
"math.ceil",
"matplotlib.cm.get_cmap",
"numpy.roll",
"numpy.argmax",
"numpy.zeros",
"matplotlib.pyplot.axis",
"matplotlib.animation.ArtistAnimation",
"matplotlib.pyplot.figure",
"numpy.arange",
"numpy.squeeze",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.subplots"
] |
[((1273, 1342), 'matplotlib.animation.ArtistAnimation', 'animation.ArtistAnimation', (['fig', 'animate'], {'interval': 'interval', 'blit': 'blit'}), '(fig, animate, interval=interval, blit=blit)\n', (1298, 1342), False, 'from matplotlib import animation\n'), ((350, 368), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (362, 368), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1725), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (1719, 1725), True, 'import matplotlib.pyplot as plt\n'), ((1854, 1879), 'math.ceil', 'math.ceil', (['(nb_el / nb_col)'], {}), '(nb_el / nb_col)\n', (1863, 1879), False, 'import math\n'), ((1898, 1926), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nb_row', 'nb_col'], {}), '(nb_row, nb_col)\n', (1910, 1926), True, 'import matplotlib.pyplot as plt\n'), ((3924, 3951), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize'}), '(figsize=figsize)\n', (3934, 3951), True, 'import matplotlib.pyplot as plt\n'), ((3967, 4011), 'matplotlib.colors.Normalize', 'matplotlib.colors.Normalize', ([], {'vmin': '(0)', 'vmax': '(20)'}), '(vmin=0, vmax=20)\n', (3994, 4011), False, 'import matplotlib\n'), ((4027, 4058), 'matplotlib.cm.get_cmap', 'matplotlib.cm.get_cmap', (['"""tab20"""'], {}), "('tab20')\n", (4049, 4058), False, 'import matplotlib\n'), ((7654, 7699), 'numpy.pad', 'np.pad', (['array', '(0, missing_value)', '"""constant"""'], {}), "(array, (0, missing_value), 'constant')\n", (7660, 7699), True, 'import numpy as np\n'), ((8799, 8823), 'numpy.roll', 'np.roll', (['self.colors', '(-1)'], {}), '(self.colors, -1)\n', (8806, 8823), True, 'import numpy as np\n'), ((9635, 9650), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (9643, 9650), True, 'import matplotlib.pyplot as plt\n'), ((1744, 1761), 'numpy.squeeze', 'np.squeeze', (['batch'], {}), '(batch)\n', (1754, 1761), True, 'import numpy as np\n'), ((4236, 4259), 'math.ceil', 'math.ceil', (['(nb_plots / 2)'], {}), '(nb_plots / 2)\n', (4245, 4259), False, 'import math\n'), ((4632, 4647), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4640, 4647), True, 'import matplotlib.pyplot as plt\n'), ((5293, 5316), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(25)'}), '(rotation=25)\n', (5303, 5316), True, 'import matplotlib.pyplot as plt\n'), ((6571, 6594), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(25)'}), '(rotation=25)\n', (6581, 6594), True, 'import matplotlib.pyplot as plt\n'), ((3862, 3879), 'numpy.squeeze', 'np.squeeze', (['array'], {}), '(array)\n', (3872, 3879), True, 'import numpy as np\n'), ((4105, 4124), 'numpy.arange', 'np.arange', (['(0)', '(20)', '(1)'], {}), '(0, 20, 1)\n', (4114, 4124), True, 'import numpy as np\n'), ((7298, 7320), 'numpy.arange', 'np.arange', (['self.length'], {}), '(self.length)\n', (7307, 7320), True, 'import numpy as np\n'), ((8037, 8061), 'numpy.argmax', 'np.argmax', (['proba'], {'axis': '(1)'}), '(proba, axis=1)\n', (8046, 8061), True, 'import numpy as np\n'), ((8188, 8207), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8197, 8207), True, 'import numpy as np\n'), ((8281, 8300), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8290, 8300), True, 'import numpy as np\n'), ((8454, 8473), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8463, 8473), True, 'import numpy as np\n'), ((8475, 8493), 'numpy.zeros', 'np.zeros', (['nb_class'], {}), '(nb_class)\n', (8483, 8493), True, 'import 
numpy as np\n'), ((8549, 8568), 'numpy.arange', 'np.arange', (['nb_class'], {}), '(nb_class)\n', (8558, 8568), True, 'import numpy as np\n'), ((399, 412), 'numpy.squeeze', 'np.squeeze', (['_'], {}), '(_)\n', (409, 412), True, 'import numpy as np\n'), ((678, 703), 'math.ceil', 'math.ceil', (['(nb_el / nb_col)'], {}), '(nb_el / nb_col)\n', (687, 703), False, 'import math\n'), ((730, 758), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nb_row', 'nb_col'], {}), '(nb_row, nb_col)\n', (742, 758), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2102), 'numpy.squeeze', 'np.squeeze', (['batch[j]'], {}), '(batch[j])\n', (2092, 2102), True, 'import numpy as np\n'), ((2171, 2191), 'numpy.squeeze', 'np.squeeze', (['batch[j]'], {}), '(batch[j])\n', (2181, 2191), True, 'import numpy as np\n'), ((948, 965), 'numpy.squeeze', 'np.squeeze', (['el[j]'], {}), '(el[j])\n', (958, 965), True, 'import numpy as np\n'), ((1054, 1071), 'numpy.squeeze', 'np.squeeze', (['el[j]'], {}), '(el[j])\n', (1064, 1071), True, 'import numpy as np\n')]
|
#! python
# -*- coding: utf-8 -*-
"""
WavyTool is a simple program that allows you to acquire data from input devices,
e.g. microphones, and save it to files (CSV, PNG). You can also perform
some simple processing, such as spectral analysis.
:authors: <NAME>, <NAME>
:contact: <EMAIL>, <EMAIL>
:since: 2015/02/27
"""
import collections
import json
import logging
import os
import sys
import time
import urllib.request
import numpy as np
# QtPy must be imported before pyqtgraph
from qtpy.QtCore import QTimer
from qtpy.QtGui import QPixmap
from qtpy.QtWidgets import (QApplication, QFileDialog, QMainWindow,
QMessageBox, QSplashScreen)
# Must be set to the same binding here
api_names = {'pyqt5': 'PyQt5', 'pyside2': 'PySide2', 'pyqt4': 'PyQt4', 'pyside': 'PySide'}
os.environ['PYQTGRAPH_QT_LIB'] = api_names[os.environ['QT_API']]
# PyQtGraph must be imported after QtPy
import pyqtgraph as pg
# Then import the own interface
from wavytool import __version__ as version
from wavytool import app_name
from wavytool.core_wavy import AudioRecord
from wavytool.gui_wav2dat import ConvertWave2Data
from wavytool.mw_wavy import Ui_MainWindow
logging.basicConfig(level=logging.DEBUG)
# Informing about used binding
logging.info('Using Qt binding (QtPy/PyQtGraph): %s', (os.environ['QT_API'],
os.environ['PYQTGRAPH_QT_LIB']))
about = ("<h3>{} v.{}</h3>"
"<p>© <NAME>, <NAME><br/>"
"Sao Carlos Institute of Physics<br/>"
"University of Sao Paulo<br/>"
"<a href='https://github.com/dpizetta/wavy'>WavyTool on GitHub</a><br/>"
"<a href='https://pypi.org/project/wavytool'>WavyTool on PyPI</a><br/>"
"<a href='http://choosealicense.com/licenses/mit'>MIT License</a><br/></p>").format(app_name, version)
def main():
"""The main function."""
args = sys.argv[1:]
wavy = QApplication(args)
wavy.setApplicationVersion(version)
wavy.setApplicationName(app_name)
wavy.setOrganizationName("Sao Carlos Institute of Physics - University of Sao Paulo")
wavy.setOrganizationDomain("www.ifsc.usp.br")
try:
import qdarkstyle
except ImportError:
logging.warning("No dark theme installed, use 'pip install qdarkstyle' to install.")
else:
try:
wavy.setStyleSheet(qdarkstyle.load_stylesheet_from_environment())
except Exception as err:
logging.warning("Problems using qdarkstyle.\nError: %s", str(err))
pixmap = QPixmap("wavytool/images/symbol.png")
splash = QSplashScreen(pixmap)
start = time.time()
splash.show()
splash.repaint()
splash.showMessage("Loading...")
wavy.processEvents()
while time.time() - start < 1:
time.sleep(0.001)
wavy.processEvents()
splash.showMessage("Starting...")
window = MainWindow()
window.showMaximized()
splash.finish(window)
try:
with open('wavytool.config', 'r') as json_file:
data = json.load(json_file)
window.base_path = data['data_folder']
logging.info('Data folder is: %s', window.base_path)
except IOError:
window.getDataFolder()
return wavy.exec_()
class GlobalBuffer():
"""Allows real-time data transfer between plots."""
def __init__(self, buffer_size=1024):
self.recording = False
self.buffer_size = buffer_size
self.data = np.empty(self.buffer_size)
self.counter = 0
self.timestamp = 0
self.time_limit = 0
def startRecording(self):
self.timestamp = time.time()
self.recording = True
def stopRecording(self):
self.timestamp = 0
self.recording = False
self.counter = 0
    def clear(self):
        # Wrap around: restart writing at the beginning of the ring buffer.
        self.counter = 0
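# Module-level buffer shared by the real-time plot (producer) and the recording plot (consumer).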
global_buffer = GlobalBuffer()
class RecordingPlotter(pg.PlotWidget):
"""Plots sub data from real time plotter.
Parameters:
sample_interval (float): sample interval. Default 0.02 seconds.
time_window (float): size (in time) for the main window. Default 20 seconds.
main_window (MainWindow): main_window.
parent (QWidget): parent.
"""
def __init__(self, sample_interval=0.02, time_window=20., main_window=None, parent=None):
super(RecordingPlotter, self).__init__(parent)
self.sample_interval = sample_interval
self.time_window = time_window
self.showGrid(x=True, y=True)
self.setLabel('top', 'Recorded data')
self.setLabel('left', 'Amplitude', 'V')
self.setLabel('bottom', 'Time', 's')
self.curve = None
self.main_window = main_window
global global_buffer
def initData(self):
# Forces update at 20 FPS, shouldn't be taxing to most systems
self._interval = int(1 / 20 * 1000)
self._bufsize = int(self.time_window / self.sample_interval)
self.x = np.linspace(0.0, self.time_window, self._bufsize)
self.setDownsampling(mode='peak')
self.databuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
self.setClipToView(True)
self.data = np.empty(5)
self.ptr = 0
self.counter = 0
self.curve = self.plot(self.x[:self.ptr], self.data[:self.ptr], antialias=True)
self.timer = QTimer()
self.timer.timeout.connect(self.updateplot)
self.timer.start(self._interval)
def setSampleInterval(self, sample_interval):
self.sample_interval = sample_interval
def setTimeWindow(self, time_window):
self.time_window = time_window
self.curve.clear()
self.initData()
def getdata(self):
if global_buffer.time_limit != 0 and self.x[self.ptr] >= global_buffer.time_limit:
            # TODO: this is not a good way to stop because you need the parent,
            # and the parent's stop method calls your methods.
            # We need to think about something different here.
self.main_window.stop()
        # If this reader ran ahead of the producer, fall back to its position.
        while self.counter > global_buffer.counter and self.counter > 0:
            self.counter -= 1
        return global_buffer.data[self.counter % global_buffer.buffer_size]
def updateplot(self):
"""Update plot."""
self.data[self.ptr] = self.getdata()
self.x[self.ptr + 1] = self.x[self.ptr] + self.sample_interval
self.ptr += 1
self.counter += 1
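        # Grow the x/data arrays in chunks of five samples when full.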
if self.ptr >= self.data.shape[0]:
tmp = self.data
xtmp = self.x
self.data = np.empty(self.data.shape[0] + 5)
self.x = np.empty(self.x.shape[0] + 5)
self.data[:tmp.shape[0]] = tmp
self.x[:xtmp.shape[0]] = xtmp
self.curve.setData(self.x[:self.ptr], self.data[:self.ptr])
def setCurveColor(self, r, g, b):
"""Set curve color"""
self.curve.setPen(pg.mkPen(color=(r, g, b)))
class RealTimeRecordingPlotter(pg.PlotWidget):
"""Plots data (audio) in real time.
Parameters:
sample_interval (float): sample interval. Default 0.02 seconds.
time_window (float): size (in time) for the main window. Default 20 seconds.
main_window (MainWindow): main_window.
parent (QWidget): parent.
"""
def __init__(self, sample_interval=0.02, time_window=20., parent=None):
super(RealTimeRecordingPlotter, self).__init__(parent)
self.sample_interval = sample_interval
self.time_window = time_window
self.showGrid(x=True, y=True)
self.setLabel('top', 'Input Real Time')
self.setLabel('left', 'Amplitude', 'V')
self.setLabel('bottom', 'Time', 's')
self.curve = None
global global_buffer
def initData(self):
"""Initialize data for for plotting."""
# self.sample_interval = 0.01
# Forces update at 20 FPS, shouldn't be taxing to most systems
self._interval = int(1 / 20 * 1000)
self._bufsize = int(self.time_window / self.sample_interval)
self.databuffer = collections.deque([0.0] * self._bufsize, self._bufsize)
self.x = np.linspace(-self.time_window, 0.0, self._bufsize)
        self.y = np.zeros(self._bufsize, dtype=float)  # np.float was removed in NumPy 1.24
# Initializes audio listener
self.audio = AudioRecord("output.wav", self.sample_interval)
try:
self.audio.begin_audio()
except IOError as e:
QMessageBox.information(self,
self.tr('Information'),
                                    self.tr('No input device found; please make sure to plug one in before '
                                            'opening the program. Please restart the program and try again.\n{}'.format(e)),
QMessageBox.Ok)
            sys.exit(1)  # use sys.exit rather than the site-provided exit()
# :TODO: needs to be separated the interval of plotting data from the acquire data.
# Initializes the timer
self.timer = QTimer()
self.timer.timeout.connect(self.updateplot)
self.timer.start(self._interval)
# Plot for the first time
self.curve = self.plot(self.x, self.y, pen=(0, 255, 255), antialias=True)
self.curve.clear()
def setSampleInterval(self, sample_interval):
"""Sets the sample interval for plotting.
Parameters:
sample_interval (float): sample interval in seconds
"""
self.sample_interval = sample_interval
self.curve.clear()
self.initData()
def setTimeWindow(self, time_window):
"""Sets the time window for plotting.
Parameters:
time_window (float): size (in time) for the main window, in seconds.
"""
self.time_window = time_window
self.curve.clear()
self.initData()
def getdata(self):
"""Gets data for plotting."""
b = self.audio.get_data_from_audio()[1]
new = b[0]
# This clipping of the signal prevents pyqtgraph from breaking due
# to large random noise when some soundcards are initiated.
# Prevents input overflow when program starts.
if new > 1e+150:
new = 1e+150
if global_buffer.recording is True:
global_buffer.counter += 1
if global_buffer.counter >= global_buffer.buffer_size:
global_buffer.clear()
global_buffer.data[global_buffer.counter] = new
return new
def updateplot(self):
"""Update plot."""
stp = self.getdata()
self.databuffer.append(stp)
self.y[:] = self.databuffer
self.curve.setData(self.x, self.y)
class MainWindow(QMainWindow):
"""Main window class.
Parameters:
parent (QWidget): parent
"""
def __init__(self, parent=None):
global global_buffer
super(MainWindow, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# Check new version
self.setWindowTitle(app_name + ' ' + version)
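        # NOTE: the 'update' attribute below shadows QWidget.update(); renaming it would be safer.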
self.update = "NOT CHECKED ..."
self.checkUpdate()
self.filepath = ""
# Initial state is none because there is no data acquired yet
self.isSaved = None
# Sample interval should be 0.02s to not overflow in XP
self.ui.doubleSpinBoxSampleInterval.setMinimum(0.02)
self.ui.doubleSpinBoxSampleInterval.setMaximum(0.5)
self.ui.doubleSpinBoxSampleInterval.setValue(0.02)
self.ui.doubleSpinBoxSampleInterval.setSingleStep(0.01)
# Connecting actions
# File actions
# self.ui.actionNew.triggered.connect(self.newFile)
# For now it cannot open a file
# self.ui.actionOpen.triggered.connect(self.openFile)
# self.ui.actionSave.triggered.connect(self.saveFile)
self.ui.actionSave_As.triggered.connect(self.saveFileAs)
self.ui.actionSave_As.setEnabled(False)
self.ui.actionPrint_graph.triggered.connect(self.saveImageAs)
self.ui.actionPrint_graph.setEnabled(False)
# Acquire actions
self.ui.actionRecord.triggered.connect(self.record)
self.ui.actionRecord.setCheckable(True)
self.ui.actionPause.triggered.connect(self.pause)
self.ui.actionPause.setCheckable(True)
self.ui.actionPause.setEnabled(False)
self.ui.actionStop.triggered.connect(self.stop)
self.ui.actionStop.setEnabled(False)
# Tools actions
self.ui.actionConvert_Wav_to_Dat.triggered.connect(self.callTools)
# Program actions
self.ui.actionQuit.triggered.connect(self.close)
self.ui.actionAbout_Wavy.triggered.connect(self.about)
# Plot widget
self.plot_widget = RealTimeRecordingPlotter(sample_interval=0.02, time_window=20.)
self.plot_widget.initData()
self.ui.gridLayout_2.addWidget(self.plot_widget, 0, 1)
self.plot_widget_rec = RecordingPlotter(sample_interval=0.02, time_window=5., main_window=self)
self.ui.gridLayout_2.addWidget(self.plot_widget_rec, 1, 1)
# Inputs
self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.plot_widget.setSampleInterval)
self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.plot_widget_rec.setSampleInterval)
self.ui.doubleSpinBoxSampleInterval.valueChanged.connect(self.setSampleRate)
# self.ui.doubleSpinBoxSampleRate.valueChanged.connect(self.setSampleInterval)
self.ui.spinBoxWindowTime.valueChanged.connect(self.plot_widget.setTimeWindow)
self.setSampleRate(self.ui.doubleSpinBoxSampleInterval.value())
def checkUpdate(self):
"""Check update from internet."""
url = 'https://api.github.com/repos/dpizetta/wavy/releases/latest'
try:
response = urllib.request.urlopen(url, timeout=20)
tag_version = json.loads(response.read())
except Exception:
pass
else:
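            # NOTE: string comparison is lexicographic; fine for single-digit version
            # components, but it would mis-order e.g. '1.10' vs '1.9'.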
if str(version) >= str(tag_version['tag_name'][1:]):
self.update = "Up to date!"
else:
self.update = "New version ({}) is available!".format(str(tag_version['tag_name']))
QMessageBox.information(self,
self.tr('Information'),
                                        self.tr('<p>Oh, there is a new version ({}) available.\n'
'Go to <a href="https://github.com/dpizetta/wavy/releases/latest">'
'download!</a></p>.'.format(tag_version['tag_name'])),
QMessageBox.Ok)
self.ui.labelAbout.setText(self.tr(about + "\nVersion status: " + self.update))
def setSampleRate(self, sample_interval):
"""Sets sample rate."""
self.ui.doubleSpinBoxSampleRate.setValue(1. / sample_interval)
def setSampleInterval(self, sample_rate):
"""Sets sample interval."""
self.ui.doubleSpinBoxSampleInterval.setValue(1. / sample_rate)
def callTools(self):
"""Call converting tool."""
dlg = ConvertWave2Data()
dlg.exec_()
def createFileName(self):
"""Construct a new file name to save the data."""
# Creates auto naming filename
filename = 'new_wavy_data_' + time.strftime("%Y%m%d%H%M%S", time.gmtime())
# Gets the current directory
# base_path = os.path.abspath(".")
self.filepath = os.path.join(self.base_path, filename)
self.setWindowFilePath(self.filepath)
def record(self):
"""Starts acquiring."""
# Create a new filename for the current acquisition
self.createFileName()
# Checks if is saved before start a new recording
if self.isSaved is False:
answer = QMessageBox.question(
self,
self.tr('Question'),
self.tr('Do you want to save your data before start a new record?'),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.saveFileAs()
if self.plot_widget_rec.curve is not None:
self.plot_widget_rec.curve.clear()
self.plot_widget_rec.initData()
self.plot_widget_rec.setCurveColor(255, 0, 0)
self.plot_widget_rec.setLabel('top', 'Recording ...')
# Set enabled buttons
self.ui.actionPause.setEnabled(True)
self.ui.actionStop.setEnabled(True)
self.ui.actionRecord.setEnabled(False)
# Set enabled inputs
self.ui.spinBoxWindowTime.setEnabled(False)
self.ui.doubleSpinBoxSampleInterval.setEnabled(False)
self.ui.doubleSpinBoxSampleRate.setEnabled(False)
self.ui.spinBoxStopRecordingAfter.setEnabled(False)
# Set enabled tool bar and menu
self.ui.toolBarFile.setEnabled(False)
self.ui.menuFile.setEnabled(False)
self.ui.menuTools.setEnabled(False)
global_buffer.time_limit = self.ui.spinBoxStopRecordingAfter.value()
global_buffer.startRecording()
self.isSaved = False
def pause(self):
"""Pauses acquiring."""
# TODO: We need to discuss if this is needed
# because the time is not correctly saved
if self.ui.actionPause.isChecked():
# Stopping changing color and label
self.plot_widget_rec.timer.stop()
self.plot_widget_rec.setCurveColor(255, 153, 0)
self.plot_widget_rec.setLabel('top', 'Paused ...')
global_buffer.stopRecording()
else:
# Starting changing color and label
self.plot_widget_rec.timer.start()
self.plot_widget_rec.setCurveColor(255, 0, 0)
self.plot_widget_rec.setLabel('top', 'Recording ...')
global_buffer.startRecording()
# Set enabled tool bar
self.ui.toolBarFile.setEnabled(False)
self.ui.menuFile.setEnabled(False)
self.ui.menuTools.setEnabled(False)
def stop(self):
"""Stops acquiring."""
# Stopping changing color and label
self.plot_widget_rec.timer.stop()
self.plot_widget_rec.setCurveColor(0, 255, 0)
self.plot_widget_rec.setLabel('top', 'Stopped ...')
# Set checked
self.ui.actionRecord.setChecked(False)
self.ui.actionPause.setChecked(False)
# Set enabled buttons
self.ui.actionPause.setEnabled(False)
self.ui.actionStop.setEnabled(False)
self.ui.actionRecord.setEnabled(True)
# Set enabled inputs
self.ui.doubleSpinBoxSampleInterval.setEnabled(True)
self.ui.doubleSpinBoxSampleRate.setEnabled(True)
self.ui.spinBoxWindowTime.setEnabled(True)
self.ui.spinBoxStopRecordingAfter.setEnabled(True)
# Set enabled tool bar
self.ui.toolBarFile.setEnabled(True)
self.ui.menuFile.setEnabled(True)
self.ui.menuTools.setEnabled(True)
self.ui.actionSave_As.setEnabled(True)
self.ui.actionPrint_graph.setEnabled(True)
global_buffer.stopRecording()
def savePNGFile(self, filepath):
"""Saves an image."""
# This extension should not be removed
# Exporter needs the extension to save correctly.
filepath += ".png"
logging.info('File path to save image: %s', filepath)
self.plot_widget_rec.setBackground('w')
exporter = pg.exporters.ImageExporter(self.plot_widget_rec.plotItem)
self.plot_widget_rec.setBackground('k')
exporter.export(filepath)
def saveCSVFile(self, filepath):
"""Saves a data file."""
# This extension should not be removed
# Exporter needs the extension to save correctly.
filepath += ".csv"
logging.info('File path to save data: %s', filepath)
exporter = pg.exporters.CSVExporter(self.plot_widget_rec.plotItem)
exporter.export(filepath)
def getDataFolder(self):
"""Get data folder option."""
answer = QMessageBox.question(self,
self.tr('Question'),
self.tr('It seems the first time you run WavyTool. Do you want to choose '
'a folder to keep exported data?'),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
path = QFileDialog.getExistingDirectory(self,
self.tr('Data folder'),
os.path.expanduser('~'))
if path:
try:
# This string converting is needed because the return is a QString
self.base_path = os.path.splitext(str(path))[0]
with open('wavytool.config', 'w') as outfile:
json.dump({'data_folder': self.base_path}, outfile)
except Exception as e:
QMessageBox.critical(self,
self.tr('Critical'),
                                     self.tr('There was a problem setting the default folder to save data:\n '
'{}'.format(str(e))),
QMessageBox.Ok)
else:
logging.info('The default folder is: %s', self.base_path)
QMessageBox.information(self,
self.tr('Information'),
self.tr('Default folder to save data was set up to:\n'
'{}.'.format(self.base_path)),
QMessageBox.Ok)
else:
self.base_path = '.'
def saveImageAs(self):
"""Saves image as."""
        path = QFileDialog.getSaveFileName(self,
                                           self.tr('Export recorded image ...'),
                                           os.path.splitext(self.filepath)[0] + '.png',
                                           self.tr("Image File (*.png)"))
        # Qt5-style bindings return a (filename, filter) tuple; Qt4-style return a string.
        if isinstance(path, tuple):
            path = path[0]
        if path:
try:
# This string converting is needed because the return is a QString
self.filepath = os.path.splitext(str(path))[0]
self.savePNGFile(self.filepath)
except Exception as e:
QMessageBox.critical(self,
self.tr('Critical'),
                                     self.tr('There was a problem saving the image:\n {}'.format(str(e))),
QMessageBox.Ok)
else:
logging.info('The image was saved in the file: %s', self.filepath)
QMessageBox.information(self,
self.tr('Information'),
self.tr('Image was successfully exported.'),
QMessageBox.Ok)
def saveFileAs(self):
"""Saves data file as."""
        path = QFileDialog.getSaveFileName(self,
                                           self.tr('Save recorded data ...'),
                                           os.path.splitext(self.filepath)[0] + '.csv',
                                           self.tr("Data File (*.csv)"))
        # Qt5-style bindings return a (filename, filter) tuple; Qt4-style return a string.
        if isinstance(path, tuple):
            path = path[0]
        if path:
try:
# This string converting is needed because the return is a QString
self.filepath = os.path.splitext(str(path))[0]
self.saveCSVFile(self.filepath)
except Exception as e:
self.isSaved = False
QMessageBox.critical(self,
self.tr('Critical'),
                                     self.tr('There was a problem saving the data:\n {}'.format(str(e))),
QMessageBox.Ok)
else:
self.isSaved = True
# self.ui.actionSave_As.setEnabled(False)
logging.info('The data was saved in the file: %s', self.filepath)
QMessageBox.information(self,
self.tr('Information'),
                                    self.tr('Data was successfully saved.\n\nTHE SAVED DATA CORRESPONDS '
                                            'TO THE RECORDING WINDOW; IF YOU ZOOM IN ON IT, ONLY THE '
                                            'VISIBLE DATA WILL BE SAVED!'),
QMessageBox.Ok)
def about(self):
"""Show the dialog about."""
QMessageBox.about(self, self.tr('About'),
self.tr(about))
def closeQuestion(self):
"""Asks about to close."""
if self.isSaved is False:
answer = QMessageBox.question(
self,
self.tr('Question'),
self.tr('Do you want to save your data before exit?'),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.saveFileAs()
answer = QMessageBox.question(self,
self.tr('Close'),
self.tr('Do you want to exit?'),
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
return answer == QMessageBox.Yes
def closeEvent(self, event):
"""Re implements close event."""
if self.closeQuestion():
self.plot_widget.timer.stop()
self.plot_widget.audio.end_audio()
if self.plot_widget_rec.curve is not None:
self.plot_widget_rec.timer.stop()
event.accept()
else:
event.ignore()
|
[
"qdarkstyle.load_stylesheet_from_environment",
"numpy.empty",
"pyqtgraph.exporters.CSVExporter",
"os.path.join",
"collections.deque",
"os.path.expanduser",
"qtpy.QtWidgets.QSplashScreen",
"logging.warning",
"qtpy.QtCore.QTimer",
"numpy.linspace",
"pyqtgraph.mkPen",
"wavytool.mw_wavy.Ui_MainWindow",
"json.dump",
"qtpy.QtGui.QPixmap",
"time.sleep",
"wavytool.gui_wav2dat.ConvertWave2Data",
"pyqtgraph.exporters.ImageExporter",
"json.load",
"logging.basicConfig",
"time.gmtime",
"numpy.zeros",
"wavytool.core_wavy.AudioRecord",
"time.time",
"logging.info",
"os.path.splitext",
"qtpy.QtWidgets.QApplication"
] |
[((1169, 1209), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (1188, 1209), False, 'import logging\n'), ((1242, 1355), 'logging.info', 'logging.info', (['"""Using Qt binding (QtPy/PyQtGraph): %s"""', "(os.environ['QT_API'], os.environ['PYQTGRAPH_QT_LIB'])"], {}), "('Using Qt binding (QtPy/PyQtGraph): %s', (os.environ['QT_API'],\n os.environ['PYQTGRAPH_QT_LIB']))\n", (1254, 1355), False, 'import logging\n'), ((1920, 1938), 'qtpy.QtWidgets.QApplication', 'QApplication', (['args'], {}), '(args)\n', (1932, 1938), False, 'from qtpy.QtWidgets import QApplication, QFileDialog, QMainWindow, QMessageBox, QSplashScreen\n'), ((2537, 2574), 'qtpy.QtGui.QPixmap', 'QPixmap', (['"""wavytool/images/symbol.png"""'], {}), "('wavytool/images/symbol.png')\n", (2544, 2574), False, 'from qtpy.QtGui import QPixmap\n'), ((2588, 2609), 'qtpy.QtWidgets.QSplashScreen', 'QSplashScreen', (['pixmap'], {}), '(pixmap)\n', (2601, 2609), False, 'from qtpy.QtWidgets import QApplication, QFileDialog, QMainWindow, QMessageBox, QSplashScreen\n'), ((2623, 2634), 'time.time', 'time.time', ([], {}), '()\n', (2632, 2634), False, 'import time\n'), ((2781, 2798), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (2791, 2798), False, 'import time\n'), ((3460, 3486), 'numpy.empty', 'np.empty', (['self.buffer_size'], {}), '(self.buffer_size)\n', (3468, 3486), True, 'import numpy as np\n'), ((3623, 3634), 'time.time', 'time.time', ([], {}), '()\n', (3632, 3634), False, 'import time\n'), ((5008, 5057), 'numpy.linspace', 'np.linspace', (['(0.0)', 'self.time_window', 'self._bufsize'], {}), '(0.0, self.time_window, self._bufsize)\n', (5019, 5057), True, 'import numpy as np\n'), ((5126, 5181), 'collections.deque', 'collections.deque', (['([0.0] * self._bufsize)', 'self._bufsize'], {}), '([0.0] * self._bufsize, self._bufsize)\n', (5143, 5181), False, 'import collections\n'), ((5235, 5246), 'numpy.empty', 'np.empty', (['(5)'], {}), '(5)\n', (5243, 5246), True, 'import numpy as np\n'), ((5402, 5410), 'qtpy.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (5408, 5410), False, 'from qtpy.QtCore import QTimer\n'), ((8113, 8168), 'collections.deque', 'collections.deque', (['([0.0] * self._bufsize)', 'self._bufsize'], {}), '([0.0] * self._bufsize, self._bufsize)\n', (8130, 8168), False, 'import collections\n'), ((8186, 8236), 'numpy.linspace', 'np.linspace', (['(-self.time_window)', '(0.0)', 'self._bufsize'], {}), '(-self.time_window, 0.0, self._bufsize)\n', (8197, 8236), True, 'import numpy as np\n'), ((8254, 8293), 'numpy.zeros', 'np.zeros', (['self._bufsize'], {'dtype': 'np.float'}), '(self._bufsize, dtype=np.float)\n', (8262, 8293), True, 'import numpy as np\n'), ((8353, 8400), 'wavytool.core_wavy.AudioRecord', 'AudioRecord', (['"""output.wav"""', 'self.sample_interval'], {}), "('output.wav', self.sample_interval)\n", (8364, 8400), False, 'from wavytool.core_wavy import AudioRecord\n'), ((9030, 9038), 'qtpy.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (9036, 9038), False, 'from qtpy.QtCore import QTimer\n'), ((10961, 10976), 'wavytool.mw_wavy.Ui_MainWindow', 'Ui_MainWindow', ([], {}), '()\n', (10974, 10976), False, 'from wavytool.mw_wavy import Ui_MainWindow\n'), ((15190, 15208), 'wavytool.gui_wav2dat.ConvertWave2Data', 'ConvertWave2Data', ([], {}), '()\n', (15206, 15208), False, 'from wavytool.gui_wav2dat import ConvertWave2Data\n'), ((15544, 15582), 'os.path.join', 'os.path.join', (['self.base_path', 'filename'], {}), '(self.base_path, filename)\n', (15556, 15582), False, 'import 
os\n'), ((19381, 19434), 'logging.info', 'logging.info', (['"""File path to save image: %s"""', 'filepath'], {}), "('File path to save image: %s', filepath)\n", (19393, 19434), False, 'import logging\n'), ((19502, 19559), 'pyqtgraph.exporters.ImageExporter', 'pg.exporters.ImageExporter', (['self.plot_widget_rec.plotItem'], {}), '(self.plot_widget_rec.plotItem)\n', (19528, 19559), True, 'import pyqtgraph as pg\n'), ((19853, 19905), 'logging.info', 'logging.info', (['"""File path to save data: %s"""', 'filepath'], {}), "('File path to save data: %s', filepath)\n", (19865, 19905), False, 'import logging\n'), ((19925, 19980), 'pyqtgraph.exporters.CSVExporter', 'pg.exporters.CSVExporter', (['self.plot_widget_rec.plotItem'], {}), '(self.plot_widget_rec.plotItem)\n', (19949, 19980), True, 'import pyqtgraph as pg\n'), ((2225, 2314), 'logging.warning', 'logging.warning', (['"""No dark theme installed, use \'pip install qdarkstyle\' to install."""'], {}), '(\n "No dark theme installed, use \'pip install qdarkstyle\' to install.")\n', (2240, 2314), False, 'import logging\n'), ((2748, 2759), 'time.time', 'time.time', ([], {}), '()\n', (2757, 2759), False, 'import time\n'), ((3034, 3054), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (3043, 3054), False, 'import json\n'), ((3118, 3170), 'logging.info', 'logging.info', (['"""Data folder is: %s"""', 'window.base_path'], {}), "('Data folder is: %s', window.base_path)\n", (3130, 3170), False, 'import logging\n'), ((6616, 6648), 'numpy.empty', 'np.empty', (['(self.data.shape[0] + 5)'], {}), '(self.data.shape[0] + 5)\n', (6624, 6648), True, 'import numpy as np\n'), ((6670, 6699), 'numpy.empty', 'np.empty', (['(self.x.shape[0] + 5)'], {}), '(self.x.shape[0] + 5)\n', (6678, 6699), True, 'import numpy as np\n'), ((6952, 6977), 'pyqtgraph.mkPen', 'pg.mkPen', ([], {'color': '(r, g, b)'}), '(color=(r, g, b))\n', (6960, 6977), True, 'import pyqtgraph as pg\n'), ((2364, 2409), 'qdarkstyle.load_stylesheet_from_environment', 'qdarkstyle.load_stylesheet_from_environment', ([], {}), '()\n', (2407, 2409), False, 'import qdarkstyle\n'), ((15425, 15438), 'time.gmtime', 'time.gmtime', ([], {}), '()\n', (15436, 15438), False, 'import time\n'), ((20678, 20701), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (20696, 20701), False, 'import os\n'), ((22842, 22908), 'logging.info', 'logging.info', (['"""The image was saved in the file: %s"""', 'self.filepath'], {}), "('The image was saved in the file: %s', self.filepath)\n", (22854, 22908), False, 'import logging\n'), ((24194, 24259), 'logging.info', 'logging.info', (['"""The data was saved in the file: %s"""', 'self.filepath'], {}), "('The data was saved in the file: %s', self.filepath)\n", (24206, 24259), False, 'import logging\n'), ((21471, 21528), 'logging.info', 'logging.info', (['"""The default folder is: %s"""', 'self.base_path'], {}), "('The default folder is: %s', self.base_path)\n", (21483, 21528), False, 'import logging\n'), ((22168, 22199), 'os.path.splitext', 'os.path.splitext', (['self.filepath'], {}), '(self.filepath)\n', (22184, 22199), False, 'import os\n'), ((23392, 23423), 'os.path.splitext', 'os.path.splitext', (['self.filepath'], {}), '(self.filepath)\n', (23408, 23423), False, 'import os\n'), ((20990, 21041), 'json.dump', 'json.dump', (["{'data_folder': self.base_path}", 'outfile'], {}), "({'data_folder': self.base_path}, outfile)\n", (20999, 21041), False, 'import json\n')]
|
import numpy as np
import tensiga
from tensiga.iga.Nurbs import Nurbs
from tensiga.iga.Bspline import Bspline
from math import sqrt
import os
def UnitCube(n, p):
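    """Construct an n-dimensional unit cube as a degree-p Bspline patch."""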
dim = n
codim = n
deg = [ p for _ in range(n) ]
kv = [ np.repeat([0., 1.], deg[k]+1) for k in range(n) ]
cp_shape = [ deg[k]+1 for k in range(n) ]
# construct control points for n cube
cp = [None] * dim
cp_proto = [ np.linspace(0., 1., cp_shape[k]) for k in range(n) ]
cp[0] = np.repeat(cp_proto[0], np.prod(cp_shape[0:-1])).reshape(cp_shape)
for k in range(1, codim):
cp[k] = np.tile(np.repeat(cp_proto[k], np.prod(cp_shape[k:-1])), np.prod(cp_shape[0:k])).reshape(cp_shape)
'''
dim = 2
codim = 2
deg = [2, 2]
kv = [ np.array([0.,0.,0.,1.,1.,1.]), np.array([0.,0.,0.,1.,1.,1.]) ]
ctrlpts = []
cp_shape = (3,3)
x = [ 0., 0., 0., 0.5, 0.5, 0.5, 1., 1., 1. ]
y = [ 0., 0.5, 1., 0., 0.5, 1., 0., 0.5, 1 ]
ctrlpts.append(np.array(x, dtype=np.float).reshape(cp_shape))
ctrlpts.append(np.array(y, dtype=np.float).reshape(cp_shape))
'''
# init primitive spline
domain = Bspline(dim, codim, kv, deg, cp)
return domain
def DiscontinuousLine(p):
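    """Construct a 1D degree-p Bspline whose full knot multiplicity at 0.5 makes the basis discontinuous there."""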
deg = [p]
kv = [ np.hstack([
np.repeat(0., p+1),
np.array(0.2),
np.repeat(0.5, p+1),
np.array(0.75),
np.repeat(1., p+1) ])
]
# construct control points for n cube
cp = [ np.zeros((kv[0].size - deg[0] - 1)) ]
# init primitive spline
domain = Bspline(1, 1, kv, deg, cp)
return domain
def OpenUnitBasis(n, p, N):
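    """Construct an n-dimensional degree-p Bspline with uniform knots on [0, 1] and zero control points."""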
dim = n
codim = n
deg = [ p for _ in range(n) ]
kv = [ np.linspace(0, 1, N) for k in range(n) ]
cp_shape = [ kv[k].size-p-1 for k in range(n) ]
# construct control points for n cube
cp = [None] * dim
cp_proto = [ np.zeros(cp_shape[k]) for k in range(n) ]
cp[0] = np.repeat(cp_proto[0], np.prod(cp_shape[0:-1])).reshape(cp_shape)
for k in range(1, codim):
cp[k] = np.tile(np.repeat(cp_proto[k], np.prod(cp_shape[k:-1])), np.prod(cp_shape[0:k])).reshape(cp_shape)
# init primitive spline
domain = Bspline(dim, codim, kv, deg, cp)
return domain
def QuarterAnnulus2D(R, r):
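    """Construct an exact NURBS quarter annulus in 2D with outer radius R and inner radius r."""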
# geometry parameters
    dim = 2
    codim = 2
deg = [2, 1]
kv = [ np.array([0., 0., 0., 1., 1., 1.]), np.array([0., 0., 1., 1.])]
ctrlpts = []
cp_shape = (3, 2)
x = [ R, r, R, r, 0., 0. ] # numpy ordering
y = [ 0., 0., R, r, R, r ] #
w = [ 1., 1., 1./sqrt(2.), 1./sqrt(2.), 1., 1. ]
    # dtype=float: np.float was removed in NumPy 1.24
    ctrlpts.append(np.array(x, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(y, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(w, dtype=float).reshape(cp_shape))
# init primitive spline
domain = Nurbs(dim, codim, kv, deg, ctrlpts)
return domain
def QuarterAnnulus3D(R, r, L):
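    """Construct an exact NURBS quarter annulus (outer R, inner r) extruded to length L."""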
## define spline data
    dim = 3
    codim = 3
deg = [2, 1, 1]
kv = [ np.array([0., 0., 0., 1., 1., 1.]),
np.array([0., 0., 1., 1.]),
np.array([0., 0., 1., 1.]) ]
# this is using the numpy ordering
ctrlpts = []
cp_shape = (3,2,2)
x = [ R, R, r, r, R, R, r, r, .0, .0, .0, .0 ]
y = [ 0, 0., 0., 0., R, R, r, r, R, R, r, r ]
z = [ 0, L, 0., L, 0., L, 0., L, 0., L, 0., L ]
w = [ 1., 1., 1., 1., 1./sqrt(2.), 1./sqrt(2.), 1./sqrt(2.), 1./sqrt(2.), 1., 1., 1., 1. ]
    ctrlpts.append(np.array(x, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(y, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(z, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(w, dtype=float).reshape(cp_shape))
## init bspline object
domain = Nurbs(dim, codim, kv, deg, ctrlpts)
return domain
def Halfpipe2D(R, r):
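    """Construct an exact NURBS half annulus in 2D with outer radius R and inner radius r."""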
# geometry parameters
    dim = 2
    codim = 2
deg = [2, 1]
kv = [ np.array([0., 0., 0., .5, .5, 1., 1., 1.]), np.array([0., 0., 1., 1.])]
ctrlpts = []
cp_shape = (5, 2)
W = 1./sqrt(2.)
y = [ -R, -r, -R, -r, 0., 0., R, r, R, r ] # numpy ordering
x = [ 0., 0., R, r, R, r, R, r, 0., 0. ] #
w = [ 1., 1., W, W, 1, 1, W, W, 1., 1. ]
    ctrlpts.append(np.array(x, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(y, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(w, dtype=float).reshape(cp_shape))
# init primitive spline
domain = Nurbs(dim, codim, kv, deg, ctrlpts)
return domain
def Halfpipe3D(R, r, L):
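    """Construct a NURBS half pipe of length L with outer radius R and inner radius r."""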
    dim = 3
    codim = 3
deg = [2, 2, 2]
kv = [ np.array([0., 0., 0., 0.5, 0.5, 1., 1., 1.]),
np.array([0., 0., 0., 1., 1., 1.]),
np.array([0., 0., 0., 1., 1., 1.]) ]
ctrlpts = []
cp_shape = (5,3,3)
W = sqrt(2)
#W = 1.
x=[-R,-R,-R,-(R+r)/2,-(R+r)/2,-(R+r)/2,-r,-r,-r,-R,-R,-R,-(R+r)/2,-(R+r)/2,-(R+r)/2,-r,-r,-r,0,0,0,0,0,0,0,0,0,R,R,R,(R+r)/2,(R+r)/2,(R+r)/2,r,r,r,R,R,R,(R+r)/2,(R+r)/2,(R+r)/2,r,r,r]
y=[0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L,0,L/2,L]
z=[0,0,0,0,0,0,0,0,0,R,R,R,(R+r)/2,(R+r)/2,(R+r)/2,r,r,r,R,R,R,(R+r)/2,(R+r)/2,(R+r)/2,r,r,r,R,R,R,(R+r)/2,(R+r)/2,(R+r)/2,r,r,r,0,0,0,0,0,0,0,0,0]
w=[1.,1.,1.,1.,1.,1.,1.,1.,1.,1./W,1./W,1./W,1./W,1./W,1./W,1./W,1./W,1./W,1,1,1,1,1,1,1,1,1,1./W,1./W,1./W,1./W,1./W,1./W,1./W,1./W,1./W,1.,1.,1.,1.,1.,1.,1.,1.,1.]
    ctrlpts.append(np.array(x, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(y, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(z, dtype=float).reshape(cp_shape))
    ctrlpts.append(np.array(w, dtype=float).reshape(cp_shape))
## init bspline object
domain = Nurbs(dim, codim, kv, deg, ctrlpts)
return domain
def Shell3d():
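    """Build a NURBS shell from the Rhino-exported control point data shipped with tensiga."""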
dim = 3
codim = 3
deg = [2, 2, 2]
kv = [ np.array([0, 0, 0, 1.5707963267949, 1.5707963267949, 3.14159265358979, 3.14159265358979, 4.71238898038469, 4.71238898038469, 6.28318530717959, 6.28318530717959, 6.28318530717959]),
np.array([-88.6003574854838,-88.6003574854838,-88.6003574854838,-2,-2,-1,-1,0,0,0])+88.6003574854838,
np.array([0.,0.,0.,1.,1.,1.])]
kv = [ v/v[-1] for v in kv ]
module_path = os.path.dirname(tensiga.__file__)
inner = np.loadtxt(module_path+'/utils/rhino_data/cps_inner.txt')
center = np.loadtxt(module_path+'/utils/rhino_data/cps_center.txt')
outer = np.loadtxt(module_path+'/utils/rhino_data/cps_outer.txt')
x, y, z, w = [], [], [], []
surfs = [outer, center, inner]
for surf in surfs:
# extract weights
w_surf = surf[:,3]
# project back
npts = surf.shape[0]
surf = surf[:,0:3]/w_surf.reshape(npts, -1)
x_surf = surf[:,0]
y_surf = surf[:,1]
z_surf = surf[:,2]
x.append(x_surf)
y.append(y_surf)
z.append(z_surf)
w.append(w_surf)
x = np.ascontiguousarray(np.hstack(x))
y = np.ascontiguousarray(np.hstack(y))
z = np.ascontiguousarray(np.hstack(z))
w = np.ascontiguousarray(np.hstack(w))
cp_shape = (9,7,3)
ctrlpts = [np.array(x).reshape(cp_shape, order='F'),
np.array(y).reshape(cp_shape, order='F'),
np.array(z).reshape(cp_shape, order='F'),
np.array(w).reshape(cp_shape, order='F')]
spline = Nurbs(dim, codim, kv, deg, ctrlpts)
return spline
# gets C0 bspline mesh of given size for any domain
def interpolation_mesh(domain, ref_nodes, p=1):
idomain = UnitCube(domain.dim, 1)
for k in range(0, idomain.dim):
idomain.href(ref_nodes[k], k)
ep = [ np.unique(kv) for kv in idomain.kv ]
ctrlpts = [ domain.eval(ep, k) for k in range(domain.dim) ]
idomain.ctrlpts = ctrlpts
if int(p) > 1:
for k in range(idomain.dim):
idomain.pref(p-1, k)
return idomain
|
[
"math.sqrt",
"tensiga.iga.Bspline.Bspline",
"os.path.dirname",
"numpy.zeros",
"numpy.hstack",
"tensiga.iga.Nurbs.Nurbs",
"numpy.prod",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"numpy.unique",
"numpy.repeat"
] |
[((1134, 1166), 'tensiga.iga.Bspline.Bspline', 'Bspline', (['dim', 'codim', 'kv', 'deg', 'cp'], {}), '(dim, codim, kv, deg, cp)\n', (1141, 1166), False, 'from tensiga.iga.Bspline import Bspline\n'), ((1547, 1573), 'tensiga.iga.Bspline.Bspline', 'Bspline', (['(1)', '(1)', 'kv', 'deg', 'cp'], {}), '(1, 1, kv, deg, cp)\n', (1554, 1573), False, 'from tensiga.iga.Bspline import Bspline\n'), ((2175, 2207), 'tensiga.iga.Bspline.Bspline', 'Bspline', (['dim', 'codim', 'kv', 'deg', 'cp'], {}), '(dim, codim, kv, deg, cp)\n', (2182, 2207), False, 'from tensiga.iga.Bspline import Bspline\n'), ((2815, 2850), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (2820, 2850), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((3734, 3769), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (3739, 3769), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((4421, 4456), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (4426, 4456), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((4751, 4758), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (4755, 4758), False, 'from math import sqrt\n'), ((5716, 5751), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (5721, 5751), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((6231, 6264), 'os.path.dirname', 'os.path.dirname', (['tensiga.__file__'], {}), '(tensiga.__file__)\n', (6246, 6264), False, 'import os\n'), ((6277, 6336), 'numpy.loadtxt', 'np.loadtxt', (["(module_path + '/utils/rhino_data/cps_inner.txt')"], {}), "(module_path + '/utils/rhino_data/cps_inner.txt')\n", (6287, 6336), True, 'import numpy as np\n'), ((6348, 6408), 'numpy.loadtxt', 'np.loadtxt', (["(module_path + '/utils/rhino_data/cps_center.txt')"], {}), "(module_path + '/utils/rhino_data/cps_center.txt')\n", (6358, 6408), True, 'import numpy as np\n'), ((6419, 6478), 'numpy.loadtxt', 'np.loadtxt', (["(module_path + '/utils/rhino_data/cps_outer.txt')"], {}), "(module_path + '/utils/rhino_data/cps_outer.txt')\n", (6429, 6478), True, 'import numpy as np\n'), ((7349, 7384), 'tensiga.iga.Nurbs.Nurbs', 'Nurbs', (['dim', 'codim', 'kv', 'deg', 'ctrlpts'], {}), '(dim, codim, kv, deg, ctrlpts)\n', (7354, 7384), False, 'from tensiga.iga.Nurbs import Nurbs\n'), ((234, 267), 'numpy.repeat', 'np.repeat', (['[0.0, 1.0]', '(deg[k] + 1)'], {}), '([0.0, 1.0], deg[k] + 1)\n', (243, 267), True, 'import numpy as np\n'), ((412, 446), 'numpy.linspace', 'np.linspace', (['(0.0)', '(1.0)', 'cp_shape[k]'], {}), '(0.0, 1.0, cp_shape[k])\n', (423, 446), True, 'import numpy as np\n'), ((1467, 1500), 'numpy.zeros', 'np.zeros', (['(kv[0].size - deg[0] - 1)'], {}), '(kv[0].size - deg[0] - 1)\n', (1475, 1500), True, 'import numpy as np\n'), ((1692, 1712), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (1703, 1712), True, 'import numpy as np\n'), ((1867, 1888), 'numpy.zeros', 'np.zeros', (['cp_shape[k]'], {}), '(cp_shape[k])\n', (1875, 1888), True, 'import numpy as np\n'), ((2337, 2377), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (2345, 2377), True, 'import numpy as np\n'), ((2373, 2403), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (2381, 2403), True, 'import numpy as np\n'), ((2986, 3026), 'numpy.array', 'np.array', (['[0.0, 0.0, 
0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (2994, 3026), True, 'import numpy as np\n'), ((3033, 3063), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (3041, 3063), True, 'import numpy as np\n'), ((3072, 3102), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (3080, 3102), True, 'import numpy as np\n'), ((3893, 3943), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0])\n', (3901, 3943), True, 'import numpy as np\n'), ((3937, 3967), 'numpy.array', 'np.array', (['[0.0, 0.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 1.0, 1.0])\n', (3945, 3967), True, 'import numpy as np\n'), ((4016, 4025), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (4020, 4025), False, 'from math import sqrt\n'), ((4560, 4610), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 0.5, 0.5, 1.0, 1.0, 1.0])\n', (4568, 4610), True, 'import numpy as np\n'), ((4617, 4657), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (4625, 4657), True, 'import numpy as np\n'), ((4664, 4704), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (4672, 4704), True, 'import numpy as np\n'), ((5844, 6032), 'numpy.array', 'np.array', (['[0, 0, 0, 1.5707963267949, 1.5707963267949, 3.14159265358979, \n 3.14159265358979, 4.71238898038469, 4.71238898038469, 6.28318530717959,\n 6.28318530717959, 6.28318530717959]'], {}), '([0, 0, 0, 1.5707963267949, 1.5707963267949, 3.14159265358979, \n 3.14159265358979, 4.71238898038469, 4.71238898038469, 6.28318530717959,\n 6.28318530717959, 6.28318530717959])\n', (5852, 6032), True, 'import numpy as np\n'), ((6148, 6188), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 1.0, 1.0, 1.0]'], {}), '([0.0, 0.0, 0.0, 1.0, 1.0, 1.0])\n', (6156, 6188), True, 'import numpy as np\n'), ((6940, 6952), 'numpy.hstack', 'np.hstack', (['x'], {}), '(x)\n', (6949, 6952), True, 'import numpy as np\n'), ((6983, 6995), 'numpy.hstack', 'np.hstack', (['y'], {}), '(y)\n', (6992, 6995), True, 'import numpy as np\n'), ((7026, 7038), 'numpy.hstack', 'np.hstack', (['z'], {}), '(z)\n', (7035, 7038), True, 'import numpy as np\n'), ((7069, 7081), 'numpy.hstack', 'np.hstack', (['w'], {}), '(w)\n', (7078, 7081), True, 'import numpy as np\n'), ((7633, 7646), 'numpy.unique', 'np.unique', (['kv'], {}), '(kv)\n', (7642, 7646), True, 'import numpy as np\n'), ((2543, 2552), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2547, 2552), False, 'from math import sqrt\n'), ((2556, 2565), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (2560, 2565), False, 'from math import sqrt\n'), ((3363, 3372), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3367, 3372), False, 'from math import sqrt\n'), ((3376, 3385), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3380, 3385), False, 'from math import sqrt\n'), ((3389, 3398), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3393, 3398), False, 'from math import sqrt\n'), ((3402, 3411), 'math.sqrt', 'sqrt', (['(2.0)'], {}), '(2.0)\n', (3406, 3411), False, 'from math import sqrt\n'), ((6036, 6133), 'numpy.array', 'np.array', (['[-88.6003574854838, -88.6003574854838, -88.6003574854838, -2, -2, -1, -1, 0,\n 0, 0]'], {}), '([-88.6003574854838, -88.6003574854838, -88.6003574854838, -2, -2, \n -1, -1, 0, 0, 0])\n', (6044, 6133), True, 'import numpy as np\n'), ((501, 524), 'numpy.prod', 'np.prod', 
(['cp_shape[0:-1]'], {}), '(cp_shape[0:-1])\n', (508, 524), True, 'import numpy as np\n'), ((1261, 1282), 'numpy.repeat', 'np.repeat', (['(0.0)', '(p + 1)'], {}), '(0.0, p + 1)\n', (1270, 1282), True, 'import numpy as np\n'), ((1293, 1306), 'numpy.array', 'np.array', (['(0.2)'], {}), '(0.2)\n', (1301, 1306), True, 'import numpy as np\n'), ((1320, 1341), 'numpy.repeat', 'np.repeat', (['(0.5)', '(p + 1)'], {}), '(0.5, p + 1)\n', (1329, 1341), True, 'import numpy as np\n'), ((1353, 1367), 'numpy.array', 'np.array', (['(0.75)'], {}), '(0.75)\n', (1361, 1367), True, 'import numpy as np\n'), ((1381, 1402), 'numpy.repeat', 'np.repeat', (['(1.0)', '(p + 1)'], {}), '(1.0, p + 1)\n', (1390, 1402), True, 'import numpy as np\n'), ((1945, 1968), 'numpy.prod', 'np.prod', (['cp_shape[0:-1]'], {}), '(cp_shape[0:-1])\n', (1952, 1968), True, 'import numpy as np\n'), ((2594, 2621), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (2602, 2621), True, 'import numpy as np\n'), ((2660, 2687), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (2668, 2687), True, 'import numpy as np\n'), ((2726, 2753), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (2734, 2753), True, 'import numpy as np\n'), ((3448, 3475), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (3456, 3475), True, 'import numpy as np\n'), ((3514, 3541), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (3522, 3541), True, 'import numpy as np\n'), ((3580, 3607), 'numpy.array', 'np.array', (['z'], {'dtype': 'np.float'}), '(z, dtype=np.float)\n', (3588, 3607), True, 'import numpy as np\n'), ((3646, 3673), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (3654, 3673), True, 'import numpy as np\n'), ((4200, 4227), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (4208, 4227), True, 'import numpy as np\n'), ((4266, 4293), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (4274, 4293), True, 'import numpy as np\n'), ((4332, 4359), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (4340, 4359), True, 'import numpy as np\n'), ((5430, 5457), 'numpy.array', 'np.array', (['x'], {'dtype': 'np.float'}), '(x, dtype=np.float)\n', (5438, 5457), True, 'import numpy as np\n'), ((5496, 5523), 'numpy.array', 'np.array', (['y'], {'dtype': 'np.float'}), '(y, dtype=np.float)\n', (5504, 5523), True, 'import numpy as np\n'), ((5562, 5589), 'numpy.array', 'np.array', (['z'], {'dtype': 'np.float'}), '(z, dtype=np.float)\n', (5570, 5589), True, 'import numpy as np\n'), ((5628, 5655), 'numpy.array', 'np.array', (['w'], {'dtype': 'np.float'}), '(w, dtype=np.float)\n', (5636, 5655), True, 'import numpy as np\n'), ((7122, 7133), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (7130, 7133), True, 'import numpy as np\n'), ((7179, 7190), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (7187, 7190), True, 'import numpy as np\n'), ((7236, 7247), 'numpy.array', 'np.array', (['z'], {}), '(z)\n', (7244, 7247), True, 'import numpy as np\n'), ((7293, 7304), 'numpy.array', 'np.array', (['w'], {}), '(w)\n', (7301, 7304), True, 'import numpy as np\n'), ((647, 669), 'numpy.prod', 'np.prod', (['cp_shape[0:k]'], {}), '(cp_shape[0:k])\n', (654, 669), True, 'import numpy as np\n'), ((2091, 2113), 'numpy.prod', 'np.prod', (['cp_shape[0:k]'], {}), '(cp_shape[0:k])\n', (2098, 2113), True, 'import 
numpy as np\n'), ((621, 644), 'numpy.prod', 'np.prod', (['cp_shape[k:-1]'], {}), '(cp_shape[k:-1])\n', (628, 644), True, 'import numpy as np\n'), ((2065, 2088), 'numpy.prod', 'np.prod', (['cp_shape[k:-1]'], {}), '(cp_shape[k:-1])\n', (2072, 2088), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 15:45:53 2020
@author: Antony
"""
import matplotlib.pyplot as plt
import time
from skimage.draw import random_shapes
import numpy as np
import astra
def cirmask(im, npx=0):
"""
Apply a circular mask to the image
"""
sz = np.floor(im.shape[0])
x = np.arange(0,sz)
x = np.tile(x,(int(sz),1))
y = np.swapaxes(x,0,1)
xc = np.round(sz/2)
yc = np.round(sz/2)
    r = np.sqrt((x - xc)**2 + (y - yc)**2)
dim = im.shape
if len(dim)==2:
im = np.where(r>np.floor(sz/2) - npx,0,im)
elif len(dim)==3:
        for ii in range(0, dim[2]):
            # Apply the same npx shrink as the 2D branch for consistency.
            im[:,:,ii] = np.where(r > np.floor(sz/2) - npx, 0, im[:,:,ii])
return(im)
#%% Create a test image
start=time.time()
sz=64; min_shapes=3; max_shapes=10; min_size=2; max_size=10
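# NOTE: scikit-image >= 0.19 deprecates multichannel=False in favor of channel_axis=None.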
image, _ = random_shapes((sz, sz), min_shapes=min_shapes, max_shapes=max_shapes, multichannel=False,
min_size=min_size, max_size=max_size, allow_overlap=True)
image = np.where(image==255, 1, image)
image = cirmask(image,5)
image = image/np.max(image)
ct = 2**8
image = np.random.poisson(lam=(image)*ct, size=None)/ct
image = image/np.max(image)
print((time.time()-start))
plt.figure(1);plt.clf();plt.imshow(image, cmap='jet');plt.show();
#%% Perform first a test for sinogram creation and image reconstruction
npr = image.shape[0] # Number of projections
# Create a basic square volume geometry
vol_geom = astra.create_vol_geom(image.shape[0], image.shape[0])
# Create a parallel beam geometry with 180 angles between 0 and pi, and image.shape[0] detector pixels of width 1.
proj_geom = astra.create_proj_geom('parallel', 1.0, int(1.0*image.shape[0]), np.linspace(0,np.pi,npr,False))
# Create a sinogram using the GPU.
proj_id = astra.create_projector('cuda',proj_geom,vol_geom)
start=time.time()
sinogram_id, sinogram = astra.create_sino(image, proj_id)
print((time.time()-start))
plt.figure(1);plt.clf();plt.imshow(sinogram, cmap='jet');plt.show();
# Create a data object for the reconstruction
rec_id = astra.data2d.create('-vol', vol_geom)
# Set up the parameters for a reconstruction algorithm using the GPU
# cfg = astra.astra_dict('SIRT_CUDA')
cfg = astra.astra_dict('FBP_CUDA')
cfg['ReconstructionDataId'] = rec_id
cfg['ProjectionDataId'] = sinogram_id
cfg['option'] = { 'FilterType': 'shepp-logan' }
# Create the algorithm object from the configuration structure
alg_id = astra.algorithm.create(cfg)
astra.algorithm.run(alg_id)
# Get the result
start=time.time()
rec = astra.data2d.get(rec_id)
print((time.time()-start))
rec = np.where(rec<0, 0, rec)
rec = cirmask(rec)
plt.figure(2);plt.clf();plt.imshow(np.concatenate((rec,image),axis=1), cmap='jet');
plt.colorbar();
plt.show();
astra.data2d.delete(sinogram_id)
astra.projector.delete(proj_id)
astra.algorithm.delete(alg_id)
astra.data2d.delete(rec_id)
#%% Perform a test using the SampleGen module
import os
from pathlib import Path
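# NOTE: machine-specific paths below; the second assignment overrides the first,
# so keep the one that matches your machine.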
p = Path("C:\\Users\\Antony\\Documents\\GitHub\\NNs_in_Tensorflow2\\Libraries")
p = Path("C:\\Users\\Simon\\Documents\\GitHub\\NNs_in_Tensorflow2\\Libraries")
os.chdir(p)
from scipy.ndimage import gaussian_filter, uniform_filter, median_filter
import SampleGen as sg
sml = sg.random_sample()
sz=64; min_shapes=3; max_shapes=10; min_size=2; max_size=10
sml.set_pars( sz=sz, min_shapes=min_shapes, max_shapes=max_shapes, min_size = min_size, max_size=max_size)
sml.create_image()
ct = 2**8
sml.im = np.random.poisson(lam=(sml.im)*ct, size=None)/ct
sml.im = sml.im/np.max(sml.im)
# Apply a filter
sml.im = gaussian_filter(sml.im, sigma=1)
# sml.im = uniform_filter(sml.im, size=3)
# sml.im = uniform_filter(sml.im, size=5)
sml.im = cirmask(sml.im,5)
plt.figure(1);plt.clf();plt.imshow(sml.im, cmap='jet');plt.colorbar();plt.show();
sml.create_sino_geo()
sml.create_sino()
plt.figure(2);plt.clf();plt.imshow(sml.sinogram, cmap='jet');plt.colorbar();plt.show();
#%% Create a library
nims = 100000
filtering = 1
noise = 1
scaling = 0
sz=32; min_shapes=3; max_shapes=10; min_size=3; max_size=10
ct = 2**8
start=time.time()
im = np.zeros((sz,sz,nims))
s = np.zeros((sz,sz,nims))
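# Random per-image scale factors: uniform [0, 1) times 10^-U(0, 2).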
sf = np.random.rand(nims,)*10.0**-np.random.uniform(0, 2, nims)
for ii in range(nims):
sml.set_pars( sz=sz, min_shapes=min_shapes, max_shapes=max_shapes, min_size = min_size, max_size=max_size)
image = sml.create_image()
image = cirmask(image,5)
if np.max(image)>0:
image = image/np.max(image)
if noise == True:
image = np.random.poisson(lam=(image)*ct, size=None)/ct
if np.max(image)>0:
image = image/np.max(image)
if filtering == True:
image = gaussian_filter(image, sigma=1)
if np.max(image)>0:
image = image/np.max(image)
if scaling == True:
image = image * sf[ii]
sml.im = image
im[:,:,ii] = image
s[:,:,ii] = sml.create_sino()
if np.mod(ii, 100) == 0:
print(ii)
print((time.time()-start))
# sml.astraclean()
#%%
for ii in range(0, nims):
plt.figure(2);plt.clf();
plt.imshow(im[:,:,ii], cmap = 'jet')
plt.colorbar();
plt.show()
plt.pause(1)
#%%
import h5py
p = Path('./')
fn = Path("%s\\shapes_random_noise_%dpx_norm.h5" %(p, sz))
h5f = h5py.File(fn, "w")
h5f.create_dataset('Sinograms', data = s)
h5f.create_dataset('Images', data = im)
h5f.create_dataset('ScaleFactor', data = sf)
h5f.create_dataset('Noise', data = noise)
h5f.create_dataset('NImages', data = nims)
h5f.create_dataset('ImageSize', data = sz)
h5f.close()
|
[
"matplotlib.pyplot.clf",
"numpy.floor",
"matplotlib.pyplot.figure",
"astra.data2d.get",
"pathlib.Path",
"SampleGen.random_sample",
"numpy.arange",
"numpy.round",
"astra.create_vol_geom",
"os.chdir",
"astra.create_projector",
"numpy.random.rand",
"skimage.draw.random_shapes",
"matplotlib.pyplot.imshow",
"scipy.ndimage.gaussian_filter",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.swapaxes",
"numpy.random.poisson",
"numpy.linspace",
"matplotlib.pyplot.pause",
"astra.algorithm.delete",
"astra.data2d.create",
"h5py.File",
"matplotlib.pyplot.show",
"numpy.mod",
"astra.algorithm.create",
"numpy.concatenate",
"astra.data2d.delete",
"numpy.random.uniform",
"astra.create_sino",
"numpy.zeros",
"time.time",
"numpy.where",
"astra.astra_dict",
"astra.projector.delete",
"astra.algorithm.run",
"numpy.sqrt"
] |
[((820, 831), 'time.time', 'time.time', ([], {}), '()\n', (829, 831), False, 'import time\n'), ((907, 1063), 'skimage.draw.random_shapes', 'random_shapes', (['(sz, sz)'], {'min_shapes': 'min_shapes', 'max_shapes': 'max_shapes', 'multichannel': '(False)', 'min_size': 'min_size', 'max_size': 'max_size', 'allow_overlap': '(True)'}), '((sz, sz), min_shapes=min_shapes, max_shapes=max_shapes,\n multichannel=False, min_size=min_size, max_size=max_size, allow_overlap\n =True)\n', (920, 1063), False, 'from skimage.draw import random_shapes\n'), ((1090, 1122), 'numpy.where', 'np.where', (['(image == 255)', '(1)', 'image'], {}), '(image == 255, 1, image)\n', (1098, 1122), True, 'import numpy as np\n'), ((1310, 1323), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1320, 1323), True, 'import matplotlib.pyplot as plt\n'), ((1324, 1333), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1331, 1333), True, 'import matplotlib.pyplot as plt\n'), ((1334, 1363), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {'cmap': '"""jet"""'}), "(image, cmap='jet')\n", (1344, 1363), True, 'import matplotlib.pyplot as plt\n'), ((1364, 1374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1372, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1554, 1607), 'astra.create_vol_geom', 'astra.create_vol_geom', (['image.shape[0]', 'image.shape[0]'], {}), '(image.shape[0], image.shape[0])\n', (1575, 1607), False, 'import astra\n'), ((1881, 1932), 'astra.create_projector', 'astra.create_projector', (['"""cuda"""', 'proj_geom', 'vol_geom'], {}), "('cuda', proj_geom, vol_geom)\n", (1903, 1932), False, 'import astra\n'), ((1940, 1951), 'time.time', 'time.time', ([], {}), '()\n', (1949, 1951), False, 'import time\n'), ((1977, 2010), 'astra.create_sino', 'astra.create_sino', (['image', 'proj_id'], {}), '(image, proj_id)\n', (1994, 2010), False, 'import astra\n'), ((2042, 2055), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (2052, 2055), True, 'import matplotlib.pyplot as plt\n'), ((2056, 2065), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2063, 2065), True, 'import matplotlib.pyplot as plt\n'), ((2066, 2098), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sinogram'], {'cmap': '"""jet"""'}), "(sinogram, cmap='jet')\n", (2076, 2098), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2109), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2107, 2109), True, 'import matplotlib.pyplot as plt\n'), ((2170, 2207), 'astra.data2d.create', 'astra.data2d.create', (['"""-vol"""', 'vol_geom'], {}), "('-vol', vol_geom)\n", (2189, 2207), False, 'import astra\n'), ((2326, 2354), 'astra.astra_dict', 'astra.astra_dict', (['"""FBP_CUDA"""'], {}), "('FBP_CUDA')\n", (2342, 2354), False, 'import astra\n'), ((2557, 2584), 'astra.algorithm.create', 'astra.algorithm.create', (['cfg'], {}), '(cfg)\n', (2579, 2584), False, 'import astra\n'), ((2586, 2613), 'astra.algorithm.run', 'astra.algorithm.run', (['alg_id'], {}), '(alg_id)\n', (2605, 2613), False, 'import astra\n'), ((2641, 2652), 'time.time', 'time.time', ([], {}), '()\n', (2650, 2652), False, 'import time\n'), ((2660, 2684), 'astra.data2d.get', 'astra.data2d.get', (['rec_id'], {}), '(rec_id)\n', (2676, 2684), False, 'import astra\n'), ((2722, 2747), 'numpy.where', 'np.where', (['(rec < 0)', '(0)', 'rec'], {}), '(rec < 0, 0, rec)\n', (2730, 2747), True, 'import numpy as np\n'), ((2769, 2782), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (2779, 2782), True, 'import matplotlib.pyplot as plt\n'), ((2783, 
2792), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2790, 2792), True, 'import matplotlib.pyplot as plt\n'), ((2854, 2868), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (2866, 2868), True, 'import matplotlib.pyplot as plt\n'), ((2871, 2881), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2879, 2881), True, 'import matplotlib.pyplot as plt\n'), ((2886, 2918), 'astra.data2d.delete', 'astra.data2d.delete', (['sinogram_id'], {}), '(sinogram_id)\n', (2905, 2918), False, 'import astra\n'), ((2920, 2951), 'astra.projector.delete', 'astra.projector.delete', (['proj_id'], {}), '(proj_id)\n', (2942, 2951), False, 'import astra\n'), ((2953, 2983), 'astra.algorithm.delete', 'astra.algorithm.delete', (['alg_id'], {}), '(alg_id)\n', (2975, 2983), False, 'import astra\n'), ((2985, 3012), 'astra.data2d.delete', 'astra.data2d.delete', (['rec_id'], {}), '(rec_id)\n', (3004, 3012), False, 'import astra\n'), ((3106, 3181), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\Antony\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries"""'], {}), "('C:\\\\Users\\\\Antony\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries')\n", (3110, 3181), False, 'from pathlib import Path\n'), ((3187, 3261), 'pathlib.Path', 'Path', (['"""C:\\\\Users\\\\Simon\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries"""'], {}), "('C:\\\\Users\\\\Simon\\\\Documents\\\\GitHub\\\\NNs_in_Tensorflow2\\\\Libraries')\n", (3191, 3261), False, 'from pathlib import Path\n'), ((3263, 3274), 'os.chdir', 'os.chdir', (['p'], {}), '(p)\n', (3271, 3274), False, 'import os\n'), ((3384, 3402), 'SampleGen.random_sample', 'sg.random_sample', ([], {}), '()\n', (3400, 3402), True, 'import SampleGen as sg\n'), ((3730, 3762), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['sml.im'], {'sigma': '(1)'}), '(sml.im, sigma=1)\n', (3745, 3762), False, 'from scipy.ndimage import gaussian_filter, uniform_filter, median_filter\n'), ((3882, 3895), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3892, 3895), True, 'import matplotlib.pyplot as plt\n'), ((3896, 3905), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3903, 3905), True, 'import matplotlib.pyplot as plt\n'), ((3906, 3936), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sml.im'], {'cmap': '"""jet"""'}), "(sml.im, cmap='jet')\n", (3916, 3936), True, 'import matplotlib.pyplot as plt\n'), ((3937, 3951), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3949, 3951), True, 'import matplotlib.pyplot as plt\n'), ((3952, 3962), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3960, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4011, 4024), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (4021, 4024), True, 'import matplotlib.pyplot as plt\n'), ((4025, 4034), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4032, 4034), True, 'import matplotlib.pyplot as plt\n'), ((4035, 4071), 'matplotlib.pyplot.imshow', 'plt.imshow', (['sml.sinogram'], {'cmap': '"""jet"""'}), "(sml.sinogram, cmap='jet')\n", (4045, 4071), True, 'import matplotlib.pyplot as plt\n'), ((4072, 4086), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4084, 4086), True, 'import matplotlib.pyplot as plt\n'), ((4087, 4097), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4095, 4097), True, 'import matplotlib.pyplot as plt\n'), ((4268, 4279), 'time.time', 'time.time', ([], {}), '()\n', (4277, 4279), False, 'import time\n'), ((4288, 4312), 'numpy.zeros', 'np.zeros', (['(sz, sz, nims)'], {}), '((sz, sz, 
nims))\n', (4296, 4312), True, 'import numpy as np\n'), ((4316, 4340), 'numpy.zeros', 'np.zeros', (['(sz, sz, nims)'], {}), '((sz, sz, nims))\n', (4324, 4340), True, 'import numpy as np\n'), ((5504, 5514), 'pathlib.Path', 'Path', (['"""./"""'], {}), "('./')\n", (5508, 5514), False, 'from pathlib import Path\n'), ((5523, 5577), 'pathlib.Path', 'Path', (["('%s\\\\shapes_random_noise_%dpx_norm.h5' % (p, sz))"], {}), "('%s\\\\shapes_random_noise_%dpx_norm.h5' % (p, sz))\n", (5527, 5577), False, 'from pathlib import Path\n'), ((5586, 5604), 'h5py.File', 'h5py.File', (['fn', '"""w"""'], {}), "(fn, 'w')\n", (5595, 5604), False, 'import h5py\n'), ((331, 352), 'numpy.floor', 'np.floor', (['im.shape[0]'], {}), '(im.shape[0])\n', (339, 352), True, 'import numpy as np\n'), ((362, 378), 'numpy.arange', 'np.arange', (['(0)', 'sz'], {}), '(0, sz)\n', (371, 378), True, 'import numpy as np\n'), ((419, 439), 'numpy.swapaxes', 'np.swapaxes', (['x', '(0)', '(1)'], {}), '(x, 0, 1)\n', (430, 439), True, 'import numpy as np\n'), ((454, 470), 'numpy.round', 'np.round', (['(sz / 2)'], {}), '(sz / 2)\n', (462, 470), True, 'import numpy as np\n'), ((479, 495), 'numpy.round', 'np.round', (['(sz / 2)'], {}), '(sz / 2)\n', (487, 495), True, 'import numpy as np\n'), ((509, 547), 'numpy.sqrt', 'np.sqrt', (['((x - xc) ** 2 + (y - yc) ** 2)'], {}), '((x - xc) ** 2 + (y - yc) ** 2)\n', (516, 547), True, 'import numpy as np\n'), ((1162, 1175), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1168, 1175), True, 'import numpy as np\n'), ((1200, 1244), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(image * ct)', 'size': 'None'}), '(lam=image * ct, size=None)\n', (1217, 1244), True, 'import numpy as np\n'), ((1263, 1276), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (1269, 1276), True, 'import numpy as np\n'), ((1802, 1835), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'npr', '(False)'], {}), '(0, np.pi, npr, False)\n', (1813, 1835), True, 'import numpy as np\n'), ((2804, 2840), 'numpy.concatenate', 'np.concatenate', (['(rec, image)'], {'axis': '(1)'}), '((rec, image), axis=1)\n', (2818, 2840), True, 'import numpy as np\n'), ((3619, 3664), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(sml.im * ct)', 'size': 'None'}), '(lam=sml.im * ct, size=None)\n', (3636, 3664), True, 'import numpy as np\n'), ((3685, 3699), 'numpy.max', 'np.max', (['sml.im'], {}), '(sml.im)\n', (3691, 3699), True, 'import numpy as np\n'), ((4347, 4367), 'numpy.random.rand', 'np.random.rand', (['nims'], {}), '(nims)\n', (4361, 4367), True, 'import numpy as np\n'), ((5335, 5348), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (5345, 5348), True, 'import matplotlib.pyplot as plt\n'), ((5349, 5358), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5356, 5358), True, 'import matplotlib.pyplot as plt\n'), ((5371, 5407), 'matplotlib.pyplot.imshow', 'plt.imshow', (['im[:, :, ii]'], {'cmap': '"""jet"""'}), "(im[:, :, ii], cmap='jet')\n", (5381, 5407), True, 'import matplotlib.pyplot as plt\n'), ((5419, 5433), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5431, 5433), True, 'import matplotlib.pyplot as plt\n'), ((5446, 5456), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5454, 5456), True, 'import matplotlib.pyplot as plt\n'), ((5464, 5476), 'matplotlib.pyplot.pause', 'plt.pause', (['(1)'], {}), '(1)\n', (5473, 5476), True, 'import matplotlib.pyplot as plt\n'), ((1287, 1298), 'time.time', 'time.time', ([], {}), '()\n', (1296, 1298), False, 'import time\n'), ((2019, 
2030), 'time.time', 'time.time', ([], {}), '()\n', (2028, 2030), False, 'import time\n'), ((2693, 2704), 'time.time', 'time.time', ([], {}), '()\n', (2702, 2704), False, 'import time\n'), ((4637, 4650), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4643, 4650), True, 'import numpy as np\n'), ((4901, 4932), 'scipy.ndimage.gaussian_filter', 'gaussian_filter', (['image'], {'sigma': '(1)'}), '(image, sigma=1)\n', (4916, 4932), False, 'from scipy.ndimage import gaussian_filter, uniform_filter, median_filter\n'), ((5185, 5200), 'numpy.mod', 'np.mod', (['ii', '(100)'], {}), '(ii, 100)\n', (5191, 5200), True, 'import numpy as np\n'), ((5240, 5251), 'time.time', 'time.time', ([], {}), '()\n', (5249, 5251), False, 'import time\n'), ((4376, 4405), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(2)', 'nims'], {}), '(0, 2, nims)\n', (4393, 4405), True, 'import numpy as np\n'), ((4677, 4690), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4683, 4690), True, 'import numpy as np\n'), ((4737, 4781), 'numpy.random.poisson', 'np.random.poisson', ([], {'lam': '(image * ct)', 'size': 'None'}), '(lam=image * ct, size=None)\n', (4754, 4781), True, 'import numpy as np\n'), ((4797, 4810), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4803, 4810), True, 'import numpy as np\n'), ((4945, 4958), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4951, 4958), True, 'import numpy as np\n'), ((4841, 4854), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4847, 4854), True, 'import numpy as np\n'), ((4989, 5002), 'numpy.max', 'np.max', (['image'], {}), '(image)\n', (4995, 5002), True, 'import numpy as np\n'), ((616, 632), 'numpy.floor', 'np.floor', (['(sz / 2)'], {}), '(sz / 2)\n', (624, 632), True, 'import numpy as np\n'), ((739, 755), 'numpy.floor', 'np.floor', (['(sz / 2)'], {}), '(sz / 2)\n', (747, 755), True, 'import numpy as np\n')]
|
import unittest
import math
import pyomo.environ as pe
import coramin
import numpy as np
from coramin.relaxations.segments import compute_k_segment_points
class TestUnivariateExp(unittest.TestCase):
@classmethod
def setUpClass(cls):
model = pe.ConcreteModel()
cls.model = model
model.y = pe.Var()
model.x = pe.Var(bounds=(-1.5, 1.5))
model.obj = pe.Objective(expr=model.y, sense=pe.maximize)
model.pw_exp = coramin.relaxations.PWUnivariateRelaxation()
model.pw_exp.build(x=model.x, aux_var=model.y, pw_repn='INC', shape=coramin.utils.FunctionShape.CONVEX,
relaxation_side=coramin.utils.RelaxationSide.BOTH, f_x_expr=pe.exp(model.x))
model.pw_exp.add_partition_point(-0.5)
model.pw_exp.add_partition_point(0.5)
model.pw_exp.rebuild()
@classmethod
def tearDownClass(cls):
pass
def test_exp_ub(self):
model = self.model.clone()
solver = pe.SolverFactory('gurobi_direct')
solver.solve(model)
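        # For convex exp, the piecewise secants over-estimate and coincide with
        # exp at the partition endpoints, so the maximum is exp(1.5) at x = 1.5.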
self.assertAlmostEqual(pe.value(model.y), math.exp(1.5), 4)
def test_exp_mid(self):
model = self.model.clone()
model.x_con = pe.Constraint(expr=model.x <= 0.3)
solver = pe.SolverFactory('gurobi_direct')
solver.solve(model)
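        # The over-estimator on the segment [-0.5, 0.5] is the chord of exp; at
        # x = 0.3: exp(-0.5) + 0.8*(exp(0.5) - exp(-0.5)) ~= 1.4403.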
self.assertAlmostEqual(pe.value(model.y), 1.44, 3)
def test_exp_lb(self):
model = self.model.clone()
model.obj.sense = pe.minimize
solver = pe.SolverFactory('gurobi_direct')
solver.solve(model)
self.assertAlmostEqual(pe.value(model.y), math.exp(-1.5), 4)
class TestUnivariate(unittest.TestCase):
def helper(self, func, shape, bounds_list, relaxation_class, relaxation_side=coramin.utils.RelaxationSide.BOTH):
for lb, ub in bounds_list:
num_segments_list = [1, 2, 3]
m = pe.ConcreteModel()
m.x = pe.Var(bounds=(lb, ub))
m.aux = pe.Var()
if relaxation_class is coramin.relaxations.PWUnivariateRelaxation:
m.c = coramin.relaxations.PWUnivariateRelaxation()
m.c.build(x=m.x,
aux_var=m.aux,
relaxation_side=relaxation_side,
shape=shape,
f_x_expr=func(m.x))
else:
m.c = relaxation_class()
m.c.build(x=m.x, aux_var=m.aux, relaxation_side=relaxation_side)
m.p = pe.Param(mutable=True, initialize=0)
m.c2 = pe.Constraint(expr=m.x == m.p)
opt = pe.SolverFactory('gurobi_persistent')
for num_segments in num_segments_list:
segment_points = compute_k_segment_points(m.x, num_segments)
m.c.clear_partitions()
for pt in segment_points:
m.c.add_partition_point(pt)
var_values = pe.ComponentMap()
var_values[m.x] = pt
m.c.add_oa_point(var_values=var_values)
m.c.rebuild()
opt.set_instance(m)
for _x in [float(i) for i in np.linspace(lb, ub, 10)]:
m.p.value = _x
opt.remove_constraint(m.c2)
opt.add_constraint(m.c2)
if relaxation_side in {coramin.utils.RelaxationSide.BOTH, coramin.utils.RelaxationSide.UNDER}:
m.obj = pe.Objective(expr=m.aux)
opt.set_objective(m.obj)
res = opt.solve()
self.assertEqual(res.solver.termination_condition, pe.TerminationCondition.optimal)
self.assertLessEqual(m.aux.value, func(_x) + 1e-10)
del m.obj
if relaxation_side in {coramin.utils.RelaxationSide.BOTH, coramin.utils.RelaxationSide.OVER}:
m.obj = pe.Objective(expr=m.aux, sense=pe.maximize)
opt.set_objective(m.obj)
res = opt.solve()
self.assertEqual(res.solver.termination_condition, pe.TerminationCondition.optimal)
self.assertGreaterEqual(m.aux.value, func(_x) - 1e-10)
del m.obj
def test_exp(self):
self.helper(func=pe.exp, shape=coramin.utils.FunctionShape.CONVEX, bounds_list=[(-1, 1)],
relaxation_class=coramin.relaxations.PWUnivariateRelaxation)
def test_log(self):
self.helper(func=pe.log, shape=coramin.utils.FunctionShape.CONCAVE, bounds_list=[(0.5, 1.5)],
relaxation_class=coramin.relaxations.PWUnivariateRelaxation)
def test_quadratic(self):
def quadratic_func(x):
return x**2
self.helper(func=quadratic_func, shape=None, bounds_list=[(-1, 2)],
relaxation_class=coramin.relaxations.PWXSquaredRelaxation)
def test_arctan(self):
self.helper(func=pe.atan, shape=None, bounds_list=[(-1, 1), (-1, 0), (0, 1)],
relaxation_class=coramin.relaxations.PWArctanRelaxation)
self.helper(func=pe.atan, shape=None, bounds_list=[(-0.1, 1)],
relaxation_class=coramin.relaxations.PWArctanRelaxation,
relaxation_side=coramin.utils.RelaxationSide.OVER)
self.helper(func=pe.atan, shape=None, bounds_list=[(-1, 0.1)],
relaxation_class=coramin.relaxations.PWArctanRelaxation,
relaxation_side=coramin.utils.RelaxationSide.UNDER)
def test_sin(self):
self.helper(func=pe.sin, shape=None, bounds_list=[(-1, 1), (-1, 0), (0, 1)],
relaxation_class=coramin.relaxations.PWSinRelaxation)
self.helper(func=pe.sin, shape=None, bounds_list=[(-0.1, 1)],
relaxation_class=coramin.relaxations.PWSinRelaxation,
relaxation_side=coramin.utils.RelaxationSide.OVER)
self.helper(func=pe.sin, shape=None, bounds_list=[(-1, 0.1)],
relaxation_class=coramin.relaxations.PWSinRelaxation,
relaxation_side=coramin.utils.RelaxationSide.UNDER)
def test_cos(self):
self.helper(func=pe.cos, shape=None, bounds_list=[(-1, 1)],
relaxation_class=coramin.relaxations.PWCosRelaxation)
class TestFeasibility(unittest.TestCase):
def test_univariate_exp(self):
m = pe.ConcreteModel()
m.p = pe.Param(initialize=-1, mutable=True)
m.x = pe.Var(bounds=(-1, 1))
m.y = pe.Var()
m.z = pe.Var(bounds=(0, None))
m.c = coramin.relaxations.PWUnivariateRelaxation()
m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
shape=coramin.utils.FunctionShape.CONVEX, f_x_expr=pe.exp(m.x))
m.c.rebuild()
m.c2 = pe.ConstraintList()
m.c2.add(m.z >= m.y - m.p)
m.c2.add(m.z >= m.p - m.y)
m.obj = pe.Objective(expr=m.z)
opt = pe.SolverFactory('gurobi_direct')
for xval in [-1, -0.5, 0, 0.5, 1]:
pval = math.exp(xval)
m.x.fix(xval)
m.p.value = pval
res = opt.solve(m, tee=False)
self.assertTrue(res.solver.termination_condition == pe.TerminationCondition.optimal)
self.assertAlmostEqual(m.y.value, m.p.value, 6)
def test_pw_exp(self):
m = pe.ConcreteModel()
m.p = pe.Param(initialize=-1, mutable=True)
m.x = pe.Var(bounds=(-1, 1))
m.y = pe.Var()
m.z = pe.Var(bounds=(0, None))
m.c = coramin.relaxations.PWUnivariateRelaxation()
m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
shape=coramin.utils.FunctionShape.CONVEX, f_x_expr=pe.exp(m.x))
m.c.add_partition_point(-0.25)
m.c.add_partition_point(0.25)
m.c.rebuild()
m.c2 = pe.ConstraintList()
m.c2.add(m.z >= m.y - m.p)
m.c2.add(m.z >= m.p - m.y)
m.obj = pe.Objective(expr=m.z)
opt = pe.SolverFactory('gurobi_direct')
for xval in [-1, -0.5, 0, 0.5, 1]:
pval = math.exp(xval)
m.x.fix(xval)
m.p.value = pval
res = opt.solve(m, tee=False)
self.assertTrue(res.solver.termination_condition == pe.TerminationCondition.optimal)
self.assertAlmostEqual(m.y.value, m.p.value, 6)
def test_univariate_log(self):
m = pe.ConcreteModel()
m.p = pe.Param(initialize=-1, mutable=True)
m.x = pe.Var(bounds=(0.5, 1.5))
m.y = pe.Var()
m.z = pe.Var(bounds=(0, None))
m.c = coramin.relaxations.PWUnivariateRelaxation()
m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
shape=coramin.utils.FunctionShape.CONCAVE, f_x_expr=pe.log(m.x))
m.c.rebuild()
m.c2 = pe.ConstraintList()
m.c2.add(m.z >= m.y - m.p)
m.c2.add(m.z >= m.p - m.y)
m.obj = pe.Objective(expr=m.z)
opt = pe.SolverFactory('gurobi_direct')
for xval in [0.5, 0.75, 1, 1.25, 1.5]:
pval = math.log(xval)
m.x.fix(xval)
m.p.value = pval
res = opt.solve(m, tee=False)
self.assertTrue(res.solver.termination_condition == pe.TerminationCondition.optimal)
self.assertAlmostEqual(m.y.value, m.p.value, 6)
def test_pw_log(self):
m = pe.ConcreteModel()
m.p = pe.Param(initialize=-1, mutable=True)
m.x = pe.Var(bounds=(0.5, 1.5))
m.y = pe.Var()
m.z = pe.Var(bounds=(0, None))
m.c = coramin.relaxations.PWUnivariateRelaxation()
m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
shape=coramin.utils.FunctionShape.CONCAVE, f_x_expr=pe.log(m.x))
m.c.add_partition_point(0.9)
m.c.add_partition_point(1.1)
m.c.rebuild()
m.c2 = pe.ConstraintList()
m.c2.add(m.z >= m.y - m.p)
m.c2.add(m.z >= m.p - m.y)
m.obj = pe.Objective(expr=m.z)
opt = pe.SolverFactory('gurobi_direct')
for xval in [0.5, 0.75, 1, 1.25, 1.5]:
pval = math.log(xval)
m.x.fix(xval)
m.p.value = pval
res = opt.solve(m, tee=False)
self.assertTrue(res.solver.termination_condition == pe.TerminationCondition.optimal)
self.assertAlmostEqual(m.y.value, m.p.value, 6)
def test_x_fixed(self):
m = pe.ConcreteModel()
m.x = pe.Var(bounds=(-1, 1))
m.y = pe.Var()
m.x.fix(0)
m.c = coramin.relaxations.PWUnivariateRelaxation()
m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH,
shape=coramin.utils.FunctionShape.CONVEX, f_x_expr=pe.exp(m.x))
self.assertEqual(id(m.c.x_fixed_con.body), id(m.y))
self.assertEqual(m.c.x_fixed_con.lower, 1.0)
self.assertEqual(m.c.x_fixed_con.upper, 1.0)
def test_x_sq(self):
m = pe.ConcreteModel()
m.p = pe.Param(initialize=-1, mutable=True)
m.x = pe.Var(bounds=(-1, 1))
m.y = pe.Var()
m.z = pe.Var(bounds=(0, None))
m.c = coramin.relaxations.PWXSquaredRelaxation()
m.c.build(x=m.x, aux_var=m.y, relaxation_side=coramin.utils.RelaxationSide.BOTH)
m.c2 = pe.ConstraintList()
m.c2.add(m.z >= m.y - m.p)
m.c2.add(m.z >= m.p - m.y)
m.obj = pe.Objective(expr=m.z)
opt = pe.SolverFactory('gurobi_direct')
for xval in [-1, -0.5, 0, 0.5, 1]:
pval = xval**2
m.x.fix(xval)
m.p.value = pval
res = opt.solve(m, tee=False)
self.assertTrue(res.solver.termination_condition == pe.TerminationCondition.optimal)
self.assertAlmostEqual(m.y.value, m.p.value, 6)
|
[
"math.exp",
"pyomo.environ.log",
"pyomo.environ.SolverFactory",
"coramin.relaxations.PWUnivariateRelaxation",
"pyomo.environ.Constraint",
"pyomo.environ.Var",
"pyomo.environ.value",
"pyomo.environ.Objective",
"pyomo.environ.exp",
"coramin.relaxations.segments.compute_k_segment_points",
"numpy.linspace",
"pyomo.environ.Param",
"pyomo.environ.ConcreteModel",
"math.log",
"pyomo.environ.ComponentMap",
"pyomo.environ.ConstraintList",
"coramin.relaxations.PWXSquaredRelaxation"
] |
[((259, 277), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (275, 277), True, 'import pyomo.environ as pe\n'), ((322, 330), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (328, 330), True, 'import pyomo.environ as pe\n'), ((349, 375), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1.5, 1.5)'}), '(bounds=(-1.5, 1.5))\n', (355, 375), True, 'import pyomo.environ as pe\n'), ((397, 442), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'model.y', 'sense': 'pe.maximize'}), '(expr=model.y, sense=pe.maximize)\n', (409, 442), True, 'import pyomo.environ as pe\n'), ((466, 510), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (508, 510), False, 'import coramin\n'), ((991, 1024), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (1007, 1024), True, 'import pyomo.environ as pe\n'), ((1207, 1241), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(model.x <= 0.3)'}), '(expr=model.x <= 0.3)\n', (1220, 1241), True, 'import pyomo.environ as pe\n'), ((1260, 1293), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (1276, 1293), True, 'import pyomo.environ as pe\n'), ((1500, 1533), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (1516, 1533), True, 'import pyomo.environ as pe\n'), ((6452, 6470), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (6468, 6470), True, 'import pyomo.environ as pe\n'), ((6485, 6522), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (6493, 6522), True, 'import pyomo.environ as pe\n'), ((6537, 6559), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (6543, 6559), True, 'import pyomo.environ as pe\n'), ((6574, 6582), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (6580, 6582), True, 'import pyomo.environ as pe\n'), ((6597, 6621), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (6603, 6621), True, 'import pyomo.environ as pe\n'), ((6636, 6680), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (6678, 6680), False, 'import coramin\n'), ((6889, 6908), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (6906, 6908), True, 'import pyomo.environ as pe\n'), ((6995, 7017), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (7007, 7017), True, 'import pyomo.environ as pe\n'), ((7032, 7065), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (7048, 7065), True, 'import pyomo.environ as pe\n'), ((7437, 7455), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (7453, 7455), True, 'import pyomo.environ as pe\n'), ((7470, 7507), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (7478, 7507), True, 'import pyomo.environ as pe\n'), ((7522, 7544), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (7528, 7544), True, 'import pyomo.environ as pe\n'), ((7559, 7567), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (7565, 7567), True, 'import pyomo.environ as pe\n'), ((7582, 7606), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (7588, 7606), True, 'import 
pyomo.environ as pe\n'), ((7621, 7665), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (7663, 7665), False, 'import coramin\n'), ((7951, 7970), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (7968, 7970), True, 'import pyomo.environ as pe\n'), ((8057, 8079), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (8069, 8079), True, 'import pyomo.environ as pe\n'), ((8094, 8127), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (8110, 8127), True, 'import pyomo.environ as pe\n'), ((8507, 8525), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (8523, 8525), True, 'import pyomo.environ as pe\n'), ((8540, 8577), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (8548, 8577), True, 'import pyomo.environ as pe\n'), ((8592, 8617), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0.5, 1.5)'}), '(bounds=(0.5, 1.5))\n', (8598, 8617), True, 'import pyomo.environ as pe\n'), ((8632, 8640), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (8638, 8640), True, 'import pyomo.environ as pe\n'), ((8655, 8679), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (8661, 8679), True, 'import pyomo.environ as pe\n'), ((8694, 8738), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (8736, 8738), False, 'import coramin\n'), ((8948, 8967), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (8965, 8967), True, 'import pyomo.environ as pe\n'), ((9054, 9076), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (9066, 9076), True, 'import pyomo.environ as pe\n'), ((9091, 9124), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (9107, 9124), True, 'import pyomo.environ as pe\n'), ((9500, 9518), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (9516, 9518), True, 'import pyomo.environ as pe\n'), ((9533, 9570), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (9541, 9570), True, 'import pyomo.environ as pe\n'), ((9585, 9610), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0.5, 1.5)'}), '(bounds=(0.5, 1.5))\n', (9591, 9610), True, 'import pyomo.environ as pe\n'), ((9625, 9633), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (9631, 9633), True, 'import pyomo.environ as pe\n'), ((9648, 9672), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (9654, 9672), True, 'import pyomo.environ as pe\n'), ((9687, 9731), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (9729, 9731), False, 'import coramin\n'), ((10015, 10034), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (10032, 10034), True, 'import pyomo.environ as pe\n'), ((10121, 10143), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (10133, 10143), True, 'import pyomo.environ as pe\n'), ((10158, 10191), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (10174, 10191), True, 'import pyomo.environ as pe\n'), ((10568, 10586), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (10584, 10586), True, 'import 
pyomo.environ as pe\n'), ((10601, 10623), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (10607, 10623), True, 'import pyomo.environ as pe\n'), ((10638, 10646), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (10644, 10646), True, 'import pyomo.environ as pe\n'), ((10680, 10724), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (10722, 10724), False, 'import coramin\n'), ((11100, 11118), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (11116, 11118), True, 'import pyomo.environ as pe\n'), ((11133, 11170), 'pyomo.environ.Param', 'pe.Param', ([], {'initialize': '(-1)', 'mutable': '(True)'}), '(initialize=-1, mutable=True)\n', (11141, 11170), True, 'import pyomo.environ as pe\n'), ((11185, 11207), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(-1, 1)'}), '(bounds=(-1, 1))\n', (11191, 11207), True, 'import pyomo.environ as pe\n'), ((11222, 11230), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (11228, 11230), True, 'import pyomo.environ as pe\n'), ((11245, 11269), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(0, None)'}), '(bounds=(0, None))\n', (11251, 11269), True, 'import pyomo.environ as pe\n'), ((11284, 11326), 'coramin.relaxations.PWXSquaredRelaxation', 'coramin.relaxations.PWXSquaredRelaxation', ([], {}), '()\n', (11324, 11326), False, 'import coramin\n'), ((11431, 11450), 'pyomo.environ.ConstraintList', 'pe.ConstraintList', ([], {}), '()\n', (11448, 11450), True, 'import pyomo.environ as pe\n'), ((11537, 11559), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.z'}), '(expr=m.z)\n', (11549, 11559), True, 'import pyomo.environ as pe\n'), ((11574, 11607), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_direct"""'], {}), "('gurobi_direct')\n", (11590, 11607), True, 'import pyomo.environ as pe\n'), ((1084, 1101), 'pyomo.environ.value', 'pe.value', (['model.y'], {}), '(model.y)\n', (1092, 1101), True, 'import pyomo.environ as pe\n'), ((1103, 1116), 'math.exp', 'math.exp', (['(1.5)'], {}), '(1.5)\n', (1111, 1116), False, 'import math\n'), ((1353, 1370), 'pyomo.environ.value', 'pe.value', (['model.y'], {}), '(model.y)\n', (1361, 1370), True, 'import pyomo.environ as pe\n'), ((1593, 1610), 'pyomo.environ.value', 'pe.value', (['model.y'], {}), '(model.y)\n', (1601, 1610), True, 'import pyomo.environ as pe\n'), ((1612, 1626), 'math.exp', 'math.exp', (['(-1.5)'], {}), '(-1.5)\n', (1620, 1626), False, 'import math\n'), ((1884, 1902), 'pyomo.environ.ConcreteModel', 'pe.ConcreteModel', ([], {}), '()\n', (1900, 1902), True, 'import pyomo.environ as pe\n'), ((1921, 1944), 'pyomo.environ.Var', 'pe.Var', ([], {'bounds': '(lb, ub)'}), '(bounds=(lb, ub))\n', (1927, 1944), True, 'import pyomo.environ as pe\n'), ((1965, 1973), 'pyomo.environ.Var', 'pe.Var', ([], {}), '()\n', (1971, 1973), True, 'import pyomo.environ as pe\n'), ((2496, 2532), 'pyomo.environ.Param', 'pe.Param', ([], {'mutable': '(True)', 'initialize': '(0)'}), '(mutable=True, initialize=0)\n', (2504, 2532), True, 'import pyomo.environ as pe\n'), ((2552, 2582), 'pyomo.environ.Constraint', 'pe.Constraint', ([], {'expr': '(m.x == m.p)'}), '(expr=m.x == m.p)\n', (2565, 2582), True, 'import pyomo.environ as pe\n'), ((2601, 2638), 'pyomo.environ.SolverFactory', 'pe.SolverFactory', (['"""gurobi_persistent"""'], {}), "('gurobi_persistent')\n", (2617, 2638), True, 'import pyomo.environ as pe\n'), ((7128, 7142), 'math.exp', 'math.exp', (['xval'], {}), '(xval)\n', (7136, 7142), False, 'import math\n'), 
((8190, 8204), 'math.exp', 'math.exp', (['xval'], {}), '(xval)\n', (8198, 8204), False, 'import math\n'), ((9191, 9205), 'math.log', 'math.log', (['xval'], {}), '(xval)\n', (9199, 9205), False, 'import math\n'), ((10258, 10272), 'math.log', 'math.log', (['xval'], {}), '(xval)\n', (10266, 10272), False, 'import math\n'), ((710, 725), 'pyomo.environ.exp', 'pe.exp', (['model.x'], {}), '(model.x)\n', (716, 725), True, 'import pyomo.environ as pe\n'), ((2075, 2119), 'coramin.relaxations.PWUnivariateRelaxation', 'coramin.relaxations.PWUnivariateRelaxation', ([], {}), '()\n', (2117, 2119), False, 'import coramin\n'), ((2723, 2766), 'coramin.relaxations.segments.compute_k_segment_points', 'compute_k_segment_points', (['m.x', 'num_segments'], {}), '(m.x, num_segments)\n', (2747, 2766), False, 'from coramin.relaxations.segments import compute_k_segment_points\n'), ((6839, 6850), 'pyomo.environ.exp', 'pe.exp', (['m.x'], {}), '(m.x)\n', (6845, 6850), True, 'import pyomo.environ as pe\n'), ((7824, 7835), 'pyomo.environ.exp', 'pe.exp', (['m.x'], {}), '(m.x)\n', (7830, 7835), True, 'import pyomo.environ as pe\n'), ((8898, 8909), 'pyomo.environ.log', 'pe.log', (['m.x'], {}), '(m.x)\n', (8904, 8909), True, 'import pyomo.environ as pe\n'), ((9891, 9902), 'pyomo.environ.log', 'pe.log', (['m.x'], {}), '(m.x)\n', (9897, 9902), True, 'import pyomo.environ as pe\n'), ((10883, 10894), 'pyomo.environ.exp', 'pe.exp', (['m.x'], {}), '(m.x)\n', (10889, 10894), True, 'import pyomo.environ as pe\n'), ((2929, 2946), 'pyomo.environ.ComponentMap', 'pe.ComponentMap', ([], {}), '()\n', (2944, 2946), True, 'import pyomo.environ as pe\n'), ((3159, 3182), 'numpy.linspace', 'np.linspace', (['lb', 'ub', '(10)'], {}), '(lb, ub, 10)\n', (3170, 3182), True, 'import numpy as np\n'), ((3460, 3484), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.aux'}), '(expr=m.aux)\n', (3472, 3484), True, 'import pyomo.environ as pe\n'), ((3940, 3983), 'pyomo.environ.Objective', 'pe.Objective', ([], {'expr': 'm.aux', 'sense': 'pe.maximize'}), '(expr=m.aux, sense=pe.maximize)\n', (3952, 3983), True, 'import pyomo.environ as pe\n')]
|
import sys
import re
import time
import argparse
from collections import namedtuple, deque
from itertools import cycle, chain, repeat
import numpy as np
from PIL import Image
import rgbmatrix as rgb
sys.path.append("/home/pi/pixel_art/")
from settings import (NES_PALETTE_HEX, dispmatrix)
from core import *
from sprites.zelda2 import zelda2_animation
from sprites.finalfantasy import finalfantasy_animation
from sprites.megaman2 import megaman2_animation
from sprites.ninjagaiden import ninjagaiden_animation
from sprites.blastermaster import blastermaster_animation
from sprites.dragonwarrior import dragonwarrior_animation
from sprites.supermariobros3 import smb3_animation
from sprites.castlevania3 import castlevania3_animation
from sprites.dragonstrike import dragonstrike_animation
from sprites.excitebike import excitebike_animation
from sprites.kirbysadventure import kirbysadventure_animation
from sprites.lifeforce import lifeforce_animation
from sprites.ducktales import ducktales_animation
from sprites.ghostsandgoblins import ghostsandgoblins_animation
from sprites.batman import batman_animation
from sprites.metalgear import metalgear_animation
from sprites.kabukiquantumfighter import kabukiquantumfighter_animation
def parse_arguments():
class CustomFormatter(argparse.RawDescriptionHelpFormatter):
pass
desc = ("Run 8-bit pixel art animation montage on 32 x 32 RGB LED display")
epilog = """
"""
parser = argparse.ArgumentParser(description=desc,
add_help=False,
epilog=epilog,
formatter_class=CustomFormatter)
opt = parser.add_argument_group("Optional arguments")
opt.add_argument("-c", "--cycletime",
action="store",
dest="cycletime",
help=("Number of seconds to run each animation routine "
"(default: 10)"),
default=10,
type=int,
metavar="INT")
opt.add_argument("-s", "--shuffle",
action="store_true",
dest="shuffle",
help="Shuffle sequence of of animations prior to launch")
opt.add_argument("-a", "--cycleall",
action="store_true",
dest="cycleall",
help="Cycle through all sprites in a scene rather than choosing one at random "
)
opt.add_argument("-h", "--help",
action="help",
help="show this help message and exit")
return parser.parse_args()
def main():
args = parse_arguments()
shuffle = args.shuffle
scenes = [
excitebike_animation,
ghostsandgoblins_animation,
lifeforce_animation,
blastermaster_animation,
metalgear_animation,
zelda2_animation,
dragonwarrior_animation,
ducktales_animation,
megaman2_animation,
ninjagaiden_animation,
batman_animation,
finalfantasy_animation,
castlevania3_animation,
smb3_animation,
kabukiquantumfighter_animation,
dragonstrike_animation,
kirbysadventure_animation,
]
if shuffle:
np.random.shuffle(scenes)
scenes = deque(scenes)
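    # a deque lets the main loop cycle scenes cheaply via rotate(-1) below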
#Clear the display in case anything's still on it.
dispmatrix.Clear()
#Seed the display with black for the first transition
arr = display_sprite(dispmatrix=dispmatrix,
sprite=scenes[0].bg_sprites[0],
bg_sprite=None,
center=True,
xoff=0,
yoff=0,
display=False)
arr1 = np.full((arr.shape[0], arr.shape[1], 3), convert_hex_to_rgb_tuple("000000"), dtype=np.uint8)
while True:
arr1 = animate_sprites(dispmatrix=dispmatrix,
sprite_list=scenes[0].sprite_list,
bg_sprites=scenes[0].bg_sprites,
xoffs=scenes[0].xoffs,
yoffs=scenes[0].yoffs,
frame_time=scenes[0].frame_time,
spbg_ratio=scenes[0].spbg_ratio,
center=scenes[0].center,
bg_scroll_speed=scenes[0].bg_scroll_speed,
cycle_time=args.cycletime,
clear=False,
transition=True,
transition_arr=arr1,
cycles_per_char=scenes[0].cycles_per_char,
cycle_all=args.cycleall
)
scenes.rotate(-1)
if __name__ == "__main__":
try:
main()
# If the script is killed by ctrl-c, clear the display.
except KeyboardInterrupt:
dispmatrix.Clear()
|
[
"sys.path.append",
"numpy.random.shuffle",
"argparse.ArgumentParser",
"collections.deque",
"settings.dispmatrix.Clear"
] |
[((203, 241), 'sys.path.append', 'sys.path.append', (['"""/home/pi/pixel_art/"""'], {}), "('/home/pi/pixel_art/')\n", (218, 241), False, 'import sys\n'), ((1462, 1571), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc', 'add_help': '(False)', 'epilog': 'epilog', 'formatter_class': 'CustomFormatter'}), '(description=desc, add_help=False, epilog=epilog,\n formatter_class=CustomFormatter)\n', (1485, 1571), False, 'import argparse\n'), ((3482, 3495), 'collections.deque', 'deque', (['scenes'], {}), '(scenes)\n', (3487, 3495), False, 'from collections import namedtuple, deque\n'), ((3556, 3574), 'settings.dispmatrix.Clear', 'dispmatrix.Clear', ([], {}), '()\n', (3572, 3574), False, 'from settings import NES_PALETTE_HEX, dispmatrix\n'), ((3442, 3467), 'numpy.random.shuffle', 'np.random.shuffle', (['scenes'], {}), '(scenes)\n', (3459, 3467), True, 'import numpy as np\n'), ((5130, 5148), 'settings.dispmatrix.Clear', 'dispmatrix.Clear', ([], {}), '()\n', (5146, 5148), False, 'from settings import NES_PALETTE_HEX, dispmatrix\n')]
|
'''
KnockoffGAN Knockoff Variable Generation
<NAME> (9/27/2018)
'''
#%% Necessary Packages
import numpy as np
from tqdm import tqdm
import tensorflow as tf
import logging
import argparse
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
#%% KnockoffGAN Function
'''
Inputs:
x_train: Training data
  lamda: Power network parameter (named 'lamda' to avoid Python's lambda keyword) = 0.01
mu: WGAN parameter = 1
'''
logger = logging.getLogger()
def KnockoffGAN (x_train, x_name, lamda = 0.01, mu = 1, mb_size=128, niter=2000):
tf_debug = False
if tf_debug:
run_opts = tf.RunOptions(report_tensor_allocations_upon_oom = True)
config = tf.ConfigProto()
config.log_device_placement=True
config.gpu_options.allow_growth = True
else:
run_opts = None
config = None
#%% Parameters
# 1. # of samples
n = len(x_train[:,0])
# 2. # of features
x_dim = len(x_train[0,:])
# 3. # of random dimensions
z_dim = int(x_dim)
# 4. # of hidden dimensions
h_dim = int(x_dim)
# 5. # of minibatch
# mb_size = 128
# 6. WGAN parameters
lam = 10
lr = 1e-4
#%% Necessary Functions
# 1. Xavier Initialization Definition
def xavier_init(size):
in_dim = size[0]
xavier_stddev = 1. / tf.sqrt(in_dim / 2.)
return tf.random_normal(shape = size, stddev = xavier_stddev)
# 2. Sample from normal distribution: Random variable generation
def sample_Z(m, n, x_name):
if ((x_name == 'Normal') | (x_name == 'AR_Normal')):
return np.random.normal(0., np.sqrt(1./3000), size = [m, n]).copy()
elif ((x_name == 'Uniform') | (x_name == 'AR_Uniform')):
return np.random.uniform(-3*np.sqrt(1./3000),3*np.sqrt(1./3000),[m,n]).copy()
# 3. Sample from the real data (Mini-batch index sampling)
def sample_X(m, n):
return np.random.permutation(m)[:n].copy()
# 4. Permutation for MINE computation
def Permute (x):
n = len(x[:,0])
idx = np.random.permutation(n)
out = x[idx,:].copy()
return out
# 5. Bernoulli sampling for Swap and Hint variables
def sample_SH(m, n, p):
return np.random.binomial(1, p, [m,n]).copy()
#%% Placeholder inputs
# 1. Feature
X = tf.placeholder(tf.float32, shape = [None, x_dim])
# 2. Feature (Permute)
X_hat = tf.placeholder(tf.float32, shape = [None, x_dim])
# 3. Random Variable
Z = tf.placeholder(tf.float32, shape = [None, z_dim])
# 4. Swap
S = tf.placeholder(tf.float32, shape = [None, x_dim])
# 5. Hint
H = tf.placeholder(tf.float32, shape = [None, x_dim])
#%% Network Building
#%% 1. Discriminator
# Input: Swap (X, tilde X) and Hint
D_W1 = tf.Variable(xavier_init([x_dim + x_dim + x_dim, h_dim]))
D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
D_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
D_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_D = [D_W1, D_W2, D_b1, D_b2]
#%% 2. WGAN Discriminator
# Input: tilde X
WD_W1 = tf.Variable(xavier_init([x_dim, h_dim]))
WD_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
WD_W2 = tf.Variable(xavier_init([h_dim,1]))
WD_b2 = tf.Variable(tf.zeros(shape=[1]))
theta_WD = [WD_W1, WD_W2, WD_b1, WD_b2]
#%% 3. Generator
# Input: X and Z
G_W1 = tf.Variable(xavier_init([x_dim + z_dim, h_dim]))
G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))
G_W2 = tf.Variable(xavier_init([h_dim,x_dim]))
G_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_G = [G_W1, G_W2, G_b1, G_b2]
#%% 4. MINE
# Input: X and tilde X
# For X
M_W1A = tf.Variable(xavier_init([x_dim]))
M_W1B = tf.Variable(xavier_init([x_dim]))
M_b1 = tf.Variable(tf.zeros(shape=[x_dim]))
# For tilde X
M_W2A = tf.Variable(xavier_init([x_dim]))
M_W2B = tf.Variable(xavier_init([x_dim]))
M_b2 = tf.Variable(tf.zeros(shape=[x_dim]))
# Combine
M_W3 = tf.Variable(xavier_init([x_dim]))
M_b3 = tf.Variable(tf.zeros(shape=[x_dim]))
theta_M = [M_W1A, M_W1B, M_W2A, M_W2B, M_W3, M_b1, M_b2, M_b3]
#%% Functions
# 1. Generator
def generator(x, z):
inputs = tf.concat(axis=1, values = [x, z])
G_h1 = tf.nn.tanh(tf.matmul(inputs, G_W1) + G_b1)
G_out = (tf.matmul(G_h1, G_W2) + G_b2)
return G_out
# 2. Discriminator
def discriminator(sA, sB, h):
inputs = tf.concat(axis=1, values = [sA, sB, h])
D_h1 = tf.nn.tanh(tf.matmul(inputs, D_W1) + D_b1)
D_out = tf.nn.sigmoid(tf.matmul(D_h1, D_W2) + D_b2)
return D_out
# 3. WGAN Discriminator
def WGAN_discriminator(x):
WD_h1 = tf.nn.relu(tf.matmul(x, WD_W1) + WD_b1)
WD_out = (tf.matmul(WD_h1, WD_W2) + WD_b2)
return WD_out
# 4. MINE
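    # (the statistics network below acts elementwise: one scalar estimate per feature)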
def MINE(x, x_hat):
M_h1 = tf.nn.tanh(M_W1A * x + M_W1B * x_hat + M_b1)
M_h2 = tf.nn.tanh(M_W2A * x + M_W2B * x_hat + M_b2)
M_out = (M_W3 * (M_h1 + M_h2) + M_b3)
Exp_M_out = tf.exp(M_out)
return M_out, Exp_M_out
#%% Combination across the networks
    # 1. Generate Knockoffs
G_sample = generator(X,Z)
# 2. WGAN Outputs for real and fake
WD_real = WGAN_discriminator(X)
WD_fake = WGAN_discriminator(G_sample)
# 3. Generate swapping (X, tilde X)
SwapA = S * X + (1-S) * G_sample
SwapB = (1-S) * X + S * G_sample
# 4. Discriminator output
# (X, tilde X) is SwapA, SwapB. Hint is generated by H * S
D_out = discriminator(SwapA, SwapB, H*S)
# 5. MINE Computation
# Without permutation
M_out, _ = MINE(X, G_sample)
    # With permutation
_, Exp_M_out = MINE(X_hat, G_sample)
    # 6. WGAN-GP: replace weight clipping with a gradient penalty on interpolates
# 1. Line 6 in Algorithm 1
eps = tf.random_uniform([mb_size, 1], minval = 0., maxval = 1.)
X_inter = eps*X + (1. - eps) * G_sample
# 2. Line 7 in Algorithm 1
grad = tf.gradients(WGAN_discriminator(X_inter), [X_inter])[0]
grad_norm = tf.sqrt(tf.reduce_sum((grad)**2 + 1e-8, axis = 1))
grad_pen = lam * tf.reduce_mean((grad_norm - 1)**2)
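    # i.e. grad_pen = lam * E[(||grad_x D(X_inter)|| - 1)^2], the WGAN-GP penalty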
#%% Loss function
# 1. WGAN Loss
WD_loss = tf.reduce_mean(WD_fake) - tf.reduce_mean(WD_real) + grad_pen
# 2. Discriminator loss
D_loss = -tf.reduce_mean(S * (1-H) * tf.log(D_out + 1e-8) + (1-S) * (1-H) * tf.log(1 - D_out + 1e-8))
# 3. MINE Loss
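    # Donsker-Varadhan bound E_P[T] - log E_Q[exp(T)]; the permuted batch X_hat
    # plays the role of the product-of-marginals distribution Q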
M_loss = tf.reduce_sum( tf.reduce_mean(M_out, axis = 0) - tf.log(tf.reduce_mean(Exp_M_out, axis = 0)) )
# 4. Generator loss
    G_loss = -D_loss - mu * tf.reduce_mean(WD_fake) + lamda * M_loss
# Solver
WD_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(WD_loss, var_list = theta_WD))
D_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(D_loss, var_list = theta_D))
G_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(G_loss, var_list = theta_G))
M_solver = (tf.train.AdamOptimizer(learning_rate = lr, beta1 = 0.5).minimize(-M_loss, var_list = theta_M))
#%% Sessions
if tf_debug:
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer(), options=run_opts)
else:
sess = tf.Session()
sess.run(tf.global_variables_initializer())
#%% Iterations
for it in tqdm(range(niter)):
for dummy_range in range(5):
#%% WGAN, Discriminator and MINE Training
# Random variable generation
Z_mb = sample_Z(mb_size, z_dim, x_name)
# Minibatch sampling
X_idx = sample_X(n,mb_size)
X_mb = x_train[X_idx,:].copy()
X_perm_mb = Permute(X_mb)
# Swap generation
S_mb = sample_SH(mb_size, x_dim, 0.5)
# Hint generation
H_mb = sample_SH(mb_size, x_dim, 0.9)
# 1. WGAN Training
_, WD_loss_curr = sess.run([WD_solver, WD_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
# 2. Discriminator Training
# print('discriminator training')
_, D_loss_curr = sess.run([D_solver, D_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
# 3. MINE Training
# print('mine training')
_, M_loss_curr = sess.run([M_solver, M_loss], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
#%% Generator Training
# Random variable generation
Z_mb = sample_Z(mb_size, z_dim, x_name)
# Minibatch sampling
X_idx = sample_X(n,mb_size)
X_mb = x_train[X_idx,:].copy()
X_perm_mb = Permute(X_mb)
# Swap generation
S_mb = sample_SH(mb_size, x_dim, 0.5)
# Hint generation
H_mb = sample_SH(mb_size, x_dim, 0.0)
# Generator training
# print('gen training')
_, G_loss_curr, G_sample_curr = sess.run([G_solver, G_loss, G_sample], feed_dict = {X: X_mb, Z: Z_mb, X_hat: X_perm_mb, S: S_mb, H: H_mb}, options=run_opts)
#%% Output
#print('last session run')
X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)}, options=run_opts)[0]
# X_knockoff = sess.run([G_sample], feed_dict = {X: x_train, Z: sample_Z(n, z_dim, x_name)})[0]
#print('closing session')
sess.close()
tf.reset_default_graph()
return X_knockoff
def init_arg():
parser = argparse.ArgumentParser()
parser.add_argument(
'-i')
parser.add_argument(
'-o')
parser.add_argument(
'--bs', default=128, type=int)
parser.add_argument(
'--it', default=2000, type=int)
parser.add_argument(
'--target')
parser.add_argument(
'--xname', default='Normal', help='Sample distribution [Normal, Uniform]')
parser.add_argument(
'--scale', default=1, type=int)
return parser.parse_args()
if __name__ == "__main__":
args = init_arg()
df = pd.read_csv(args.i)
niter = args.it
use_scale = args.scale
x_name = args.xname
lbl = args.target
features = list(df.columns)
features.remove(lbl)
# scale/normalize dataset
range_scaler = (0, 1)
scaler = MinMaxScaler(feature_range=range_scaler)
x = df[features]
if use_scale:
scaler.fit(x)
x = scaler.transform(x)
else:
x = x.values
x_k = KnockoffGAN(
x,
x_name,
mb_size=args.bs,
niter=niter)
df_k = pd.DataFrame(x_k, columns=features)
df_k[lbl] = df[lbl]
df_k.to_csv(args.o, index=False)
|
[
"tensorflow.reduce_sum",
"argparse.ArgumentParser",
"tensorflow.nn.tanh",
"pandas.read_csv",
"tensorflow.reset_default_graph",
"sklearn.preprocessing.MinMaxScaler",
"tensorflow.ConfigProto",
"tensorflow.matmul",
"tensorflow.sqrt",
"tensorflow.RunOptions",
"pandas.DataFrame",
"tensorflow.concat",
"tensorflow.placeholder",
"tensorflow.exp",
"numpy.random.binomial",
"tensorflow.global_variables_initializer",
"tensorflow.reduce_mean",
"tensorflow.Session",
"tensorflow.random_normal",
"tensorflow.log",
"numpy.random.permutation",
"tensorflow.random_uniform",
"tensorflow.zeros",
"tensorflow.train.AdamOptimizer",
"logging.getLogger",
"numpy.sqrt"
] |
[((394, 413), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (411, 413), False, 'import logging\n'), ((2404, 2451), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2418, 2451), True, 'import tensorflow as tf\n'), ((2496, 2543), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2510, 2543), True, 'import tensorflow as tf\n'), ((2586, 2633), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, z_dim]'}), '(tf.float32, shape=[None, z_dim])\n', (2600, 2633), True, 'import tensorflow as tf\n'), ((2658, 2705), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2672, 2705), True, 'import tensorflow as tf\n'), ((2733, 2780), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None, x_dim]'}), '(tf.float32, shape=[None, x_dim])\n', (2747, 2780), True, 'import tensorflow as tf\n'), ((6162, 6217), 'tensorflow.random_uniform', 'tf.random_uniform', (['[mb_size, 1]'], {'minval': '(0.0)', 'maxval': '(1.0)'}), '([mb_size, 1], minval=0.0, maxval=1.0)\n', (6179, 6217), True, 'import tensorflow as tf\n'), ((10092, 10116), 'tensorflow.reset_default_graph', 'tf.reset_default_graph', ([], {}), '()\n', (10114, 10116), True, 'import tensorflow as tf\n'), ((10170, 10195), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10193, 10195), False, 'import argparse\n'), ((10713, 10732), 'pandas.read_csv', 'pd.read_csv', (['args.i'], {}), '(args.i)\n', (10724, 10732), True, 'import pandas as pd\n'), ((10953, 10993), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': 'range_scaler'}), '(feature_range=range_scaler)\n', (10965, 10993), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((11228, 11263), 'pandas.DataFrame', 'pd.DataFrame', (['x_k'], {'columns': 'features'}), '(x_k, columns=features)\n', (11240, 11263), True, 'import pandas as pd\n'), ((561, 615), 'tensorflow.RunOptions', 'tf.RunOptions', ([], {'report_tensor_allocations_upon_oom': '(True)'}), '(report_tensor_allocations_upon_oom=True)\n', (574, 615), True, 'import tensorflow as tf\n'), ((635, 651), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (649, 651), True, 'import tensorflow as tf\n'), ((1366, 1416), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': 'size', 'stddev': 'xavier_stddev'}), '(shape=size, stddev=xavier_stddev)\n', (1382, 1416), True, 'import tensorflow as tf\n'), ((2104, 2128), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2125, 2128), True, 'import numpy as np\n'), ((2994, 3017), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3002, 3017), True, 'import tensorflow as tf\n'), ((3098, 3121), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (3106, 3121), True, 'import tensorflow as tf\n'), ((3304, 3327), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3312, 3327), True, 'import tensorflow as tf\n'), ((3406, 3425), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[1]'}), '(shape=[1])\n', (3414, 3425), True, 'import tensorflow as tf\n'), ((3606, 3629), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[h_dim]'}), '(shape=[h_dim])\n', (3614, 3629), True, 'import tensorflow as tf\n'), ((3710, 3733), 'tensorflow.zeros', 'tf.zeros', ([], 
{'shape': '[x_dim]'}), '(shape=[x_dim])\n', (3718, 3733), True, 'import tensorflow as tf\n'), ((3962, 3985), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (3970, 3985), True, 'import tensorflow as tf\n'), ((4125, 4148), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (4133, 4148), True, 'import tensorflow as tf\n'), ((4237, 4260), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[x_dim]'}), '(shape=[x_dim])\n', (4245, 4260), True, 'import tensorflow as tf\n'), ((4426, 4458), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[x, z]'}), '(axis=1, values=[x, z])\n', (4435, 4458), True, 'import tensorflow as tf\n'), ((4679, 4716), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(1)', 'values': '[sA, sB, h]'}), '(axis=1, values=[sA, sB, h])\n', (4688, 4716), True, 'import tensorflow as tf\n'), ((5155, 5199), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['(M_W1A * x + M_W1B * x_hat + M_b1)'], {}), '(M_W1A * x + M_W1B * x_hat + M_b1)\n', (5165, 5199), True, 'import tensorflow as tf\n'), ((5215, 5259), 'tensorflow.nn.tanh', 'tf.nn.tanh', (['(M_W2A * x + M_W2B * x_hat + M_b2)'], {}), '(M_W2A * x + M_W2B * x_hat + M_b2)\n', (5225, 5259), True, 'import tensorflow as tf\n'), ((5335, 5348), 'tensorflow.exp', 'tf.exp', (['M_out'], {}), '(M_out)\n', (5341, 5348), True, 'import tensorflow as tf\n'), ((6391, 6431), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(grad ** 2 + 1e-08)'], {'axis': '(1)'}), '(grad ** 2 + 1e-08, axis=1)\n', (6404, 6431), True, 'import tensorflow as tf\n'), ((6455, 6491), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['((grad_norm - 1) ** 2)'], {}), '((grad_norm - 1) ** 2)\n', (6469, 6491), True, 'import tensorflow as tf\n'), ((7519, 7544), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (7529, 7544), True, 'import tensorflow as tf\n'), ((7640, 7652), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (7650, 7652), True, 'import tensorflow as tf\n'), ((1330, 1351), 'tensorflow.sqrt', 'tf.sqrt', (['(in_dim / 2.0)'], {}), '(in_dim / 2.0)\n', (1337, 1351), True, 'import tensorflow as tf\n'), ((4536, 4557), 'tensorflow.matmul', 'tf.matmul', (['G_h1', 'G_W2'], {}), '(G_h1, G_W2)\n', (4545, 4557), True, 'import tensorflow as tf\n'), ((5013, 5036), 'tensorflow.matmul', 'tf.matmul', (['WD_h1', 'WD_W2'], {}), '(WD_h1, WD_W2)\n', (5022, 5036), True, 'import tensorflow as tf\n'), ((6550, 6573), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['WD_fake'], {}), '(WD_fake)\n', (6564, 6573), True, 'import tensorflow as tf\n'), ((6576, 6599), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['WD_real'], {}), '(WD_real)\n', (6590, 6599), True, 'import tensorflow as tf\n'), ((6805, 6834), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['M_out'], {'axis': '(0)'}), '(M_out, axis=0)\n', (6819, 6834), True, 'import tensorflow as tf\n'), ((7021, 7072), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7043, 7072), True, 'import tensorflow as tf\n'), ((7133, 7184), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7155, 7184), True, 'import tensorflow as tf\n'), ((7243, 7294), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7265, 7294), True, 'import tensorflow as tf\n'), ((7353, 7404), 'tensorflow.train.AdamOptimizer', 
'tf.train.AdamOptimizer', ([], {'learning_rate': 'lr', 'beta1': '(0.5)'}), '(learning_rate=lr, beta1=0.5)\n', (7375, 7404), True, 'import tensorflow as tf\n'), ((7562, 7595), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7593, 7595), True, 'import tensorflow as tf\n'), ((7670, 7703), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (7701, 7703), True, 'import tensorflow as tf\n'), ((2294, 2326), 'numpy.random.binomial', 'np.random.binomial', (['(1)', 'p', '[m, n]'], {}), '(1, p, [m, n])\n', (2312, 2326), True, 'import numpy as np\n'), ((4487, 4510), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'G_W1'], {}), '(inputs, G_W1)\n', (4496, 4510), True, 'import tensorflow as tf\n'), ((4745, 4768), 'tensorflow.matmul', 'tf.matmul', (['inputs', 'D_W1'], {}), '(inputs, D_W1)\n', (4754, 4768), True, 'import tensorflow as tf\n'), ((4807, 4828), 'tensorflow.matmul', 'tf.matmul', (['D_h1', 'D_W2'], {}), '(D_h1, D_W2)\n', (4816, 4828), True, 'import tensorflow as tf\n'), ((4966, 4985), 'tensorflow.matmul', 'tf.matmul', (['x', 'WD_W1'], {}), '(x, WD_W1)\n', (4975, 4985), True, 'import tensorflow as tf\n'), ((6846, 6879), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['Exp_M_out'], {'axis': '(0)'}), '(Exp_M_out, axis=0)\n', (6860, 6879), True, 'import tensorflow as tf\n'), ((1950, 1974), 'numpy.random.permutation', 'np.random.permutation', (['m'], {}), '(m)\n', (1971, 1974), True, 'import numpy as np\n'), ((6685, 6706), 'tensorflow.log', 'tf.log', (['(D_out + 1e-08)'], {}), '(D_out + 1e-08)\n', (6691, 6706), True, 'import tensorflow as tf\n'), ((6724, 6749), 'tensorflow.log', 'tf.log', (['(1 - D_out + 1e-08)'], {}), '(1 - D_out + 1e-08)\n', (6730, 6749), True, 'import tensorflow as tf\n'), ((6945, 6968), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['WD_fake'], {}), '(WD_fake)\n', (6959, 6968), True, 'import tensorflow as tf\n'), ((1640, 1659), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3000)'], {}), '(1.0 / 3000)\n', (1647, 1659), True, 'import numpy as np\n'), ((1785, 1804), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3000)'], {}), '(1.0 / 3000)\n', (1792, 1804), True, 'import numpy as np\n'), ((1804, 1823), 'numpy.sqrt', 'np.sqrt', (['(1.0 / 3000)'], {}), '(1.0 / 3000)\n', (1811, 1823), True, 'import numpy as np\n')]
|
#!/usr/bin/python
import numpy as np
import sys
import argparse
from assembler import symbolTable, singleInstr, doubleInstr, tripleInstr
from transcoder import key_note_length, offsetArr
from music21 import midi, note, chord
#The stack size and the program size are both 256 for easy addressing
STACK_SIZE = 256
PROG_SIZE = 256
#Create a `vm themed' error
class VMError(Exception):
def __init__(self, value):
self.message = value
def __str__(self):
return self.message
class BalladVM:
    #Initialize the stack, stack pointer, etc. as zero
def __init__(self):
self.stack = np.zeros(STACK_SIZE, dtype=np.uint8)
self.sp = np.uint8(0)
self.pc = np.uint8(0)
self.s_regs = np.zeros(8, dtype=np.uint8)
self.m_regs = np.zeros(8, dtype=np.uint8)
self.progmem = np.zeros(PROG_SIZE, dtype=np.uint8)
#This loads an object from the paired code and static
# memory files
def load_obj(self, obj_fname, statmem_fname):
obj_file = open(obj_fname, 'rb')
statmem_file = open(statmem_fname, 'rb')
statmem = statmem_file.read()
obj = obj_file.read()
for i in range(len(obj)):
self.progmem[i] = ord(obj[i])
self.statmem = []
for i in range(len(statmem)):
self.statmem.append(ord(statmem[i]))
#This loads an object from the proper midi file using music21
def load_midi(self, midi_fname):
midi_file = midi.base.MidiFile()
midi_file.open(midi_fname, 'rb')
midi_file.read()
if len(midi_file.tracks) != 2:
            raise VMError(
                'Error: Incorrect number of tracks in Ballad program: %d'
                % len(midi_file.tracks))
else:
#Stream 0 has the static data section
static_stream = midi.translate.midiTrackToStream(
midi_file.tracks[0])
#Stream 1 has the program code section
code_stream = midi.translate.midiTrackToStream(
midi_file.tracks[1])
tmp_note = note.Note()
tmp_chord = chord.Chord()
static_string = ''
code_string = ''
midi_key = -1
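            #Each pair of notes encodes one hex digit: a `key' note
            # (identified by its duration) fixes the reference pitch, and the
            # interval of the following note from it selects the digit via offsetArr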
#Transcode the static data
for curr_note in static_stream:
midi_num = -1
note_type = type(curr_note)
if note_type == type(tmp_note):
midi_num = curr_note.midi
elif note_type == type(tmp_chord):
midi_num = curr_note[0].midi
if note_type == type(tmp_note) or \
note_type == type(tmp_chord):
if curr_note.duration.quarterLength == key_note_length and midi_key < 0:
midi_key = midi_num
elif midi_key >= 0:
                        curr_num = offsetArr[midi_num - midi_key]
static_string += '%1x' % curr_num
midi_key = -1
#Transcode the program code
for curr_note in code_stream:
midi_num = -1
note_type = type(curr_note)
if note_type == type(tmp_note):
midi_num = curr_note.midi
elif note_type == type(tmp_chord):
midi_num = curr_note[0].midi
if note_type == type(tmp_note) or \
note_type == type(tmp_chord):
if curr_note.duration.quarterLength == key_note_length and midi_key < 0:
midi_key = midi_num
else:
                        curr_num = offsetArr[midi_num - midi_key]
code_string += '%1x' % curr_num
self.statmem = []
for i in range(0, len(static_string), 2):
self.statmem.append(int(static_string[i: i+2], 16))
for i in range(0, len(code_string), 2):
self.progmem[i/2] = (int(code_string[i: i+2], 16))
#This will run one step of execution of the
# program
def exec_timestep(self):
#Fetch instruction at pc:
curr_opcode = self.progmem[self.pc]
if curr_opcode == 0:
pass
else:
            curr_instr = symbolTable[curr_opcode]
#Deal with the fact that or, and and print are
# all reserved words
if curr_instr == 'or' or \
curr_instr == 'and' \
or curr_instr == 'print':
curr_instr += '_'
func = getattr(self, curr_instr)
curr_instr = curr_instr.replace('_','')
#Determine how many arguments we need
# then run the appropriate instruction
if curr_instr in singleInstr:
self.pc += 1
func(self.progmem[self.pc])
elif curr_instr in doubleInstr:
self.pc += 1
arg1 = self.progmem[self.pc]
self.pc += 1
arg2 = self.progmem[self.pc]
func(arg1, arg2)
elif curr_instr in tripleInstr:
self.pc += 1
arg1 = self.progmem[self.pc]
self.pc += 1
arg2 = self.progmem[self.pc]
self.pc += 1
arg3 = self.progmem[self.pc]
func(arg1, arg2, arg3)
else:
raise VMError('Error: Instruction %s not found near offset %d'
% (curr_instr, self.pc))
self.pc += 1
#Mathematical functions:
def add(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] + self.m_regs[m1]
def sub(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] - self.m_regs[m1]
def mul(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] * self.m_regs[m1]
def div(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] / self.m_regs[m1]
def xor(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] ^ self.m_regs[m1]
def or_(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] | self.m_regs[m1]
def and_(self, m0, m1):
self.m_regs[m0] = self.m_regs[m0] & self.m_regs[m1]
def inv(self, m0):
self.m_regs[m0] = ~self.m_regs[m0]
def inc(self, m0):
self.m_regs[m0] += 1
def dec(self, m0):
self.m_regs[m0] -= 1
#Jump instructions:
#Subtraction of 1 is so that when the PC gets incremented,
    # it ends up in the correct place
def jmp(self, off):
self.pc = off - 1
def jeq(self, m0, m1, off):
if self.m_regs[m0] == self.m_regs[m1]:
self.pc = off - 1
def jne(self, m0, m1, off):
if self.m_regs[m0] != self.m_regs[m1]:
self.pc = off - 1
def jlt(self, m0, m1, off):
if self.m_regs[m0] < self.m_regs[m1]:
self.pc = off - 1
def jgt(self, m0, m1, off):
if self.m_regs[m0] > self.m_regs[m1]:
self.pc = off - 1
def jlte(self, m0, m1, off):
if self.m_regs[m0] <= self.m_regs[m1]:
self.pc = off - 1
def jgte(self, m0, m1, off):
if self.m_regs[m0] >= self.m_regs[m1]:
self.pc = off - 1
def ret(self, m0):
self.pc = self.m_regs[m0] - 1
#Memory instructions:
def push(self, s0):
self.stack[self.sp] = self.s_regs[s0]
self.sp += 1
    def pop(self, s0):
        #sp points one past the top of the stack, so decrement before reading
        self.sp -= 1
        self.s_regs[s0] = self.stack[self.sp]
def lstat(self, s0, m0):
self.s_regs[s0] = self.statmem[
self.m_regs[m0]]
def stget(self, s0, m0):
self.s_regs[s0] = self.stack[
self.m_regs[m0]]
def stput(self, m0, s0):
self.stack[
self.m_regs[m0]] = self.s_regs[s0]
# - Move instructions
def movim(self, m0, byte):
self.m_regs[m0] = byte
def movis(self, s0, byte):
self.s_regs[s0] = byte
def movrm(self, m0, s0):
self.m_regs[m0] = self.s_regs[s0]
def movrs(self, s0, m0):
self.s_regs[s0] = self.m_regs[m0]
#Utility instructions
#This prints out a message
def print_(self, s0, s1):
message = (self.stack[
self.s_regs[s0]:
self.s_regs[s0] +
self.s_regs[s1]])
        message = ''.join(['%c' % char for char in message])
        sys.stdout.write(message)
#This reads in to the stack
def read(self, m0, m1):
message = raw_input()
message = message[0:self.m_regs[m1]]
oLen = len(message)
mDiff = self.m_regs[m1] - oLen
message += mDiff * '\x00'
message = [ord(char) for char in message]
self.stack[self.m_regs[m0]:
self.m_regs[m0] +
self.m_regs[m1]] = message
self.m_regs[m1] = oLen
def main():
parser = argparse.ArgumentParser(
description='This is a VM to run Ballad (byte)code')
parser.add_argument('-bc', action='store_true',
help='Run Ballad through bytecode')
parser.add_argument('name', help='The name of the Ballad file(s)')
args = parser.parse_args()
#If we're running in bytecode mode, dispatch to the bytecode loader
if args.bc:
sm_name = args.name + '.smb'
obj_name = args.name + '.ob'
vm = BalladVM()
vm.load_obj(obj_name, sm_name)
else: #Otherwise, use the midi loader
vm = BalladVM()
vm.load_midi(args.name)
    #Track successive PC values - the program has finished when the
    # PC stops moving
curr_pc = vm.pc
prev_pc = -1
while curr_pc != prev_pc:
#Loop until the program is done (the PC doesn't move anymore)
vm.exec_timestep()
prev_pc = curr_pc
curr_pc = vm.pc
if __name__ == '__main__':
main()
|
[
"sys.stdout.write",
"numpy.uint8",
"argparse.ArgumentParser",
"numpy.zeros",
"music21.midi.translate.midiTrackToStream",
"music21.midi.base.MidiFile",
"music21.chord.Chord",
"music21.note.Note"
] |
[((7199, 7275), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""This is a VM to run Ballad (byte)code"""'}), "(description='This is a VM to run Ballad (byte)code')\n", (7222, 7275), False, 'import argparse\n'), ((586, 622), 'numpy.zeros', 'np.zeros', (['STACK_SIZE'], {'dtype': 'np.uint8'}), '(STACK_SIZE, dtype=np.uint8)\n', (594, 622), True, 'import numpy as np\n'), ((635, 646), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (643, 646), True, 'import numpy as np\n'), ((659, 670), 'numpy.uint8', 'np.uint8', (['(0)'], {}), '(0)\n', (667, 670), True, 'import numpy as np\n'), ((687, 714), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'np.uint8'}), '(8, dtype=np.uint8)\n', (695, 714), True, 'import numpy as np\n'), ((731, 758), 'numpy.zeros', 'np.zeros', (['(8)'], {'dtype': 'np.uint8'}), '(8, dtype=np.uint8)\n', (739, 758), True, 'import numpy as np\n'), ((776, 811), 'numpy.zeros', 'np.zeros', (['PROG_SIZE'], {'dtype': 'np.uint8'}), '(PROG_SIZE, dtype=np.uint8)\n', (784, 811), True, 'import numpy as np\n'), ((1331, 1351), 'music21.midi.base.MidiFile', 'midi.base.MidiFile', ([], {}), '()\n', (1349, 1351), False, 'from music21 import midi, note, chord\n'), ((6798, 6823), 'sys.stdout.write', 'sys.stdout.write', (['message'], {}), '(message)\n', (6814, 6823), False, 'import sys\n'), ((1615, 1668), 'music21.midi.translate.midiTrackToStream', 'midi.translate.midiTrackToStream', (['midi_file.tracks[0]'], {}), '(midi_file.tracks[0])\n', (1647, 1668), False, 'from music21 import midi, note, chord\n'), ((1733, 1786), 'music21.midi.translate.midiTrackToStream', 'midi.translate.midiTrackToStream', (['midi_file.tracks[1]'], {}), '(midi_file.tracks[1])\n', (1765, 1786), False, 'from music21 import midi, note, chord\n'), ((1806, 1817), 'music21.note.Note', 'note.Note', ([], {}), '()\n', (1815, 1817), False, 'from music21 import midi, note, chord\n'), ((1833, 1846), 'music21.chord.Chord', 'chord.Chord', ([], {}), '()\n', (1844, 1846), False, 'from music21 import midi, note, chord\n')]
|
from numpy.random import choice
from statistics import mode
KEY = 0
VALUE = 1
TWICE = 2
THRICE = 3
PROBABILITY = [0.30, 0.265, 0.179, 0.129, 0.073, 0.035, 0.019]
NOTHING = ":x:"
CHERRY = ":cherries:"
BLUEBERRY = ":blueberries:"
COIN = ":coin:"
CARD = ":credit_card:"
GEM = ":gem:"
EIGHTBALL = ":8ball:"
SLOT = [NOTHING, CHERRY, BLUEBERRY, COIN, CARD, GEM, EIGHTBALL]
JACKPOT = {NOTHING * THRICE: 0,
CHERRY * THRICE: 50,
CHERRY * TWICE: 0.5,
BLUEBERRY * THRICE: 150,
BLUEBERRY * TWICE: 0.7,
COIN * THRICE: 500,
COIN * TWICE: 1.5,
CARD * THRICE: 1500,
GEM * THRICE: 2500,
EIGHTBALL * THRICE: 25000
}
average = []
class SlotMachine:
def __init__(self, cash):
self.cash = cash
@staticmethod
def spin():
row = choice(SLOT, size=THRICE, p=PROBABILITY)
key = "".join(row[:])
if key.count(CHERRY) == TWICE or key.count(BLUEBERRY) == TWICE or key.count(COIN) == TWICE:
key = mode(row) * TWICE
return key, "".join(row)
def pot(self):
result = self.spin()
key = result[KEY]
value = result[VALUE]
if key in JACKPOT:
cash = JACKPOT[key] * self.cash
return cash, value
        else:
            #no winning combination matched: payout is zero
            return 0, value
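
#Illustrative usage (a starting stake of 100 is an arbitrary choice):
#   machine = SlotMachine(100)
#   payout, reels = machine.pot()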
|
[
"statistics.mode",
"numpy.random.choice"
] |
[((849, 889), 'numpy.random.choice', 'choice', (['SLOT'], {'size': 'THRICE', 'p': 'PROBABILITY'}), '(SLOT, size=THRICE, p=PROBABILITY)\n', (855, 889), False, 'from numpy.random import choice\n'), ((1038, 1047), 'statistics.mode', 'mode', (['row'], {}), '(row)\n', (1042, 1047), False, 'from statistics import mode\n')]
|
# coding: utf-8
# # Exploratory data analysis of TCGA mutation data
# In[1]:
import os
import numpy
import pandas
import seaborn
get_ipython().run_line_magic('matplotlib', 'inline')
# ## Read TCGA datasets
# In[2]:
path = os.path.join('data', 'mutation-matrix.tsv.bz2')
mutation_df = pandas.read_table(path, index_col=0)
mutation_df.columns.name = 'entrez_gene_id'
mutation_df.shape
# In[3]:
path = os.path.join('data', 'samples.tsv')
sample_df = pandas.read_table(path)
sample_df.head(2)
# ## Distribution of mutation counts for genes
# In[4]:
gene_df = mutation_df.sum(axis='rows').rename('n_mutations').reset_index()
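# log1p maps genes with zero mutations to 0 instead of -inf, keeping them on the plot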
gene_df['n_mutations_log1p'] = numpy.log1p(gene_df.n_mutations)
gene_df.head(2)
# In[5]:
ax = seaborn.distplot(gene_df.n_mutations_log1p)
xticks = ax.get_xticks()
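# relabel the log-scale ticks with raw counts via the inverse transform expm1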
xticklabels = numpy.expm1(xticks).round().astype(int)
axis_texts = ax.set_xticklabels(xticklabels)
# In[6]:
sum(gene_df.n_mutations == 0)
# ## Distribution of mutation counts for samples
# In[7]:
sample_df = sample_df.merge(
mutation_df.sum(axis='columns').rename('n_mutations').reset_index()
)
sample_df['n_mutations_log1p'] = numpy.log1p(sample_df.n_mutations)
sample_df.head(2)
# In[8]:
# Mutations per sample
ax = seaborn.distplot(sample_df.n_mutations_log1p)
xticks = ax.get_xticks()
xticklabels = numpy.expm1(xticks).round().astype(int)
axis_texts = ax.set_xticklabels(xticklabels)
# ## Diagnosis age versus mutation count for samples
# In[9]:
grid = seaborn.jointplot('n_mutations_log1p', 'age_diagnosed', data=sample_df, kind='hex')
xticks = grid.ax_marg_x.get_xticks()
xticklabels = numpy.expm1(xticks).round().astype(int)
axis_texts = grid.ax_marg_x.set_xticklabels(xticklabels)
# ## Mutation frequency by disease
# In[10]:
genes = mutation_df.columns.tolist()
verbose_mutation_df = sample_df.merge(mutation_df.reset_index())
mutation_freq_df = verbose_mutation_df.groupby('disease').apply(lambda df: df[genes].mean(axis='rows')).assign(
n_mutations = verbose_mutation_df.groupby('disease').apply(len)
)
mutation_freq_df.iloc[:3, :3]
# In[11]:
verbose_mutation_df.head()
# In[12]:
gene_subset = {
'7157': 'TP53', # tumor protein p53
'7428': 'VHL', # von Hippel-Lindau tumor suppressor
'29126': 'CD274', # CD274 molecule
'672': 'BRCA1', # BRCA1, DNA repair associated
'675': 'BRCA2', # BRCA2, DNA repair associated
'238': 'ALK', # anaplastic lymphoma receptor tyrosine kinase
'4221': 'MEN1', # menin 1
'5979': 'RET', # ret proto-oncogene
}
plot_df = (mutation_freq_df
.query("n_mutations > 100")
[list(gene_subset)]
.rename(columns=gene_subset)
)
# Convert to percent of max mutation rate for gene
plot_df = 100 * plot_df.divide(plot_df.max())
ax = seaborn.heatmap(plot_df)
|
[
"seaborn.heatmap",
"numpy.expm1",
"seaborn.distplot",
"seaborn.jointplot",
"pandas.read_table",
"os.path.join",
"numpy.log1p"
] |
[((234, 281), 'os.path.join', 'os.path.join', (['"""data"""', '"""mutation-matrix.tsv.bz2"""'], {}), "('data', 'mutation-matrix.tsv.bz2')\n", (246, 281), False, 'import os\n'), ((296, 332), 'pandas.read_table', 'pandas.read_table', (['path'], {'index_col': '(0)'}), '(path, index_col=0)\n', (313, 332), False, 'import pandas\n'), ((415, 450), 'os.path.join', 'os.path.join', (['"""data"""', '"""samples.tsv"""'], {}), "('data', 'samples.tsv')\n", (427, 450), False, 'import os\n'), ((463, 486), 'pandas.read_table', 'pandas.read_table', (['path'], {}), '(path)\n', (480, 486), False, 'import pandas\n'), ((673, 705), 'numpy.log1p', 'numpy.log1p', (['gene_df.n_mutations'], {}), '(gene_df.n_mutations)\n', (684, 705), False, 'import numpy\n'), ((740, 783), 'seaborn.distplot', 'seaborn.distplot', (['gene_df.n_mutations_log1p'], {}), '(gene_df.n_mutations_log1p)\n', (756, 783), False, 'import seaborn\n'), ((1151, 1185), 'numpy.log1p', 'numpy.log1p', (['sample_df.n_mutations'], {}), '(sample_df.n_mutations)\n', (1162, 1185), False, 'import numpy\n'), ((1245, 1290), 'seaborn.distplot', 'seaborn.distplot', (['sample_df.n_mutations_log1p'], {}), '(sample_df.n_mutations_log1p)\n', (1261, 1290), False, 'import seaborn\n'), ((1489, 1576), 'seaborn.jointplot', 'seaborn.jointplot', (['"""n_mutations_log1p"""', '"""age_diagnosed"""'], {'data': 'sample_df', 'kind': '"""hex"""'}), "('n_mutations_log1p', 'age_diagnosed', data=sample_df,\n kind='hex')\n", (1506, 1576), False, 'import seaborn\n'), ((2753, 2777), 'seaborn.heatmap', 'seaborn.heatmap', (['plot_df'], {}), '(plot_df)\n', (2768, 2777), False, 'import seaborn\n'), ((823, 842), 'numpy.expm1', 'numpy.expm1', (['xticks'], {}), '(xticks)\n', (834, 842), False, 'import numpy\n'), ((1330, 1349), 'numpy.expm1', 'numpy.expm1', (['xticks'], {}), '(xticks)\n', (1341, 1349), False, 'import numpy\n'), ((1624, 1643), 'numpy.expm1', 'numpy.expm1', (['xticks'], {}), '(xticks)\n', (1635, 1643), False, 'import numpy\n')]
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
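#NOTE: `path` is assumed to be provided by the hosting environment;
# it is not defined in this script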
data = np.genfromtxt(path, delimiter=",", skip_header=1)
new=np.concatenate((data,new_record),axis=0)
age=new[:,0]
max_age=np.max(age)
min_age=np.min(age)
age_mean=np.mean(age)
age_std=np.std(age)
zero=data[data[:,2]==0]
race_0=zero[:,2]
one=data[data[:,2]==1]
race_1=one[:,2]
two=data[data[:,2]==2]
race_2=two[:,2]
three=data[data[:,2]==3]
race_3=three[:,2]
four=data[data[:,2]==4]
race_4=four[:,2]
len_1=len(race_1)
len_2=len(race_2)
len_0=len(race_0)
len_3=len(race_3)
len_4=len(race_4)
#Code starts here
value=np.array([len_0,len_1,len_2,len_3,len_4])
minority_race=np.min(value)
senior_citizens=new[new[:,0]>60]
working_hours=senior_citizens[:,6]
working_hours_sum=np.sum(working_hours)
senior_citizens_len=len(senior_citizens)
avg_working_hours=np.mean(working_hours)
high=new[new[:,1]>10]
low=new[new[:,1]<=10]
avg_pay_low=np.mean(low[:,7])
avg_pay_high=np.mean(high[:,7])
|
[
"numpy.sum",
"warnings.filterwarnings",
"numpy.std",
"numpy.genfromtxt",
"numpy.max",
"numpy.min",
"numpy.mean",
"numpy.array",
"numpy.concatenate"
] |
[((82, 115), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (105, 115), False, 'import warnings\n'), ((203, 252), 'numpy.genfromtxt', 'np.genfromtxt', (['path'], {'delimiter': '""","""', 'skip_header': '(1)'}), "(path, delimiter=',', skip_header=1)\n", (216, 252), True, 'import numpy as np\n'), ((258, 300), 'numpy.concatenate', 'np.concatenate', (['(data, new_record)'], {'axis': '(0)'}), '((data, new_record), axis=0)\n', (272, 300), True, 'import numpy as np\n'), ((322, 333), 'numpy.max', 'np.max', (['age'], {}), '(age)\n', (328, 333), True, 'import numpy as np\n'), ((343, 354), 'numpy.min', 'np.min', (['age'], {}), '(age)\n', (349, 354), True, 'import numpy as np\n'), ((365, 377), 'numpy.mean', 'np.mean', (['age'], {}), '(age)\n', (372, 377), True, 'import numpy as np\n'), ((387, 398), 'numpy.std', 'np.std', (['age'], {}), '(age)\n', (393, 398), True, 'import numpy as np\n'), ((753, 798), 'numpy.array', 'np.array', (['[len_0, len_1, len_2, len_3, len_4]'], {}), '([len_0, len_1, len_2, len_3, len_4])\n', (761, 798), True, 'import numpy as np\n'), ((810, 823), 'numpy.min', 'np.min', (['value'], {}), '(value)\n', (816, 823), True, 'import numpy as np\n'), ((913, 934), 'numpy.sum', 'np.sum', (['working_hours'], {}), '(working_hours)\n', (919, 934), True, 'import numpy as np\n'), ((1000, 1022), 'numpy.mean', 'np.mean', (['working_hours'], {}), '(working_hours)\n', (1007, 1022), True, 'import numpy as np\n'), ((1082, 1100), 'numpy.mean', 'np.mean', (['low[:, 7]'], {}), '(low[:, 7])\n', (1089, 1100), True, 'import numpy as np\n'), ((1114, 1133), 'numpy.mean', 'np.mean', (['high[:, 7]'], {}), '(high[:, 7])\n', (1121, 1133), True, 'import numpy as np\n')]
|
""" A collection of common routines for plotting ones """
import time
import matplotlib.pyplot as plt
import numpy as np
from lamberthub.utils.misc import _get_sample_vectors_from_theta_and_rho
class TauThetaPlotter:
"""A class for modelling a discrete grid contour plotter."""
def __init__(self, ax=None, fig=None, Nres=50):
"""
Initializes any instance of the plotter.
Parameters
----------
ax: matplotlib.Axes
The axes in which the lines will be drawn.
fig: matplotlib.Figure
The figure instance for the plot.
Nres: int
            Number of grid points per row and per column.
"""
# Check if axes are available
if ax is None:
_, ax = plt.subplots()
# Check if figure available
if fig is None:
fig, _ = plt.subplots()
# Assign previous figure and axes. Impose equal aspect ratio.
self.ax, self.fig = ax, fig
self.ax.set_aspect("equal")
# Assign the number of points per row and column
self.Nres = Nres
def _get_spans(self, p=0.999):
"""
Returns a lineal span for transfer angle and non-dimensional time of flight.
Parameters
----------
p: float
Percentage of the final value. This is required due to singularities
in some of the solvers at transfer angles of 2pi.
Returns
-------
theta_span: np.array
An array of linearly spaced transfer angles.
tau_span: np.array
An array of linearly spaced non-dimensional transfer times.
"""
# Generate a meshgrid holding any combination of transfer angle and
# non-dimensional time of flight. The 2 * pi value is avoided by
# computing an approximate one. Nevertheless, this last value will not
# be used due to the way `pcolor` function operates.
theta_span, tau_span = [
np.linspace(0, p * 2 * np.pi, self.Nres) for _ in range(2)
]
return theta_span, tau_span
def _draw_colorbar(self, maxval, step, label, cmap, color_vmin):
"""Draws the colorbar for the figure.
Parameters
----------
maxval: float
The maximum value of the figure.
step: float
The step for drawing each of the colorbar ticks.
label: str
The title of the colorbar.
cmap: matplotlib.cmap
            The colormap used in the contour plot.
        color_vmin: str
            Font color used for the tick label drawn at the minimum value.
        """
# Generate the colorbar
self.cbar = self.fig.colorbar(self.collection)
self.cbar.ax.get_yaxis().set_ticks([])
# Append the label and make its position
self.cbar.set_label(label)
self.cbar.ax.get_yaxis().labelpad = 15
# Properly size the aspect ratio of the colorbar
digits = int(np.log10(maxval)) + 1
cbar_title = r"$\times 10^" + f"{digits-2}$" if digits > 2 else None
self.cbar.ax.set_title(cbar_title)
# Compute the step which separates two different levels
step = maxval / cmap.N
# Draw a beautiful colorbar with the legend for the number of iterations
# in the middle
for n in range(int(cmap.N)):
# Select suitable font-color
fontcolor = "black" if n != 0 else color_vmin
# Draw the number within the scale
self.cbar.ax.text(
0.5 * maxval,
step / 2 + step * n,
str(int(step * n / 10 ** (digits - 2))),
ha="center",
va="center",
color=fontcolor,
)
def _draw_ticks(self):
"""Draws the ticks within the axes"""
# Set the X-ticks
self.ax.set_xticks(np.array([0, 0.5, 1, 1.5, 2]) * np.pi)
self.ax.set_xticklabels(
[r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{2\pi}{3}$", r"$2\pi$"]
)
# Set the Y-ticks
self.ax.set_yticks(np.array([0, 0.5, 1, 1.5, 2]) * np.pi)
self.ax.set_yticklabels(
[r"$0$", r"$\frac{\pi}{2}$", r"$\pi$", r"$\frac{2\pi}{3}$", r"$2\pi$"]
)
def _draw_labels(self):
"""Draws axes labels"""
# Set axes labels and title
self.ax.set_xlabel(r"$\Delta \theta$")
self.ax.set_ylabel(r"$\Delta \tau$")
def _measure_performance(solver, theta, tau):
"""
    Measures the performance of a given solver for a particular combination of
    transfer angle and non-dimensional time of flight.
Parameters
----------
solver: function
The Lambert's problem solver function.
theta: float
The transfer angle.
tau: float
The non-dimensional time of flight.
    Returns
    -------
    numiter: int
        Number of iterations required by the solver.
    tpi: float
        Time per iteration.
    total_time: float
        Total time spent inside the solver call.
Notes
-----
    Customization is deliberately disabled to prevent users from accidentally
    generating performance comparisons under different boundary conditions.
"""
# Generate r1_vec and r2_vec such that r2_norm = 2 * r1_norm for various theta
r1_vec, r2_vec = _get_sample_vectors_from_theta_and_rho(theta, 2.0)
# Compute the norms, the chord and semi-perimeter
r1, r2 = [np.linalg.norm(rr) for rr in [r1_vec, r2_vec]]
c = (r1 ** 2 + r2 ** 2 - 2 * r1 * r2 * np.cos(theta)) ** 0.5
s = (r1 + r2 + c) / 2
# Compute the dimensional time from the non-dimensional one using
# Lancaster's expression. This relation is more intuitive as it relates
# revolution cases with multiples of pi.
mu = 1.00
tof = tau / (8 * mu / s ** 3) ** 0.5
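    # equivalently, tau = tof * sqrt(8 * mu / s**3)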
# Filter non-valid input: null value is returned if no iterations were run
if tof == 0 or theta == 0:
return 0, 0, 0
# Solve the problem but only collect the number of iterations
tic = time.perf_counter()
try:
_, _, numiter, tpi = solver(
mu,
r1_vec,
r2_vec,
tof,
M=0,
prograde=True,
maxiter=35,
atol=1e-5,
rtol=1e-7,
full_output=True,
)
tac = time.perf_counter()
except ValueError:
numiter, tpi, tic, tac = (0, 0, 0, 0)
return numiter, tpi, (tac - tic)
# Vectorize the solver
_vec_measure_performance = np.vectorize(
_measure_performance, otypes=[np.ndarray, np.ndarray, np.ndarray], excluded=[0]
)
|
[
"numpy.vectorize",
"time.perf_counter",
"lamberthub.utils.misc._get_sample_vectors_from_theta_and_rho",
"numpy.linalg.norm",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.log10",
"matplotlib.pyplot.subplots"
] |
[((6346, 6444), 'numpy.vectorize', 'np.vectorize', (['_measure_performance'], {'otypes': '[np.ndarray, np.ndarray, np.ndarray]', 'excluded': '[0]'}), '(_measure_performance, otypes=[np.ndarray, np.ndarray, np.\n ndarray], excluded=[0])\n', (6358, 6444), True, 'import numpy as np\n'), ((5144, 5194), 'lamberthub.utils.misc._get_sample_vectors_from_theta_and_rho', '_get_sample_vectors_from_theta_and_rho', (['theta', '(2.0)'], {}), '(theta, 2.0)\n', (5182, 5194), False, 'from lamberthub.utils.misc import _get_sample_vectors_from_theta_and_rho\n'), ((5860, 5879), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (5877, 5879), False, 'import time\n'), ((5264, 5282), 'numpy.linalg.norm', 'np.linalg.norm', (['rr'], {}), '(rr)\n', (5278, 5282), True, 'import numpy as np\n'), ((6167, 6186), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (6184, 6186), False, 'import time\n'), ((843, 857), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (855, 857), True, 'import matplotlib.pyplot as plt\n'), ((940, 954), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (952, 954), True, 'import matplotlib.pyplot as plt\n'), ((2079, 2119), 'numpy.linspace', 'np.linspace', (['(0)', '(p * 2 * np.pi)', 'self.Nres'], {}), '(0, p * 2 * np.pi, self.Nres)\n', (2090, 2119), True, 'import numpy as np\n'), ((2985, 3001), 'numpy.log10', 'np.log10', (['maxval'], {}), '(maxval)\n', (2993, 3001), True, 'import numpy as np\n'), ((3902, 3931), 'numpy.array', 'np.array', (['[0, 0.5, 1, 1.5, 2]'], {}), '([0, 0.5, 1, 1.5, 2])\n', (3910, 3931), True, 'import numpy as np\n'), ((4121, 4150), 'numpy.array', 'np.array', (['[0, 0.5, 1, 1.5, 2]'], {}), '([0, 0.5, 1, 1.5, 2])\n', (4129, 4150), True, 'import numpy as np\n'), ((5354, 5367), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (5360, 5367), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
from copy import deepcopy
import numpy as np
import pytest
from pygam import *
from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList
from pygam.utils import flatten
@pytest.fixture
def chicago_gam(chicago_X_y):
X, y = chicago_X_y
gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
return gam
def test_wrong_length():
"""iterable params must all match lengths
"""
with pytest.raises(ValueError):
SplineTerm(0, lam=[0, 1, 2], penalties=['auto', 'auto'])
def test_num_coefs(mcycle_X_y, wage_X_y):
"""make sure this method gives correct values
"""
X, y = mcycle_X_y
term = Intercept().compile(X)
assert term.n_coefs == 1
term = LinearTerm(0).compile(X)
assert term.n_coefs == 1
term = SplineTerm(0).compile(X)
assert term.n_coefs == term.n_splines
X, y = wage_X_y
term = FactorTerm(2).compile(X)
assert term.n_coefs == 5
term_a = SplineTerm(0).compile(X)
term_b = SplineTerm(1).compile(X)
term = TensorTerm(term_a, term_b).compile(X)
assert term.n_coefs == term_a.n_coefs * term_b.n_coefs
def test_term_list_removes_duplicates():
"""prove that we remove duplicated terms"""
term = SplineTerm(0)
term_list = term + term
assert isinstance(term_list, TermList)
assert len(term_list) == 1
def test_tensor_invariance_to_scaling(chicago_gam, chicago_X_y):
"""a model with tensor terms should give results regardless of input scaling
"""
X, y = chicago_X_y
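    # rescale one marginal input; the fitted coefficients should match the unscaled model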
X[:, 3] = X[:, 3] * 100
gam = PoissonGAM(terms=s(0, n_splines=200) + te(3, 1) + s(2)).fit(X, y)
assert np.allclose(gam.coef_, chicago_gam.coef_, atol=1e-6)
def test_tensor_must_have_at_least_2_marginal_terms():
with pytest.raises(ValueError):
te(0)
def test_tensor_term_expands_args_to_match_penalties_and_terms():
tensor = te(0, 1, lam=3)
assert len(tensor.lam) == 2
assert len(flatten(tensor.lam)) == 2
tensor = te(0, 1, penalties='auto')
assert len(tensor.lam) == 2
assert len(flatten(tensor.lam)) == 2
tensor = te(0, 1, penalties=['auto', ['auto', 'auto']])
assert len(tensor.lam) == 2
assert len(flatten(tensor.lam)) == 3
def test_tensor_term_skips_kwargs_when_marginal_term_is_supplied():
tensor = te(0, s(1), n_splines=420)
assert tensor._terms[0].n_coefs == 420
assert tensor._terms[1].n_coefs != 420
def test_tensor_term_doesnt_accept_tensor_terms():
with pytest.raises(ValueError):
te(l(0), te(0, 1))
def test_tensor_args_length_must_agree_with_number_of_terms():
with pytest.raises(ValueError):
te(0, 1, lam=[3])
with pytest.raises(ValueError):
te(0, 1, lam=[3])
with pytest.raises(ValueError):
te(0, 1, lam=[3, 3, 3])
def test_build_from_info():
"""we can rebuild terms from info
"""
terms = [Intercept(),
LinearTerm(0),
SplineTerm(0),
FactorTerm(0),
TensorTerm(0,1)]
for term in terms:
assert Term.build_from_info(term.info) == term
assert te(0, 1) == TensorTerm(SplineTerm(0, n_splines=10), SplineTerm(1, n_splines=10))
def test_by_variable():
"""our fit on the toy tensor dataset with a by variable on the linear feature
should be similar to the fit with a tensor product of a spline with a linear
term
"""
pass
def test_by_variable_doesnt_exist_in_X(mcycle_X_y):
"""raises a value error if we cannot locate the by variable
"""
term = s(0, by=1)
with pytest.raises(ValueError):
term.compile(mcycle_X_y[0])
def test_term_list_from_info():
"""we can remake a term list from info
"""
term_list = SplineTerm(0) + LinearTerm(1)
assert Term.build_from_info(term_list.info) == term_list
def test_term_list_only_accepts_terms_or_term_list():
TermList()
with pytest.raises(ValueError):
TermList(None)
def test_pop_term_from_term_list():
term_list = SplineTerm(0) + LinearTerm(1) + Intercept()
term_list_2 = deepcopy(term_list)
# by default we pop the last
assert term_list_2.pop() == term_list[-1]
assert term_list_2.pop(0) == term_list[0]
with pytest.raises(ValueError):
term_list_2.pop(1) == term_list[0]
def test_no_multiply():
"""trying to multiply terms raises an error
"""
with pytest.raises(NotImplementedError):
SplineTerm(0) * LinearTerm(1)
term_list = SplineTerm(0) + LinearTerm(1)
with pytest.raises(NotImplementedError):
term_list * term_list
def test_by_is_similar_to_tensor_with_linear_term(toy_interaction_X_y):
"""for simple interactions we can acheive equivalent fits using:
- a spline with a by-variable
- a tensor between spline and a linear term
"""
X, y = toy_interaction_X_y
gam_a = LinearGAM(te(s(0, n_splines=20), l(1))).fit(X, y)
gam_b = LinearGAM(s(0, by=1)).fit(X, y)
r2_a = gam_a.statistics_['pseudo_r2']['explained_deviance']
r2_b = gam_b.statistics_['pseudo_r2']['explained_deviance']
assert np.allclose(r2_a, r2_b)
def test_correct_smoothing_in_tensors(toy_interaction_X_y):
"""check that smoothing penalties are correctly computed across the marginal
dimensions
feature 0 is the sinusoid, so this one needs to be wiggly
feature 1 is the linear function, so this can smoothed heavily
"""
X, y = toy_interaction_X_y
# increase smoothing on linear function heavily, to no detriment
gam = LinearGAM(te(0, 1, lam=[0.6, 10000])).fit(X, y)
assert gam.statistics_['pseudo_r2']['explained_deviance'] > 0.9
# smoothing the sinusoid function heavily reduces fit quality
gam = LinearGAM(te(0, 1, lam=[10000, 0.6])).fit(X, y)
assert gam.statistics_['pseudo_r2']['explained_deviance'] < 0.1
def test_dummy_encoding(wage_X_y, wage_gam):
"""check that dummy encoding produces fewer coefficients than one-hot"""
X, y = wage_X_y
gam = LinearGAM(s(0) + s(1) + f(2, coding='dummy')).fit(X, y)
assert gam._modelmat(X=X, term=2).shape[1] == 4
assert gam.terms[2].n_coefs == 4
assert wage_gam._modelmat(X=X, term=2).shape[1] == 5
assert wage_gam.terms[2].n_coefs == 5
def test_build_cyclic_p_spline(hepatitis_X_y):
"""check the cyclic p spline builds
the r2 for a cyclic gam on a obviously aperiodic function should suffer
"""
X, y = hepatitis_X_y
# unconstrained gam
gam = LinearGAM(s(0)).fit(X, y)
r_unconstrained = gam.statistics_['pseudo_r2']['explained_deviance']
# cyclic gam
gam = LinearGAM(s(0, basis='cp')).fit(X, y)
r_cyclic = gam.statistics_['pseudo_r2']['explained_deviance']
assert r_unconstrained > r_cyclic
def test_cyclic_p_spline_periodicity(hepatitis_X_y):
"""check the cyclic p spline behavioves periodically
namely:
- the value at the edge knots should be the same
- extrapolation should be periodic
"""
X, y = hepatitis_X_y
gam = LinearGAM(s(0, basis='cp')).fit(X, y)
# check periodicity
    left = gam.edge_knots_[0][0]
    right = gam.edge_knots_[0][1]
assert(gam.predict(left) == gam.predict(right))
# check extrapolation
further = right + (right - left)
assert(gam.predict(further) == gam.predict(right))
def test_cyclic_p_spline_custom_period():
"""show that we can set custom edge_knots, and that these affect our model's
performance
"""
# define square wave
X = np.linspace(0, 1, 5000)
y = X > 0.5
# when modeling the full period, we get close with a periodic basis
gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0)).fit(X, y)
assert np.allclose(gam.predict(X), y)
assert np.allclose(gam.edge_knots_[0], [0, 1])
# when modeling a non-periodic function, our periodic model fails
gam = LinearGAM(s(0, basis='cp', n_splines=4, spline_order=0, edge_knots=[0, 0.5])).fit(X, y)
assert np.allclose(gam.predict(X), 0.5)
assert np.allclose(gam.edge_knots_[0], [0, 0.5])
def test_tensor_terms_have_constraints(toy_interaction_X_y):
"""test that we can fit a gam with constrained tensor terms,
even if those constraints are 'none'
"""
X, y = toy_interaction_X_y
gam = LinearGAM(te(0, 1, constraints='none')).fit(X, y)
assert gam._is_fitted
assert gam.terms.hasconstraint
def test_tensor_composite_constraints_equal_penalties():
"""check that the composite constraint matrix for a tensor term
is equivalent to a penalty matrix under the correct conditions
"""
from pygam.penalties import derivative
def der1(*args, **kwargs):
kwargs.update({'derivative':1})
return derivative(*args, **kwargs)
# create a 3D tensor where the penalty should be equal to the constraint
term = te(0, 1, 2,
n_splines=[4, 5, 6],
penalties=der1,
lam=1,
constraints='monotonic_inc')
# check all the dimensions
for i in range(3):
P = term._build_marginal_penalties(i).A
C = term._build_marginal_constraints(i,
-np.arange(term.n_coefs),
constraint_lam=1,
constraint_l2=0).A
assert (P == C).all()
def test_tensor_with_constraints(hepatitis_X_y):
"""we should be able to fit a gam with not 'none' constraints on a tensor term
and observe its effect in reducing the R2 of the fit
"""
X, y = hepatitis_X_y
X = np.c_[X, np.random.randn(len(X))] # add a random interaction data
# constrain useless dimension
gam_useless_constraint = LinearGAM(te(0, 1,
constraints=['none', 'monotonic_dec'],
n_splines=[20, 4]))
gam_useless_constraint.fit(X, y)
# constrain informative dimension
gam_constrained = LinearGAM(te(0, 1,
constraints=['monotonic_dec', 'none'],
n_splines=[20, 4]))
gam_constrained.fit(X, y)
assert gam_useless_constraint.statistics_['pseudo_r2']['explained_deviance'] > 0.5
assert gam_constrained.statistics_['pseudo_r2']['explained_deviance'] < 0.1
class TestRegressions(object):
def test_no_auto_dtype(self):
with pytest.raises(ValueError):
SplineTerm(feature=0, dtype='auto')
def test_compose_penalties(self):
"""penalties should be composable, and this is done by adding all
penalties on a single term, NOT multiplying them.
so a term with a derivative penalty and a None penalty should be equvalent
to a term with a derivative penalty.
"""
base_term = SplineTerm(0)
term = SplineTerm(feature=0, penalties=['auto', 'none'])
# penalties should be equivalent
assert (term.build_penalties() == base_term.build_penalties()).A.all()
# multitple penalties should be additive, not multiplicative,
# so 'none' penalty should have no effect
assert np.abs(term.build_penalties().A).sum() > 0
def test_compose_constraints(self, hepatitis_X_y):
"""we should be able to compose penalties
here we show that a gam with a monotonic increasing penalty composed with a monotonic decreasing
penalty is equivalent to a gam with only an intercept
"""
X, y = hepatitis_X_y
gam_compose = LinearGAM(s(0, constraints=['monotonic_inc', 'monotonic_dec'])).fit(X, y)
gam_intercept = LinearGAM(terms=None).fit(X, y)
assert np.allclose(gam_compose.coef_[-1], gam_intercept.coef_)
def test_constraints_and_tensor(self, chicago_X_y):
"""a model that has consrtraints and tensor terms should not fail to build
because of inability of tensor terms to build a 'none' constraint
"""
X, y = chicago_X_y
gam = PoissonGAM(s(0, constraints='monotonic_inc') + te(3, 1) + s(2)).fit(X, y)
assert gam._is_fitted
|
[
"copy.deepcopy",
"pygam.utils.flatten",
"numpy.allclose",
"pygam.terms.FactorTerm",
"pygam.penalties.derivative",
"pygam.terms.Term.build_from_info",
"pytest.raises",
"pygam.terms.TensorTerm",
"numpy.arange",
"pygam.terms.SplineTerm",
"numpy.linspace",
"pygam.terms.LinearTerm",
"pygam.terms.Intercept",
"pygam.terms.TermList"
] |
[((1283, 1296), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (1293, 1296), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1693, 1746), 'numpy.allclose', 'np.allclose', (['gam.coef_', 'chicago_gam.coef_'], {'atol': '(1e-06)'}), '(gam.coef_, chicago_gam.coef_, atol=1e-06)\n', (1704, 1746), True, 'import numpy as np\n'), ((3908, 3918), 'pygam.terms.TermList', 'TermList', ([], {}), '()\n', (3916, 3918), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4093, 4112), 'copy.deepcopy', 'deepcopy', (['term_list'], {}), '(term_list)\n', (4101, 4112), False, 'from copy import deepcopy\n'), ((5125, 5148), 'numpy.allclose', 'np.allclose', (['r2_a', 'r2_b'], {}), '(r2_a, r2_b)\n', (5136, 5148), True, 'import numpy as np\n'), ((7511, 7534), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(5000)'], {}), '(0, 1, 5000)\n', (7522, 7534), True, 'import numpy as np\n'), ((7754, 7793), 'numpy.allclose', 'np.allclose', (['gam.edge_knots_[0]', '[0, 1]'], {}), '(gam.edge_knots_[0], [0, 1])\n', (7765, 7793), True, 'import numpy as np\n'), ((8018, 8059), 'numpy.allclose', 'np.allclose', (['gam.edge_knots_[0]', '[0, 0.5]'], {}), '(gam.edge_knots_[0], [0, 0.5])\n', (8029, 8059), True, 'import numpy as np\n'), ((486, 511), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (499, 511), False, 'import pytest\n'), ((521, 577), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {'lam': '[0, 1, 2]', 'penalties': "['auto', 'auto']"}), "(0, lam=[0, 1, 2], penalties=['auto', 'auto'])\n", (531, 577), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1811, 1836), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1824, 1836), False, 'import pytest\n'), ((2525, 2550), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2538, 2550), False, 'import pytest\n'), ((2652, 2677), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2665, 2677), False, 'import pytest\n'), ((2715, 2740), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2728, 2740), False, 'import pytest\n'), ((2778, 2803), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2791, 2803), False, 'import pytest\n'), ((2925, 2936), 'pygam.terms.Intercept', 'Intercept', ([], {}), '()\n', (2934, 2936), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((2951, 2964), 'pygam.terms.LinearTerm', 'LinearTerm', (['(0)'], {}), '(0)\n', (2961, 2964), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((2979, 2992), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (2989, 2992), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3007, 3020), 'pygam.terms.FactorTerm', 'FactorTerm', (['(0)'], {}), '(0)\n', (3017, 3020), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3035, 3051), 'pygam.terms.TensorTerm', 'TensorTerm', (['(0)', '(1)'], {}), '(0, 1)\n', (3045, 3051), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3594, 3619), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3607, 3619), False, 
'import pytest\n'), ((3757, 3770), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (3767, 3770), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3773, 3786), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (3783, 3786), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3799, 3835), 'pygam.terms.Term.build_from_info', 'Term.build_from_info', (['term_list.info'], {}), '(term_list.info)\n', (3819, 3835), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3928, 3953), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3941, 3953), False, 'import pytest\n'), ((3963, 3977), 'pygam.terms.TermList', 'TermList', (['None'], {}), '(None)\n', (3971, 3977), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4063, 4074), 'pygam.terms.Intercept', 'Intercept', ([], {}), '()\n', (4072, 4074), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4250, 4275), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (4263, 4275), False, 'import pytest\n'), ((4410, 4444), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4423, 4444), False, 'import pytest\n'), ((4501, 4514), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (4511, 4514), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4517, 4530), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (4527, 4530), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4540, 4574), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4553, 4574), False, 'import pytest\n'), ((8720, 8747), 'pygam.penalties.derivative', 'derivative', (['*args'], {}), '(*args, **kwargs)\n', (8730, 8747), False, 'from pygam.penalties import derivative\n'), ((10816, 10829), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (10826, 10829), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((10845, 10894), 'pygam.terms.SplineTerm', 'SplineTerm', ([], {'feature': '(0)', 'penalties': "['auto', 'none']"}), "(feature=0, penalties=['auto', 'none'])\n", (10855, 10894), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((11679, 11734), 'numpy.allclose', 'np.allclose', (['gam_compose.coef_[-1]', 'gam_intercept.coef_'], {}), '(gam_compose.coef_[-1], gam_intercept.coef_)\n', (11690, 11734), True, 'import numpy as np\n'), ((713, 724), 'pygam.terms.Intercept', 'Intercept', ([], {}), '()\n', (722, 724), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((777, 790), 'pygam.terms.LinearTerm', 'LinearTerm', (['(0)'], {}), '(0)\n', (787, 790), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((843, 856), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (853, 856), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((943, 956), 'pygam.terms.FactorTerm', 'FactorTerm', 
(['(2)'], {}), '(2)\n', (953, 956), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1011, 1024), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (1021, 1024), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1049, 1062), 'pygam.terms.SplineTerm', 'SplineTerm', (['(1)'], {}), '(1)\n', (1059, 1062), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1085, 1111), 'pygam.terms.TensorTerm', 'TensorTerm', (['term_a', 'term_b'], {}), '(term_a, term_b)\n', (1095, 1111), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((1995, 2014), 'pygam.utils.flatten', 'flatten', (['tensor.lam'], {}), '(tensor.lam)\n', (2002, 2014), False, 'from pygam.utils import flatten\n'), ((2109, 2128), 'pygam.utils.flatten', 'flatten', (['tensor.lam'], {}), '(tensor.lam)\n', (2116, 2128), False, 'from pygam.utils import flatten\n'), ((2243, 2262), 'pygam.utils.flatten', 'flatten', (['tensor.lam'], {}), '(tensor.lam)\n', (2250, 2262), False, 'from pygam.utils import flatten\n'), ((3091, 3122), 'pygam.terms.Term.build_from_info', 'Term.build_from_info', (['term.info'], {}), '(term.info)\n', (3111, 3122), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3166, 3193), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {'n_splines': '(10)'}), '(0, n_splines=10)\n', (3176, 3193), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((3195, 3222), 'pygam.terms.SplineTerm', 'SplineTerm', (['(1)'], {'n_splines': '(10)'}), '(1, n_splines=10)\n', (3205, 3222), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4031, 4044), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (4041, 4044), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4047, 4060), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (4057, 4060), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4454, 4467), 'pygam.terms.SplineTerm', 'SplineTerm', (['(0)'], {}), '(0)\n', (4464, 4467), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((4470, 4483), 'pygam.terms.LinearTerm', 'LinearTerm', (['(1)'], {}), '(1)\n', (4480, 4483), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((10409, 10434), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (10422, 10434), False, 'import pytest\n'), ((10448, 10483), 'pygam.terms.SplineTerm', 'SplineTerm', ([], {'feature': '(0)', 'dtype': '"""auto"""'}), "(feature=0, dtype='auto')\n", (10458, 10483), False, 'from pygam.terms import Term, Intercept, SplineTerm, LinearTerm, FactorTerm, TensorTerm, TermList\n'), ((9175, 9198), 'numpy.arange', 'np.arange', (['term.n_coefs'], {}), '(term.n_coefs)\n', (9184, 9198), True, 'import numpy as np\n')]
|
from serial import Serial
from PyQt5.QtCore import QObject, QTimer
from PyQt5.QtWidgets import QMessageBox
from PyQt5 import QtTest
import numpy as np
class Stages(QObject):
def __init__(self,parent=None):
super().__init__()
        # 1 We may need to add a message box before doing FRF
# 2 Also, probably better to move to ref position in unreferenced mode
self.mainWindow=parent
self.sample_y_pos = 165
self.sample_x_pos = 52.57
self.axis_names = ['Y','X','Z']
self.axis_max = [204., 102., 12.5]
self.axis_signs = [-1., -1., 1.]
self.tol = [[1,30], [self.sample_x_pos-3,self.axis_max[1]-self.sample_x_pos-3], [0.1,0]] ## do not change this
self.offsets = [1., self.sample_x_pos, 0.1]
self.home = [0., 0., 0]
self.vels = [10, 0.5, 0.5]
self.accs = [0.1,0.1, 0.1]
self.positions = np.asarray([0.,0.,0.]) # YXZ
self.steps = np.asarray([.1,.1,.1])
self.stop = False
self.pos_box = [self.mainWindow.stage_y_pos,self.mainWindow.stage_x_pos,self.mainWindow.stage_z_pos]
self.pos_boxv =[self.mainWindow.stage_y_posv,self.mainWindow.stage_x_posv,self.mainWindow.stage_z_posv]
self.step_box =[self.mainWindow.y_step,self.mainWindow.x_step,self.mainWindow.z_step]
self.step_button=[[self.mainWindow.y_plus_btn,self.mainWindow.y_minus_btn],
[self.mainWindow.x_plus_btn,self.mainWindow.x_minus_btn],
[self.mainWindow.z_plus_btn,self.mainWindow.z_minus_btn]]
try:
self.ser = Serial('COM6', baudrate=115200, timeout=2) # factory setting
self.mainWindow.halt_stages_btn.clicked.connect(self.stop_stages)
self.mainWindow.home_stages_btn.clicked.connect(self.home_stage)
self.mainWindow.stage_set.clicked.connect(self.set_move)
self.step_box[0].setValue(self.steps[0])
self.step_box[1].setValue(self.steps[1])
self.step_box[2].setValue(self.steps[2])
self.step_box[0].valueChanged.connect(lambda axis: self.step_changed(axis=0))
self.step_box[1].valueChanged.connect(lambda axis: self.step_changed(axis=1))
self.step_box[2].valueChanged.connect(lambda axis: self.step_changed(axis=2))
self.step_button[0][0].clicked.connect(lambda args: self.step_move(args=(0,1)))
self.step_button[0][1].clicked.connect(lambda args: self.step_move(args=(0,-1)))
self.step_button[1][0].clicked.connect(lambda args: self.step_move(args=(1,1)))
self.step_button[1][1].clicked.connect(lambda args: self.step_move(args=(1,-1)))
self.step_button[2][0].clicked.connect(lambda args: self.step_move(args=(2,1)))
self.step_button[2][1].clicked.connect(lambda args: self.step_move(args=(2,-1)))
self.stage_init()
self.timer = QTimer()
self.timer.setInterval(500) # refresh position info at 2 Hz
self.timer.timeout.connect(self.get_positions)
self.timer.start()
print('Stage connected')
        except Exception:
self.ser=None
self.mainWindow.stage_set.setEnabled(False)
self.mainWindow.halt_stages_btn.setEnabled(False)
self.mainWindow.home_stages_btn.setEnabled(False)
for i in range(3):
self.pos_box[i].setEnabled(False)
self.pos_boxv[i].setEnabled(False)
self.step_box[i].setEnabled(False)
self.step_button[i][0].setEnabled(False)
self.step_button[i][1].setEnabled(False)
print('Stage not available')
def close(self):
if self.ser:
print('homing the stage...')
#self.safe_move_axes(self.home)
self.timer.stop()
for axis in range(3):
self.ser.write(("SVO "+str(axis+1)+" 0 \n").encode())
self.ser.close()
print('Stage disconnected')
self.deleteLater()
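    # Raw controller readings map to user coordinates via
    #   user = (sign * raw) % axis_max - offset
    # move() inside move_axis applies the inverse mapping.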
def get_positions(self):
get_position_command=("POS?\n").encode()
self.ser.write(get_position_command)
for axis in range(3):
v = float(self.ser.readline().decode()[2:])
self.positions[axis] = (self.axis_signs[axis]*v) % self.axis_max[axis]-self.offsets[axis]
self.pos_box[axis].setText(format(self.positions[axis],'.3f'))
def home_stage(self):
reply = QMessageBox.question(self.mainWindow, 'Home stage?', 'Do you want to move the stage home?',
QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
self.safe_move_axes(self.home)
### moving functions ###
def move_axis(self,axis,pos):
def move(axis,pos):
new_pos=self.axis_signs[axis]*(pos + self.offsets[axis]) % self.axis_max[axis]
move_command=("MOV "+str(axis+1)+" {} \n".format(new_pos)).encode()
self.ser.write(move_command)
if self.isValid(axis,pos) and not self.stop:
if (abs(pos-self.positions[axis])<15):
move(axis,pos)
else:
self.set_acc(axis,10)
move(axis,pos)
self.wait()
self.set_acc(axis,1)
def safe_move_axes(self,target,safe=True):
self.stop=False
for axis in range(3):
self.pos_boxv[axis].setValue(target[axis])
if safe:
self.move_axis(2,self.home[2])
self.wait()
self.move_axis(1,target[1])
self.move_axis(0,target[0])
self.wait()
self.move_axis(2,target[2])
else:
self.move_axis(1,target[1])
self.move_axis(0,target[0])
self.move_axis(2,target[2])
def isValid(self,axis,pos):
        # Allow the stage to go up only when Y is within 3 mm of the sample position (sample_y_pos)
if ((axis==2) and (pos>self.positions[axis]) and
((self.positions[0]<(self.sample_y_pos-3)) or (self.positions[0]>(self.sample_y_pos+3)))):
self.mainWindow.info_area.setText('Z moving up is only allowed when ' + str(self.sample_y_pos-3) +" <= Y <= " + str(self.sample_y_pos+3))
return False
        # Forbid Y moves more than 5 mm from the sample position while Z is nonzero
if ((axis==0) and (self.positions[2]>0.01) and
((pos<(self.sample_y_pos-5)) or (pos>(self.sample_y_pos+5)))):
            self.mainWindow.info_area.setText('Y movement beyond ' + str(self.sample_y_pos-5) +" <= Y <= " + str(self.sample_y_pos+5)
                                              +' is not allowed when Z>0.01')
return False
## Check the value within the limit
pos=pos+self.offsets[axis]
if (pos>=self.tol[axis][0]) and (pos<=(self.axis_max[axis]-self.tol[axis][1])):
return True
else:
self.mainWindow.info_area.setText(self.axis_names[axis]+' axis beyond the range')
self.mainWindow.info_area.append('The value should be between '+str(self.tol[axis][0]-self.offsets[axis])
+' and ' +str(self.axis_max[axis]-self.tol[axis][1]-self.offsets[axis]))
return False
def wait(self):
moving = True
while moving:
QtTest.QTest.qWait(500) # Check every .5 second
self.ser.write(b'\x05')
s=int(self.ser.readline().decode("utf-8"))
            moving = (s != 0)
#### Interface functions ####
    def set_move(self, checked=False):
        # `checked` is the boolean emitted by QPushButton.clicked
        target=[self.pos_boxv[axis].value() for axis in range(3)]
        XYdist=np.sqrt((target[0]-self.positions[0])**2+(target[1]-self.positions[1])**2)
        Zdist=np.abs(target[2]-self.positions[2])  # currently informational only
        if XYdist>10:
            self.safe_move_axes(target)
        else:
            self.safe_move_axes(target,safe=False)
def step_move(self,args):
axis = args[0]
direction = args[1] # sign should be 1 or -1
self.stop=False
pos=self.positions[axis]+self.steps[axis]*direction
self.pos_boxv[axis].setValue(pos)
self.move_axis(axis,pos)
### msc functions ###
def step_changed(self,axis):
self.steps[axis] = self.step_box[axis].value()
def stop_stages(self):
self.stop=True
stop_command = ("STP\n").encode()
self.ser.write(stop_command)
def set_acc(self,axis,scaling):
self.ser.write(("ACC "+str(axis+1)+" {} \n".format(self.accs[axis]*scaling)).encode())
self.ser.write(("DEC "+str(axis+1)+" {} \n".format(self.accs[axis]*scaling)).encode())
def stage_init(self):
## initializing the stage
for axis in range(3):
self.ser.write(("SVO "+str(axis+1)+" 1 \n").encode())
reference = 1
self.ser.write("FRF? \n".encode())
for axis in range(3):
reference *= int(self.ser.readline().decode()[2])
if reference==1:
print('stage initialized')
else:
print('stage not initialized')
            print('stage initializing...')
self.ser.write(("FRF \n").encode()) ## This is extremely slow. Needs improvement
self.wait()
print('stage initialized')
## Setting preferences, homing stages
for axis in range(3):
self.ser.write(("VEL "+str(axis+1)+" {} \n".format(self.vels[axis])).encode())
self.set_acc(axis,1)
self.get_positions()
print('homing the stage...')
#self.safe_move_axes(target=self.home)
self.wait()
self.get_positions()
for axis in range(3):
self.pos_boxv[axis].setValue(self.positions[axis])
|
[
"serial.Serial",
"PyQt5.QtCore.QTimer",
"numpy.abs",
"numpy.asarray",
"PyQt5.QtTest.QTest.qWait",
"PyQt5.QtWidgets.QMessageBox.question",
"numpy.sqrt"
] |
[((1056, 1083), 'numpy.asarray', 'np.asarray', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1066, 1083), True, 'import numpy as np\n'), ((1110, 1137), 'numpy.asarray', 'np.asarray', (['[0.1, 0.1, 0.1]'], {}), '([0.1, 0.1, 0.1])\n', (1120, 1137), True, 'import numpy as np\n'), ((5024, 5173), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self.mainWindow', '"""Home stage?"""', '"""Do you want to move the stage home?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self.mainWindow, 'Home stage?',\n 'Do you want to move the stage home?', QMessageBox.Yes | QMessageBox.No,\n QMessageBox.No)\n", (5044, 5173), False, 'from PyQt5.QtWidgets import QMessageBox\n'), ((8771, 8860), 'numpy.sqrt', 'np.sqrt', (['((target[0] - self.positions[0]) ** 2 + (target[1] - self.positions[1]) ** 2)'], {}), '((target[0] - self.positions[0]) ** 2 + (target[1] - self.positions[\n 1]) ** 2)\n', (8778, 8860), True, 'import numpy as np\n'), ((8860, 8874), 'numpy.abs', 'np.abs', (['target'], {}), '(target)\n', (8866, 8874), True, 'import numpy as np\n'), ((1807, 1849), 'serial.Serial', 'Serial', (['"""COM6"""'], {'baudrate': '(115200)', 'timeout': '(2)'}), "('COM6', baudrate=115200, timeout=2)\n", (1813, 1849), False, 'from serial import Serial\n'), ((3233, 3241), 'PyQt5.QtCore.QTimer', 'QTimer', ([], {}), '()\n', (3239, 3241), False, 'from PyQt5.QtCore import pyqtSignal, QObject, QTimer\n'), ((8391, 8414), 'PyQt5.QtTest.QTest.qWait', 'QtTest.QTest.qWait', (['(500)'], {}), '(500)\n', (8409, 8414), False, 'from PyQt5 import QtTest\n')]
|
import numpy as np
from scipy.ndimage import gaussian_filter1d
from . import ExpFilter, Source, Visualizer
from .melbank import compute_melmat
class Sampler:
y_rolling: np.ndarray
source: Source
_gamma_table = None
def __init__(self, source: Source, visualizer: Visualizer, gamma_table_path: str = None, num_pixels: int = 60,
                 max_pixels_per_packet: int = 126, min_volume_threshold: float = 1e-7,
num_frames_rolling_window: int = 2, num_frequency_bins: int = 24,
min_freq: int = 200, max_freq: int = 12000
):
self.num_pixels = num_pixels
self.source = source
self.visualizer = visualizer
self.pixels = np.tile(1, (3, self.num_pixels))
self.prev_sample = np.tile(253, (3, self.num_pixels))
self.y_rolling = np.random.rand(num_frames_rolling_window, int(source.rate / source.fps)) / 1e16
self.fft_window = np.hamming(int(source.rate / source.fps) * num_frames_rolling_window)
self.mel_gain = ExpFilter(np.tile(1e-1, num_frequency_bins),
alpha_decay=0.01, alpha_rise=0.99)
self.mel_smoothing = ExpFilter(np.tile(1e-1, num_frequency_bins),
alpha_decay=0.5, alpha_rise=0.99)
self.mel_y, _ = compute_melmat(num_mel_bands=num_frequency_bins,
freq_min=min_freq,
freq_max=max_freq,
num_fft_bands=int(source.rate * num_frames_rolling_window / (2.0 * source.fps)),
sample_rate=source.rate)
self.min_vol = min_volume_threshold
if gamma_table_path:
self._gamma_table = np.load(gamma_table_path)
if max_pixels_per_packet:
self.max_pixels_per_packet = max_pixels_per_packet
def sample(self) -> bytes:
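        # Encode every pixel that changed since the last frame as four bytes (LED index, R, G, B) and return one byte string.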
        # Clip values to [0, 255] and cast to integer
        p = np.clip(self.pixels, 0, 255).astype(int)
        if self._gamma_table is not None:  # truth-testing a numpy array raises ValueError
p = self._gamma_table[p]
idxs = [i for i in range(p.shape[1]) if not np.array_equal(p[:, i], self.prev_sample[:, i])]
n_packets = len(idxs) // self.max_pixels_per_packet + 1
idxs = np.array_split(idxs, n_packets)
m = []
for idx in idxs:
for i in idx:
m.append(i) # Index of pixel to change
m.append(p[0][i]) # Pixel red value
m.append(p[1][i]) # Pixel green value
m.append(p[2][i]) # Pixel blue value
self.prev_sample = np.copy(p)
return bytes(m)
def update_sample(self) -> np.ndarray:
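        # Roll the audio buffer, window and zero-pad it for the FFT, apply the mel filterbank, then gain-normalize and hand off to the visualizer.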
y = self.source.audio_sample() / 2.0 ** 15
self.y_rolling[:-1] = self.y_rolling[1:]
self.y_rolling[-1, :] = np.copy(y)
y_data = np.concatenate(self.y_rolling, axis=0).astype(np.float32)
vol = np.max(np.abs(y_data))
if vol < self.min_vol:
self.pixels = np.tile(0, (3, self.num_pixels))
else:
rolling_len = len(y_data)
n_zeros = 2 ** int(np.ceil(np.log2(rolling_len))) - rolling_len
# Pad with zeros until the next power of two
y_data *= self.fft_window
y_padded = np.pad(y_data, (0, n_zeros), mode='constant')
# Construct a Mel filterbank from the FFT data
mel = np.atleast_2d(
np.abs(np.fft.rfft(y_padded)[:rolling_len // 2])
).T * self.mel_y.T
# Scale data to values more suitable for visualization
mel = np.sum(mel, axis=0)
mel = mel ** 2.0
# Gain normalization
self.mel_gain.update(np.max(gaussian_filter1d(mel, sigma=1.0)))
mel /= self.mel_gain.value
mel = self.mel_smoothing.update(mel)
# Map filterbank output onto LED strip
self.pixels = self.visualizer.visualize(mel)
return self.pixels
|
[
"numpy.pad",
"numpy.load",
"numpy.fft.rfft",
"numpy.abs",
"numpy.sum",
"numpy.copy",
"numpy.array_equal",
"scipy.ndimage.gaussian_filter1d",
"numpy.log2",
"numpy.clip",
"numpy.tile",
"numpy.array_split",
"numpy.concatenate"
] |
[((719, 751), 'numpy.tile', 'np.tile', (['(1)', '(3, self.num_pixels)'], {}), '(1, (3, self.num_pixels))\n', (726, 751), True, 'import numpy as np\n'), ((779, 813), 'numpy.tile', 'np.tile', (['(253)', '(3, self.num_pixels)'], {}), '(253, (3, self.num_pixels))\n', (786, 813), True, 'import numpy as np\n'), ((2279, 2310), 'numpy.array_split', 'np.array_split', (['idxs', 'n_packets'], {}), '(idxs, n_packets)\n', (2293, 2310), True, 'import numpy as np\n'), ((2622, 2632), 'numpy.copy', 'np.copy', (['p'], {}), '(p)\n', (2629, 2632), True, 'import numpy as np\n'), ((2833, 2843), 'numpy.copy', 'np.copy', (['y'], {}), '(y)\n', (2840, 2843), True, 'import numpy as np\n'), ((1049, 1081), 'numpy.tile', 'np.tile', (['(0.1)', 'num_frequency_bins'], {}), '(0.1, num_frequency_bins)\n', (1056, 1081), True, 'import numpy as np\n'), ((1192, 1224), 'numpy.tile', 'np.tile', (['(0.1)', 'num_frequency_bins'], {}), '(0.1, num_frequency_bins)\n', (1199, 1224), True, 'import numpy as np\n'), ((1778, 1803), 'numpy.load', 'np.load', (['gamma_table_path'], {}), '(gamma_table_path)\n', (1785, 1803), True, 'import numpy as np\n'), ((2941, 2955), 'numpy.abs', 'np.abs', (['y_data'], {}), '(y_data)\n', (2947, 2955), True, 'import numpy as np\n'), ((3014, 3046), 'numpy.tile', 'np.tile', (['(0)', '(3, self.num_pixels)'], {}), '(0, (3, self.num_pixels))\n', (3021, 3046), True, 'import numpy as np\n'), ((3293, 3338), 'numpy.pad', 'np.pad', (['y_data', '(0, n_zeros)'], {'mode': '"""constant"""'}), "(y_data, (0, n_zeros), mode='constant')\n", (3299, 3338), True, 'import numpy as np\n'), ((3612, 3631), 'numpy.sum', 'np.sum', (['mel'], {'axis': '(0)'}), '(mel, axis=0)\n', (3618, 3631), True, 'import numpy as np\n'), ((1991, 2019), 'numpy.clip', 'np.clip', (['self.pixels', '(0)', '(255)'], {}), '(self.pixels, 0, 255)\n', (1998, 2019), True, 'import numpy as np\n'), ((2861, 2899), 'numpy.concatenate', 'np.concatenate', (['self.y_rolling'], {'axis': '(0)'}), '(self.y_rolling, axis=0)\n', (2875, 2899), True, 'import numpy as np\n'), ((2151, 2198), 'numpy.array_equal', 'np.array_equal', (['p[:, i]', 'self.prev_sample[:, i]'], {}), '(p[:, i], self.prev_sample[:, i])\n', (2165, 2198), True, 'import numpy as np\n'), ((3734, 3767), 'scipy.ndimage.gaussian_filter1d', 'gaussian_filter1d', (['mel'], {'sigma': '(1.0)'}), '(mel, sigma=1.0)\n', (3751, 3767), False, 'from scipy.ndimage import gaussian_filter1d\n'), ((3138, 3158), 'numpy.log2', 'np.log2', (['rolling_len'], {}), '(rolling_len)\n', (3145, 3158), True, 'import numpy as np\n'), ((3454, 3475), 'numpy.fft.rfft', 'np.fft.rfft', (['y_padded'], {}), '(y_padded)\n', (3465, 3475), True, 'import numpy as np\n')]
|
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..','..'))
import numpy as np
import commpy
from sdr_utils import vector as vec
from sdr_utils import plot_two_signals
class synchronization():
def __init__(self, param):
self.halfpreamble = param.halfpreamble
self.full_pream_len = len(self.halfpreamble)*2
self.Ncp = param.Ncp
self.Ncs = param.Ncs
self.K = param.K
self.M = param.M
# self.B = param.B
self.N = param.K * param.M
# def sync(self,data):
    # ''' returns a tuple of 2-D arrays (payload, preamble), where the 1st dimension is the number of detected
    # preambles and the second dimension is the length of the payload / data'''
# preamble_starts = self.detect_preamble_starts(data)
# preamble = np.vstack([data[(start):(start + self.full_pream_len)] for start in preamble_starts])
# payload = np.vstack(
# [data[(start + self.full_pream_len + self.Ncs+ self.Ncp ) + np.arange(self.N)] for start in preamble_starts]
# )
# return (payload, preamble)
def detect_preamble_starts(self,data):
self.half_peam_len = len(self.halfpreamble)
metric = self._calc_metric(data)
peak_locs = self._find_metric_peaks(metric)
#peak_locs = self._find_single_peak(metric)
preamble_starts = peak_locs - int(self.half_peam_len/2)
#print(preamble_starts)
#plot_two_signals(data, metric) # XXX
return preamble_starts
def _calc_metric(self,data):
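        # Cross-correlate with one preamble half; the two identical halves yield twin peaks, which the shifted sum merges into a single strong peak.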
cross_corr = np.correlate(data,self.halfpreamble, mode = "same")
#plot_two_signals(data, cross_corr) # XXX
metric = cross_corr + vec.shift(cross_corr, -self.half_peam_len)
metric = (metric.real)**2 + (metric.imag)**2
#plot_two_signals(cross_corr, metric) # XXX
autocorr = self._calc_moving_autocorr(data)
#plot_two_signals(metric/max(abs(metric)), autocorr/max(abs(autocorr)), same_axis=False)
return metric*autocorr
def _calc_threshold(self, metric):
half_peam_len = len(self.halfpreamble)
        threshold = np.zeros(len(metric))  # zeros, not empty: threshold[0] is read before it is ever written
        metric = np.pad(metric,(half_peam_len,0),'constant')
for i in range(half_peam_len+1, len(metric)):
window = metric[i-half_peam_len:i]
window[np.where(window>threshold[i-half_peam_len-1])]/=2
threshold[i-half_peam_len] = np.sum(window)
threshold = vec.shift(threshold/2,half_peam_len//4,mode="same",fill_value=threshold[-1]/2)
threshold[np.where(threshold<max(metric)/100)]= max(metric)/100
#plot_two_signals(metric, threshold, same_axis=True)
return threshold
def _find_metric_peaks(self, metric):
threshold = self._calc_threshold(metric)
# step 1: find peaks
locs = np.where(
(metric>vec.shift(metric,1)) & (metric>vec.shift(metric,-1,fill_value=metric[-1])) &
(metric>threshold)
)[0]
# step 2: find max peak in window with length of fullpreamble
last_peak = metric[locs[0]]
last_loc = locs[0]
locs_out = np.array([],int)
for l in locs:
if ((l - last_loc) > (self.half_peam_len+5)):
locs_out = np.append(locs_out,last_loc)
last_peak = 0.0
if last_peak<metric[l]:
last_peak = metric[l]
last_loc = l
locs_out = np.append(locs_out,last_loc)
return locs_out
def _find_single_peak(self, metric):
threshold = self._calc_threshold(metric)
loc = np.argmax(metric)
if metric[loc]>threshold[loc]:
return np.array([loc])
else:
return np.array([])
def _calc_moving_autocorr(self, data):
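        # Multiply the signal by a copy delayed by half a preamble and apply two moving sums; the repeated preamble halves make this metric peak.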
half_peam_len = len(self.halfpreamble)
Ncp_cs = self.Ncp + self.Ncs
autocorr = data * vec.shift(data,half_peam_len)
autocorr_metric = np.empty_like(autocorr)
for i in range(half_peam_len, len(autocorr)):
autocorr_metric[i-half_peam_len] = np.sum(autocorr[i-half_peam_len:i])
autocorr_metric = np.abs(autocorr_metric)
autocorr_out = np.empty_like(autocorr_metric)
for i in range(Ncp_cs-2, len(autocorr)):
autocorr_out[i-Ncp_cs-2] = np.sum(autocorr_metric[i-Ncp_cs-2:i])
return autocorr_out/max(autocorr_out) #vec.shift(autocorr_out,Ncp_cs+half_peam_len)
|
[
"numpy.pad",
"numpy.abs",
"numpy.sum",
"numpy.argmax",
"os.path.dirname",
"numpy.empty_like",
"sdr_utils.vector.shift",
"numpy.append",
"numpy.where",
"numpy.array",
"numpy.correlate"
] |
[((47, 72), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (62, 72), False, 'import sys, os\n'), ((1644, 1694), 'numpy.correlate', 'np.correlate', (['data', 'self.halfpreamble'], {'mode': '"""same"""'}), "(data, self.halfpreamble, mode='same')\n", (1656, 1694), True, 'import numpy as np\n'), ((2263, 2309), 'numpy.pad', 'np.pad', (['metric', '(half_peam_len, 0)', '"""constant"""'], {}), "(metric, (half_peam_len, 0), 'constant')\n", (2269, 2309), True, 'import numpy as np\n'), ((2558, 2650), 'sdr_utils.vector.shift', 'vec.shift', (['(threshold / 2)', '(half_peam_len // 4)'], {'mode': '"""same"""', 'fill_value': '(threshold[-1] / 2)'}), "(threshold / 2, half_peam_len // 4, mode='same', fill_value=\n threshold[-1] / 2)\n", (2567, 2650), True, 'from sdr_utils import vector as vec\n'), ((3268, 3285), 'numpy.array', 'np.array', (['[]', 'int'], {}), '([], int)\n', (3276, 3285), True, 'import numpy as np\n'), ((3584, 3613), 'numpy.append', 'np.append', (['locs_out', 'last_loc'], {}), '(locs_out, last_loc)\n', (3593, 3613), True, 'import numpy as np\n'), ((3745, 3762), 'numpy.argmax', 'np.argmax', (['metric'], {}), '(metric)\n', (3754, 3762), True, 'import numpy as np\n'), ((4103, 4126), 'numpy.empty_like', 'np.empty_like', (['autocorr'], {}), '(autocorr)\n', (4116, 4126), True, 'import numpy as np\n'), ((4293, 4316), 'numpy.abs', 'np.abs', (['autocorr_metric'], {}), '(autocorr_metric)\n', (4299, 4316), True, 'import numpy as np\n'), ((4341, 4371), 'numpy.empty_like', 'np.empty_like', (['autocorr_metric'], {}), '(autocorr_metric)\n', (4354, 4371), True, 'import numpy as np\n'), ((1778, 1820), 'sdr_utils.vector.shift', 'vec.shift', (['cross_corr', '(-self.half_peam_len)'], {}), '(cross_corr, -self.half_peam_len)\n', (1787, 1820), True, 'from sdr_utils import vector as vec\n'), ((2522, 2536), 'numpy.sum', 'np.sum', (['window'], {}), '(window)\n', (2528, 2536), True, 'import numpy as np\n'), ((3823, 3838), 'numpy.array', 'np.array', (['[loc]'], {}), '([loc])\n', (3831, 3838), True, 'import numpy as np\n'), ((3874, 3886), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3882, 3886), True, 'import numpy as np\n'), ((4046, 4076), 'sdr_utils.vector.shift', 'vec.shift', (['data', 'half_peam_len'], {}), '(data, half_peam_len)\n', (4055, 4076), True, 'from sdr_utils import vector as vec\n'), ((4230, 4267), 'numpy.sum', 'np.sum', (['autocorr[i - half_peam_len:i]'], {}), '(autocorr[i - half_peam_len:i])\n', (4236, 4267), True, 'import numpy as np\n'), ((4462, 4503), 'numpy.sum', 'np.sum', (['autocorr_metric[i - Ncp_cs - 2:i]'], {}), '(autocorr_metric[i - Ncp_cs - 2:i])\n', (4468, 4503), True, 'import numpy as np\n'), ((2430, 2481), 'numpy.where', 'np.where', (['(window > threshold[i - half_peam_len - 1])'], {}), '(window > threshold[i - half_peam_len - 1])\n', (2438, 2481), True, 'import numpy as np\n'), ((3396, 3425), 'numpy.append', 'np.append', (['locs_out', 'last_loc'], {}), '(locs_out, last_loc)\n', (3405, 3425), True, 'import numpy as np\n'), ((2976, 2996), 'sdr_utils.vector.shift', 'vec.shift', (['metric', '(1)'], {}), '(metric, 1)\n', (2985, 2996), True, 'from sdr_utils import vector as vec\n'), ((3007, 3051), 'sdr_utils.vector.shift', 'vec.shift', (['metric', '(-1)'], {'fill_value': 'metric[-1]'}), '(metric, -1, fill_value=metric[-1])\n', (3016, 3051), True, 'from sdr_utils import vector as vec\n')]
|
import os
import argparse
import numpy as np
from tqdm import tqdm
from utils.audio import AudioProcessor
from utils.text import phoneme_to_sequence
def load_metadata(metadata_file):
items = []
with open(metadata_file, 'r') as fp:
for line in fp:
cols = line.split('|')
wav_file = cols[0] + '.wav'
text = cols[1]
items.append([text, wav_file])
return items
def generate_phoneme_sequence(text, phoneme_file):
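    # Convert the text to a phoneme-ID sequence (en-us) and cache it to phoneme_file as int32.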
phonemes = phoneme_to_sequence(text, ['phoneme_cleaners'],
language='en-us',
enable_eos_bos=False)
phonemes = np.asarray(phonemes, dtype=np.int32)
np.save(phoneme_file, phonemes)
return phonemes
if __name__ == "__main__":
parser = argparse.ArgumentParser(
        description='Extract phonemes and melspectrograms from LJSpeech for training Tacotron')
parser.add_argument('data_root', type=str, help='Data root directory')
args = parser.parse_args()
wav_dir = os.path.join(args.data_root, 'wavs')
if not os.path.exists(wav_dir):
raise FileNotFoundError('{} not found'.format(wav_dir))
metadata_file = os.path.join(args.data_root, 'metadata.csv')
if not os.path.exists(metadata_file):
raise FileNotFoundError('{} not found'.format(metadata_file))
melspec_dir = os.path.join(args.data_root, 'melspec')
if not os.path.exists(melspec_dir):
os.makedirs(melspec_dir, exist_ok=True)
spec_dir = os.path.join(args.data_root, 'spec')
if not os.path.exists(spec_dir):
os.makedirs(spec_dir, exist_ok=True)
phoneme_dir = os.path.join(args.data_root, 'phoneme')
if not os.path.exists(phoneme_dir):
os.makedirs(phoneme_dir, exist_ok=True)
items = load_metadata(metadata_file)
ap = AudioProcessor()
for text, wav_file in tqdm(items):
prefix = wav_file.replace('.wav', '')
        # generate the phoneme sequence
generate_phoneme_sequence(
text, os.path.join(phoneme_dir, prefix + '.npy'))
wav = np.array(ap.load_wav(os.path.join(wav_dir, wav_file)),
dtype=np.float32)
        # generate the mel spectrogram
melspec = ap.melspectrogram(wav).astype('float32')
np.save(os.path.join(melspec_dir, prefix + '.npy'), melspec)
        # generate the linear spectrogram
spec = ap.spectrogram(wav).astype('float32')
np.save(os.path.join(spec_dir, prefix + '.npy'), spec)
|
[
"tqdm.tqdm",
"numpy.save",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.asarray",
"os.path.exists",
"utils.text.phoneme_to_sequence",
"utils.audio.AudioProcessor",
"os.path.join"
] |
[((495, 586), 'utils.text.phoneme_to_sequence', 'phoneme_to_sequence', (['text', "['phoneme_cleaners']"], {'language': '"""en-us"""', 'enable_eos_bos': '(False)'}), "(text, ['phoneme_cleaners'], language='en-us',\n enable_eos_bos=False)\n", (514, 586), False, 'from utils.text import phoneme_to_sequence\n'), ((668, 704), 'numpy.asarray', 'np.asarray', (['phonemes'], {'dtype': 'np.int32'}), '(phonemes, dtype=np.int32)\n', (678, 704), True, 'import numpy as np\n'), ((709, 740), 'numpy.save', 'np.save', (['phoneme_file', 'phonemes'], {}), '(phoneme_file, phonemes)\n', (716, 740), True, 'import numpy as np\n'), ((803, 919), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Extract phonemes and melspectrograms from LJSpeech for training Tacotron"""'}), "(description=\n 'Extract phonemes and melspectrograms from LJSpeech for training Tacotron')\n", (826, 919), False, 'import argparse\n'), ((1045, 1081), 'os.path.join', 'os.path.join', (['args.data_root', '"""wavs"""'], {}), "(args.data_root, 'wavs')\n", (1057, 1081), False, 'import os\n'), ((1203, 1247), 'os.path.join', 'os.path.join', (['args.data_root', '"""metadata.csv"""'], {}), "(args.data_root, 'metadata.csv')\n", (1215, 1247), False, 'import os\n'), ((1379, 1418), 'os.path.join', 'os.path.join', (['args.data_root', '"""melspec"""'], {}), "(args.data_root, 'melspec')\n", (1391, 1418), False, 'import os\n'), ((1523, 1559), 'os.path.join', 'os.path.join', (['args.data_root', '"""spec"""'], {}), "(args.data_root, 'spec')\n", (1535, 1559), False, 'import os\n'), ((1661, 1700), 'os.path.join', 'os.path.join', (['args.data_root', '"""phoneme"""'], {}), "(args.data_root, 'phoneme')\n", (1673, 1700), False, 'import os\n'), ((1840, 1856), 'utils.audio.AudioProcessor', 'AudioProcessor', ([], {}), '()\n', (1854, 1856), False, 'from utils.audio import AudioProcessor\n'), ((1884, 1895), 'tqdm.tqdm', 'tqdm', (['items'], {}), '(items)\n', (1888, 1895), False, 'from tqdm import tqdm\n'), ((1093, 1116), 'os.path.exists', 'os.path.exists', (['wav_dir'], {}), '(wav_dir)\n', (1107, 1116), False, 'import os\n'), ((1259, 1288), 'os.path.exists', 'os.path.exists', (['metadata_file'], {}), '(metadata_file)\n', (1273, 1288), False, 'import os\n'), ((1430, 1457), 'os.path.exists', 'os.path.exists', (['melspec_dir'], {}), '(melspec_dir)\n', (1444, 1457), False, 'import os\n'), ((1467, 1506), 'os.makedirs', 'os.makedirs', (['melspec_dir'], {'exist_ok': '(True)'}), '(melspec_dir, exist_ok=True)\n', (1478, 1506), False, 'import os\n'), ((1571, 1595), 'os.path.exists', 'os.path.exists', (['spec_dir'], {}), '(spec_dir)\n', (1585, 1595), False, 'import os\n'), ((1605, 1641), 'os.makedirs', 'os.makedirs', (['spec_dir'], {'exist_ok': '(True)'}), '(spec_dir, exist_ok=True)\n', (1616, 1641), False, 'import os\n'), ((1712, 1739), 'os.path.exists', 'os.path.exists', (['phoneme_dir'], {}), '(phoneme_dir)\n', (1726, 1739), False, 'import os\n'), ((1749, 1788), 'os.makedirs', 'os.makedirs', (['phoneme_dir'], {'exist_ok': '(True)'}), '(phoneme_dir, exist_ok=True)\n', (1760, 1788), False, 'import os\n'), ((2015, 2057), 'os.path.join', 'os.path.join', (['phoneme_dir', "(prefix + '.npy')"], {}), "(phoneme_dir, prefix + '.npy')\n", (2027, 2057), False, 'import os\n'), ((2270, 2312), 'os.path.join', 'os.path.join', (['melspec_dir', "(prefix + '.npy')"], {}), "(melspec_dir, prefix + '.npy')\n", (2282, 2312), False, 'import os\n'), ((2417, 2456), 'os.path.join', 'os.path.join', (['spec_dir', "(prefix + '.npy')"], {}), "(spec_dir, prefix + '.npy')\n", (2429, 2456), False, 'import os\n'), ((2095, 2126), 'os.path.join', 'os.path.join', (['wav_dir', 'wav_file'], {}), '(wav_dir, wav_file)\n', (2107, 2126), False, 'import os\n')]
|
import os
from collections import deque
from multiprocessing import Process
import cv2 as cv
import dlib
import numpy as np
from skimage import transform as tf
from tqdm import tqdm
STD_SIZE = (224, 224)
stablePntsIDs = [33, 36, 39, 42, 45]
def shape_to_array(shape):
coords = np.empty((68, 2))
for i in range(0, 68):
coords[i][0] = shape.part(i).x
coords[i][1] = shape.part(i).y
return coords
def cut_patch(img, landmarks, height, width, threshold=5):
center_x, center_y = np.mean(landmarks, axis=0)
if center_y - height < 0:
center_y = height
if center_y - height < 0 - threshold:
raise Exception('too much bias in height')
if center_x - width < 0:
center_x = width
if center_x - width < 0 - threshold:
raise Exception('too much bias in width')
if center_y + height > img.shape[0]:
center_y = img.shape[0] - height
if center_y + height > img.shape[0] + threshold:
raise Exception('too much bias in height')
if center_x + width > img.shape[1]:
center_x = img.shape[1] - width
if center_x + width > img.shape[1] + threshold:
raise Exception('too much bias in width')
cutted_img = np.copy(img[int(round(center_y) - round(height)): int(round(center_y) + round(height)),
int(round(center_x) - round(width)): int(round(center_x) + round(width))])
return cutted_img
def crop_patch(frames, landmarks, mean_face_landmarks):
"""Crop mouth patch
    :param list frames: video frames
    :param list landmarks: interpolated landmarks
    :param ndarray mean_face_landmarks: reference landmarks of the mean face
"""
for frame_idx, frame in enumerate(frames):
if frame_idx == 0:
q_frame, q_landmarks = deque(), deque()
sequence = []
q_landmarks.append(landmarks[frame_idx])
q_frame.append(frame)
if len(q_frame) == 12:
smoothed_landmarks = np.mean(q_landmarks, axis=0)
cur_landmarks = q_landmarks.popleft()
cur_frame = q_frame.popleft()
# -- affine transformation
trans = tf.estimate_transform('similarity', smoothed_landmarks[stablePntsIDs, :], mean_face_landmarks[stablePntsIDs, :])
trans_frame = tf.warp(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)
                trans_frame = trans_frame * 255  # note: output from warp is a double image (value range [0, 1])
trans_frame = trans_frame.astype('uint8')
trans_landmarks = trans(cur_landmarks)
# -- crop mouth patch
sequence.append(cut_patch(trans_frame, trans_landmarks[48:68], 60, 60))
if frame_idx == len(landmarks) - 1:
while q_frame:
cur_frame = q_frame.popleft()
# -- transform frame
trans_frame = tf.warp(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)
                    trans_frame = trans_frame * 255  # note: output from warp is a double image (value range [0, 1])
trans_frame = trans_frame.astype('uint8')
# -- transform landmarks
trans_landmarks = trans(q_landmarks.popleft())
# -- crop mouth patch
sequence.append(cut_patch(trans_frame, trans_landmarks[48:68], 60, 60))
return np.array(sequence)
return None
def linear_interpolate(landmarks, start_idx, stop_idx):
start_landmarks = landmarks[start_idx]
stop_landmarks = landmarks[stop_idx]
delta = stop_landmarks - start_landmarks
for idx in range(1, stop_idx - start_idx):
landmarks[start_idx + idx] = start_landmarks + idx / float(stop_idx - start_idx) * delta
return landmarks
def landmarks_interpolate(landmarks):
"""Interpolate landmarks
    :param list landmarks: landmarks detected in raw videos
"""
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
if not valid_frames_idx:
return None
for idx in range(1, len(valid_frames_idx)):
if valid_frames_idx[idx] - valid_frames_idx[idx - 1] == 1:
continue
else:
landmarks = linear_interpolate(landmarks, valid_frames_idx[idx - 1], valid_frames_idx[idx])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
# -- Corner case: keep frames at the beginning or at the end failed to be detected.
if valid_frames_idx:
landmarks[:valid_frames_idx[0]] = [landmarks[valid_frames_idx[0]]] * valid_frames_idx[0]
landmarks[valid_frames_idx[-1]:] = [landmarks[valid_frames_idx[-1]]] * (len(landmarks) - valid_frames_idx[-1])
valid_frames_idx = [idx for idx, _ in enumerate(landmarks) if _ is not None]
assert len(valid_frames_idx) == len(landmarks), "not every frame has landmark"
return landmarks
def preprocess_sample(file, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
"""
Function to preprocess each data sample.
"""
videoFile = file + ".mp4"
audioFile = file + ".flac"
roiFile = file + ".png"
# Extract the audio from the video file using the FFmpeg utility and save it to a flac file.
if withaudio:
v2aCommand = "ffmpeg -y -v quiet -i " + videoFile + " -ac 1 -ar 16000 -vn " + audioFile
os.system(v2aCommand)
    # read each frame as 224x224 grayscale; the mouth ROI is cropped afterwards
captureObj = cv.VideoCapture(videoFile)
frames = list()
landmarks = list()
while captureObj.isOpened():
ret, frame = captureObj.read()
if ret:
frame = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
if not len(frame) == 224:
frame = cv.resize(frame, (224, 224))
frames.append(frame)
face_rects = face_detector(frame, 0) # Detect face
if len(face_rects) < 1:
landmarks.append(None)
continue
            rect = face_rects[0]  # use the first detected face
landmark = landmark_detector(frame, rect) # Detect face landmarks
landmark = shape_to_array(landmark)
landmarks.append(landmark)
else:
break
captureObj.release()
preprocessed_landmarks = landmarks_interpolate(landmarks)
if preprocessed_landmarks is None:
if defaultcrop == "lrs":
frames = [frame[52:172, 52:172] for frame in frames]
else:
frames = [frame[103: 223, 67: 187] for frame in frames]
else:
frames = crop_patch(frames, preprocessed_landmarks, mean_face_landmarks)
assert frames is not None, "cannot crop from {}.".format(videoFile)
cv.imwrite(roiFile, np.concatenate(frames, axis=1).astype(int))
def preprocess_sample_list(filesList, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
for file in tqdm(filesList, leave=True, desc="Preprocess", ncols=75):
preprocess_sample(file, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop)
def preprocessing(filesList, processes, landmark_detector, mean_face_landmarks, withaudio, defaultcrop):
# Preprocessing each sample
print("\nNumber of data samples to be processed = %d" % (len(filesList)))
print("\n\nStarting preprocessing ....\n")
face_detector = dlib.get_frontal_face_detector()
def splitlist(inlist, chunksize):
return [inlist[x:x + chunksize] for x in range(0, len(inlist), chunksize)]
    filesListSplitted = splitlist(filesList, max(1, len(filesList) // processes))
process_list = []
for subFilesList in filesListSplitted:
p = Process(target=preprocess_sample_list, args=(subFilesList, face_detector, landmark_detector, mean_face_landmarks, withaudio, defaultcrop))
process_list.append(p)
        p.daemon = True  # lowercase attribute; 'Daemon' has no effect
p.start()
for p in process_list:
p.join()
|
[
"cv2.resize",
"tqdm.tqdm",
"numpy.concatenate",
"cv2.cvtColor",
"numpy.empty",
"os.system",
"cv2.VideoCapture",
"numpy.mean",
"numpy.array",
"dlib.get_frontal_face_detector",
"skimage.transform.warp",
"skimage.transform.estimate_transform",
"multiprocessing.Process",
"collections.deque"
] |
[((300, 317), 'numpy.empty', 'np.empty', (['(68, 2)'], {}), '((68, 2))\n', (308, 317), True, 'import numpy as np\n'), ((535, 561), 'numpy.mean', 'np.mean', (['landmarks'], {'axis': '(0)'}), '(landmarks, axis=0)\n', (542, 561), True, 'import numpy as np\n'), ((5524, 5550), 'cv2.VideoCapture', 'cv.VideoCapture', (['videoFile'], {}), '(videoFile)\n', (5539, 5550), True, 'import cv2 as cv\n'), ((6999, 7055), 'tqdm.tqdm', 'tqdm', (['filesList'], {'leave': '(True)', 'desc': '"""Preprocess"""', 'ncols': '(75)'}), "(filesList, leave=True, desc='Preprocess', ncols=75)\n", (7003, 7055), False, 'from tqdm import tqdm\n'), ((7462, 7494), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (7492, 7494), False, 'import dlib\n'), ((5405, 5426), 'os.system', 'os.system', (['v2aCommand'], {}), '(v2aCommand)\n', (5414, 5426), False, 'import os\n'), ((7785, 7927), 'multiprocessing.Process', 'Process', ([], {'target': 'preprocess_sample_list', 'args': '(subFilesList, face_detector, landmark_detector, mean_face_landmarks,\n withaudio, defaultcrop)'}), '(target=preprocess_sample_list, args=(subFilesList, face_detector,\n landmark_detector, mean_face_landmarks, withaudio, defaultcrop))\n', (7792, 7927), False, 'from multiprocessing import Process\n'), ((1966, 1994), 'numpy.mean', 'np.mean', (['q_landmarks'], {'axis': '(0)'}), '(q_landmarks, axis=0)\n', (1973, 1994), True, 'import numpy as np\n'), ((2150, 2266), 'skimage.transform.estimate_transform', 'tf.estimate_transform', (['"""similarity"""', 'smoothed_landmarks[stablePntsIDs, :]', 'mean_face_landmarks[stablePntsIDs, :]'], {}), "('similarity', smoothed_landmarks[stablePntsIDs, :],\n mean_face_landmarks[stablePntsIDs, :])\n", (2171, 2266), True, 'from skimage import transform as tf\n'), ((2290, 2358), 'skimage.transform.warp', 'tf.warp', (['cur_frame'], {'inverse_map': 'trans.inverse', 'output_shape': 'STD_SIZE'}), '(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)\n', (2297, 2358), True, 'from skimage import transform as tf\n'), ((3373, 3391), 'numpy.array', 'np.array', (['sequence'], {}), '(sequence)\n', (3381, 3391), True, 'import numpy as np\n'), ((5708, 5745), 'cv2.cvtColor', 'cv.cvtColor', (['frame', 'cv.COLOR_BGR2GRAY'], {}), '(frame, cv.COLOR_BGR2GRAY)\n', (5719, 5745), True, 'import cv2 as cv\n'), ((1773, 1780), 'collections.deque', 'deque', ([], {}), '()\n', (1778, 1780), False, 'from collections import deque\n'), ((1782, 1789), 'collections.deque', 'deque', ([], {}), '()\n', (1787, 1789), False, 'from collections import deque\n'), ((2881, 2949), 'skimage.transform.warp', 'tf.warp', (['cur_frame'], {'inverse_map': 'trans.inverse', 'output_shape': 'STD_SIZE'}), '(cur_frame, inverse_map=trans.inverse, output_shape=STD_SIZE)\n', (2888, 2949), True, 'from skimage import transform as tf\n'), ((5810, 5838), 'cv2.resize', 'cv.resize', (['frame', '(224, 224)'], {}), '(frame, (224, 224))\n', (5819, 5838), True, 'import cv2 as cv\n'), ((6815, 6845), 'numpy.concatenate', 'np.concatenate', (['frames'], {'axis': '(1)'}), '(frames, axis=1)\n', (6829, 6845), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
import os
import numpy as np
import jax.numpy as jnp
from multiprocessing import Pool, Manager, Condition, Value, Process
import sys
import io
import time
import yaml
import re
import traceback
import subprocess
from .utils import *
from rich.progress import (
Progress,
TextColumn,
BarColumn,
TimeElapsedColumn,
TimeRemainingColumn
)
def sync_variable():
# to synchronize the runner processes with the main process
globals()["manager"] = Manager()
globals()["result_dict"] = manager.dict()
globals()["result_condition"] = Condition()
globals()["result_detail_dict"] = manager.dict()
globals()["tests"] = Value('L', 0)
globals()["fails"] = Value('L', 0)
# rerun the cases that failed in the last run
def get_retry_cases():
print("retry last failed cases...")
if os.access('log/runner_report.log', os.R_OK):
with open('log/runner_report.log') as fp:
cases = []
lines = fp.read().splitlines()
for line in lines:
if line.startswith('PASS '):
continue
cases.append(line.replace('FAIL ', ''))
return cases
if len(cases) == 0:
                print('all passed last time, nothing to retry.')
sys.exit(0)
else:
        print('cannot retry without the last run log.')
sys.exit(-1)
# get cases from arguments
def get_arg_cases(args_cases):
s = lambda l: l.strip()
f = lambda l: l != '' and not l.startswith('#')
if os.access(args_cases, os.R_OK):
with open(args_cases) as fp:
cases = list(filter(f, map(s, fp.read().splitlines())))
elif args_cases != '':
cases = list(filter(f, map(s, args_cases.split(','))))
else:
cases = []
return cases
def get_generator_case():
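    # log/generator_case.log stores one "case_name, case_count" pair per line.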
with open("log/generator_case.log") as fp:
s = lambda l: l.strip()
f = lambda l: l != '' and not l.startswith('#')
generator_info_list = list(filter(f, map(s, fp.read().splitlines())))
generator_case_list = []
generator_num_list = []
for no in range(len(generator_info_list)):
[case_name, case_num] = re.split(r'\s*,\s*', generator_info_list[no])
generator_case_list.append(case_name)
generator_num_list.append(int(case_num))
return [generator_case_list, generator_num_list]
def select_run_case( generator_case_list, generator_num_list, cases ):
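    # Keep only the generated cases whose names contain one of the requested case strings; total_num sums their subtest counts.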
total_num = 0
run_case_list = []
if len(cases) > 0:
for no in range(len(generator_case_list)):
case_name = generator_case_list[no]
for case in cases:
if not case in case_name:
continue
run_case_list.append(case_name)
total_num += generator_num_list[no]
break
else:
run_case_list = generator_case_list
total_num = sum(generator_num_list)
return [run_case_list, total_num]
def process_bar_setup( total_num ):
# progress bar configurations
progress = Progress(
TextColumn("[bold blue]{task.fields[name]}"),
BarColumn(bar_width=None),
"[progress.percentage]{task.percentage:>3.1f}%",
"case_sum:",
TextColumn("[bold red]{task.total}"),
"elapsed:",
TimeElapsedColumn(),
"remaining:",
TimeRemainingColumn()
)
progress.start()
task_id = progress.add_task("runner", name = "runner", total=total_num, start=True)
return [progress, task_id]
def runner_error(case):
with result_condition:
result_dict[case] = "python run failed."
result_detail_dict[case] = ''
with open(f'build/{case}/runner.log', 'w') as f:
f.write( result_dict[case] + '\n' + result_detail_dict[case] + '\n' )
def runner_callback(progress, task_id, completed, total):
progress.update( task_id, completed = completed )
def gen_runner_report( ps, args, generator_case_list, generator_num_list ):
failed_num = 0
# save the runner result into the log file
report = open(f'log/runner_report.log', 'w')
for case, p in ps:
ok = True
p_str = p.get().getvalue()
# find case result in result_dict
if result_dict[case] != "ok":
reason = result_dict[case]
ok = False
if p_str != '':
with open(f'build/{case}/runner.log', 'w') as f:
f.write(p_str)
if not ok:
failed_num += 1
if args.failing_info:
time.sleep(0.5)
print(f'FAIL {case} - {reason}')
report.write(f'FAIL {case} - {reason}\n')
else:
report.write(f'PASS {case}\n')
report.close()
return failed_num
# main entry point of a runner process: run the case in the simulators and check the results
def run_test(case, args):
try:
stdout = sys.stdout
stderr = sys.stderr
output = io.StringIO()
sys.stdout = output
sys.stderr = output
# file information
binary = f'build/{case}/test.elf'
run_mem = f'build/{case}/run.mem'
run_log = f'build/{case}/spike.log'
res_file = f'build/{case}/spike.sig'
check_golden = f'build/{case}/check_golden.npy'
        # load the list of subcases in this file: test number, name, check string and golden data
case_list = np.load( check_golden, allow_pickle=True )
# run elf in spike to check if the elf is right
ret = spike_run(args, run_mem, binary, run_log, res_file)
if ret != 0:
            # if spike failed, mark every subcase as "spike-run" (failed when run in spike),
            # then return and stop testing this file
with result_condition:
result_dict[case] = "spike-run"
result_detail_dict[case] = f'\nspike-run failed!!!\nPlease check the spike log in {run_log} '
fails.value += len(case_list)
tests.value += len(case_list)
with open(f'build/{case}/runner.log', 'w') as f:
f.write( result_dict[case] + '\n' + result_detail_dict[case] + '\n' )
sys.stdout = stdout
sys.stderr = stderr
return output
# use these two variables to keep test info for this case
test_result = ''
test_detail = ''
# use this to count failed subcases in this case
failed_case_list = []
# check the golden result computed by python with the spike result
spike_result = {}
start_dict = {}
read_elf_cmd = args.config['compile']['readelf'] + ' -s ' + binary
try:
addr_begin_sig_str = str( subprocess.check_output( read_elf_cmd + ' | grep begin_signature ', shell=True ), encoding = 'utf-8' )
addr_begin_sig = int( addr_begin_sig_str.split()[1], 16 )
flag_begin_sig = True
except:
flag_begin_sig = False
for test_case in case_list:
if test_case["check_str"] != '':
# when test["check_str"] == 0, no need to check
if flag_begin_sig:
try:
addr_testdata_str = str( subprocess.check_output( read_elf_cmd + f' | grep test_{test_case["no"]}_data ', shell=True ), encoding = 'utf-8' )
addr_testdata = int( addr_testdata_str.split()[1], 16 )
except:
test_result += test_case["name"]+f'_faild_find_{test_case["no"]}_test_data-'
test_detail += f"Can't find symbol test_{test_case['no']}_data, please check build/{case}/test.map.\n"
failed_case_list.append(test_case["name"])
continue
golden = test_case["golden"] = copy_to_dtype( test_case["golden_data"], eval(f'jnp.{test_case["golden_dtype"]}') )
                #many subcases share one signature file, so we need the start offset to locate this case's result
result = from_txt( res_file, golden, addr_testdata - addr_begin_sig )
start_dict[test_case["name"]] = addr_testdata - addr_begin_sig
spike_result[test_case["name"]] = result
#save the python golden result and spike result into check.data file of each case
os.makedirs(f'build/{test_case["name"]}', exist_ok=True)
check_result = check_to_txt( golden, result, f'build/{test_case["name"]}/check.data', test_case["check_str"] )
if not check_result:
# if check failed, set result as "check failed", because the elf can be run in more sims, so don't use result_dict and notify result_condition
test_result += test_case["name"]+"_check failed-"
test_detail += f'The python golden data and spike results of test case {test_case["no"]} in build/{case}/test.S check failed. You can find the data in build/{test_case["name"]}/check.data\n'
failed_case_list.append(test_case["name"])
else:
test_result += test_case["name"]+"_faild_find_begin_signature"
test_detail += f"Can't find symbol begin_signature, please check build/{case}/test.map.\n"
failed_case_list.append(test_case["name"])
        # run the case in the other simulators and compare their results with spike's, which must match
sims_result = sims_run( args, f'build/{case}', binary )
for sim in [ "vcs", "verilator", "gem5" ]:
            if args.config[sim]['path'] is None:
                # no path configured for this sim, so skip running it and judging its result
                continue
if sims_result[sim] != 0:
# sim run failed
                # the elf may still run in other sims, so don't use result_dict or notify result_condition
test_result += sim + "_failed-"
test_detail += f'{binary} runned unsuccessfully in {sim}, please check build/{case}/{sim}.log\n'
failed_case_list = case_list
else:
# sim run successfully, so we compare the sim results with spike results
for test_case in case_list:
if test_case["check_str"] != '':
golden = test_case["golden"]
                        # many cases share one signature file, so use the stored start offset to locate this result
if test_case["name"] in start_dict.keys():
result = from_txt( f'build/{case}/{sim}.sig', golden, start_dict[test_case["name"]] )
else:
test_result += test_case["name"] + '_' + sim + f"_failed_find_{test_case['no']}_start-"
test_detail_dict = f"Can't find test case {test_case['no']} start addr computed when check golden and spike result in build/{case}/test.S, please verify that.\n"
# maybe check failed or other sim failed either so we have this judge s
if test_case["name"] not in failed_case_list:
failed_case_list.append(test_case["name"])
continue
# save the spike result and sim result into diff-sim.data
os.makedirs(f'build/{test_case["name"]}', exist_ok=True)
diff_result = diff_to_txt( spike_result[test_case["name"]], result, f'build/{test_case["name"]}/diff-{sim}.data', "spike", sim )
if not diff_result:
                            # spike and sim results differ: record "_diff failed" in test_result
                            test_result += test_case["name"] + '_' + sim + "_diff failed-"
                            test_detail += f'The results of spike and {sim} of test case {test_case["no"]} in build/{case}/test.S check failed. You can find the data in build/{test_case["name"]}/diff-{sim}.data\n'
                            # the subcase may already have failed the golden check or another sim, so guard against duplicates
if test_case["name"] not in failed_case_list:
failed_case_list.append(test_case["name"])
with result_condition:
if test_result == '':
result_dict[case] = "ok"
result_detail_dict[case] = ''
tests.value += len(case_list)
else:
result_dict[case] = test_result
result_detail_dict[case] = test_detail
fails.value += len(failed_case_list)
tests.value += len(case_list)
with open(f'build/{case}/runner.log', 'w') as f:
f.write( result_dict[case] + '\n' + result_detail_dict[case] + '\n' )
sys.stdout = stdout
sys.stderr = stderr
return output
except:
        if 'output' in locals():  # test for the name, not the (possibly unbound) object
sys.stdout = stdout
sys.stderr = stderr
else:
output = io.StringIO()
result_dict[case] = 'python failed'
error_output = io.StringIO()
traceback.print_tb(sys.exc_info()[2], file=error_output)
error_str = error_output.getvalue()
error_str += "\nUnexpected error: " + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1])
result_detail_dict[case] = error_str
with open(f'build/{case}/runner.log', 'w') as f:
f.write( result_dict[case] + '\n' + result_detail_dict[case] + '\n' )
# print(error_str)
return output
def main(args):
try:
# define some global sync variables to synchronize the runner processes with the main process
sync_variable()
if args.retry:
cases = get_retry_cases()
else:
cases = get_arg_cases(args.cases)
print("looking for the cases...")
[generator_case_list, generator_num_list] = get_generator_case()
[run_case_list, total_num] = select_run_case( generator_case_list, generator_num_list, cases )
[progress, task_id] = process_bar_setup( total_num )
ps = []
with Pool(processes=args.nproc) as pool:
for case in run_case_list:
res = pool.apply_async(run_test, [ case, args ],
callback=lambda _: runner_callback( progress, task_id, tests.value, total_num ),
error_callback=lambda _: runner_error(case) )
ps.append((case, res))
failed_num = gen_runner_report( ps, args, generator_case_list, generator_num_list )
progress.stop()
        # spike can leave the terminal unable to accept input; run stty sane to restore it.
os.system("stty sane")
if failed_num == 0:
print(f'{len(ps)} files running finish, all pass.( {tests.value} tests )')
sys.exit(0)
else:
if args.failing_info:
print(f'{len(ps)} files running finish, {failed_num} failed.( {tests.value} tests, {fails.value} failed.)')
else:
print(f'{len(ps)} files running finish, {failed_num} failed.( {tests.value} tests, {fails.value} failed, please look at the log/runner_report.log for the failing information. )')
sys.exit(-1)
except KeyboardInterrupt:
if 'pool' in locals():
pool.close()
pool.join()
if 'progress' in locals():
progress.stop()
print("Catch KeyboardInterrupt!")
os.system("stty sane")
sys.exit(-1)
if __name__ == "__main__":
main()
|
[
"io.StringIO",
"numpy.load",
"re.split",
"rich.progress.TextColumn",
"os.makedirs",
"multiprocessing.Manager",
"subprocess.check_output",
"multiprocessing.Value",
"rich.progress.TimeElapsedColumn",
"multiprocessing.Condition",
"os.system",
"rich.progress.BarColumn",
"time.sleep",
"rich.progress.TimeRemainingColumn",
"sys.exc_info",
"multiprocessing.Pool",
"os.access",
"sys.exit"
] |
[((494, 503), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (501, 503), False, 'from multiprocessing import Pool, Manager, Condition, Value, Process\n'), ((586, 597), 'multiprocessing.Condition', 'Condition', ([], {}), '()\n', (595, 597), False, 'from multiprocessing import Pool, Manager, Condition, Value, Process\n'), ((676, 689), 'multiprocessing.Value', 'Value', (['"""L"""', '(0)'], {}), "('L', 0)\n", (681, 689), False, 'from multiprocessing import Pool, Manager, Condition, Value, Process\n'), ((715, 728), 'multiprocessing.Value', 'Value', (['"""L"""', '(0)'], {}), "('L', 0)\n", (720, 728), False, 'from multiprocessing import Pool, Manager, Condition, Value, Process\n'), ((829, 872), 'os.access', 'os.access', (['"""log/runner_report.log"""', 'os.R_OK'], {}), "('log/runner_report.log', os.R_OK)\n", (838, 872), False, 'import os\n'), ((1529, 1559), 'os.access', 'os.access', (['args_cases', 'os.R_OK'], {}), '(args_cases, os.R_OK)\n', (1538, 1559), False, 'import os\n'), ((1370, 1382), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (1378, 1382), False, 'import sys\n'), ((3111, 3155), 'rich.progress.TextColumn', 'TextColumn', (['"""[bold blue]{task.fields[name]}"""'], {}), "('[bold blue]{task.fields[name]}')\n", (3121, 3155), False, 'from rich.progress import Progress, TextColumn, BarColumn, TimeElapsedColumn, TimeRemainingColumn\n'), ((3165, 3190), 'rich.progress.BarColumn', 'BarColumn', ([], {'bar_width': 'None'}), '(bar_width=None)\n', (3174, 3190), False, 'from rich.progress import Progress, TextColumn, BarColumn, TimeElapsedColumn, TimeRemainingColumn\n'), ((3278, 3314), 'rich.progress.TextColumn', 'TextColumn', (['"""[bold red]{task.total}"""'], {}), "('[bold red]{task.total}')\n", (3288, 3314), False, 'from rich.progress import Progress, TextColumn, BarColumn, TimeElapsedColumn, TimeRemainingColumn\n'), ((3344, 3363), 'rich.progress.TimeElapsedColumn', 'TimeElapsedColumn', ([], {}), '()\n', (3361, 3363), False, 'from rich.progress import Progress, TextColumn, BarColumn, TimeElapsedColumn, TimeRemainingColumn\n'), ((3395, 3416), 'rich.progress.TimeRemainingColumn', 'TimeRemainingColumn', ([], {}), '()\n', (3414, 3416), False, 'from rich.progress import Progress, TextColumn, BarColumn, TimeElapsedColumn, TimeRemainingColumn\n'), ((5072, 5085), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (5083, 5085), False, 'import io\n'), ((5529, 5569), 'numpy.load', 'np.load', (['check_golden'], {'allow_pickle': '(True)'}), '(check_golden, allow_pickle=True)\n', (5536, 5569), True, 'import numpy as np\n'), ((2202, 2248), 're.split', 're.split', (['"""\\\\s*,\\\\s*"""', 'generator_info_list[no]'], {}), "('\\\\s*,\\\\s*', generator_info_list[no])\n", (2210, 2248), False, 'import re\n'), ((13770, 13783), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (13781, 13783), False, 'import io\n'), ((14833, 14859), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'args.nproc'}), '(processes=args.nproc)\n', (14837, 14859), False, 'from multiprocessing import Pool, Manager, Condition, Value, Process\n'), ((15438, 15460), 'os.system', 'os.system', (['"""stty sane"""'], {}), "('stty sane')\n", (15447, 15460), False, 'import os\n'), ((16353, 16375), 'os.system', 'os.system', (['"""stty sane"""'], {}), "('stty sane')\n", (16362, 16375), False, 'import os\n'), ((16384, 16396), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (16392, 16396), False, 'import sys\n'), ((1285, 1296), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1293, 1296), False, 'import sys\n'), ((4637, 4652), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (4647, 4652), False, 'import time\n'), ((6887, 6965), 'subprocess.check_output', 'subprocess.check_output', (["(read_elf_cmd + ' | grep begin_signature ')"], {'shell': '(True)'}), "(read_elf_cmd + ' | grep begin_signature ', shell=True)\n", (6910, 6965), False, 'import subprocess\n'), ((13687, 13700), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (13698, 13700), False, 'import io\n'), ((15601, 15612), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (15609, 15612), False, 'import sys\n'), ((16085, 16097), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (16093, 16097), False, 'import sys\n'), ((8590, 8646), 'os.makedirs', 'os.makedirs', (['f"""build/{test_case[\'name\']}"""'], {'exist_ok': '(True)'}), '(f"build/{test_case[\'name\']}", exist_ok=True)\n', (8601, 8646), False, 'import os\n'), ((13811, 13825), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13823, 13825), False, 'import sys\n'), ((11891, 11947), 'os.makedirs', 'os.makedirs', (['f"""build/{test_case[\'name\']}"""'], {'exist_ok': '(True)'}), '(f"build/{test_case[\'name\']}", exist_ok=True)\n', (11902, 11947), False, 'import os\n'), ((13974, 13988), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13986, 13988), False, 'import sys\n'), ((7409, 7504), 'subprocess.check_output', 'subprocess.check_output', (['(read_elf_cmd + f" | grep test_{test_case[\'no\']}_data ")'], {'shell': '(True)'}), '(read_elf_cmd +\n f" | grep test_{test_case[\'no\']}_data ", shell=True)\n', (7432, 7504), False, 'import subprocess\n'), ((13943, 13957), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13955, 13957), False, 'import sys\n')]
|
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#create a MultiIndex dataframe
arrays = [['Fruit', 'Fruit', 'Fruit', 'Veggies', 'Veggies', 'Veggies'],
['Bananas', 'Oranges', 'Pears', 'Carrots', 'Potatoes', 'Celery']]
index = pd.MultiIndex.from_tuples(list(zip(*arrays)))
df = pd.DataFrame(np.random.randint(10, 50, size=(1, 6)), columns=index)
#plotting
fig, axes = plt.subplots(nrows=1, ncols=2, sharey=True, figsize=(14 / 2.54, 10 / 2.54)) # width, height
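# one subplot per top-level column group, sharing the y axis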
for i, col in enumerate(df.columns.levels[0]):
print(col)
ax = axes[i]
df[col].T.plot(ax=ax, kind='bar', width=.8)
ax.legend_.remove()
ax.set_xlabel(col, weight='bold')
    ax.yaxis.grid(True, which='major', color='black', linestyle='--', alpha=.4)  # pass the flag positionally; the 'b=' keyword is deprecated in newer matplotlib
ax.set_axisbelow(True)
for tick in ax.get_xticklabels():
tick.set_rotation(0)
#make the ticklines invisible
ax.tick_params(axis=u'both', which=u'both', length=0)
plt.tight_layout()
# remove spacing in between
fig.subplots_adjust(wspace=0) # space between plots
plt.show()
|
[
"matplotlib.pyplot.tight_layout",
"numpy.random.randint",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] |
[((398, 473), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'nrows': '(1)', 'ncols': '(2)', 'sharey': '(True)', 'figsize': '(14 / 2.54, 10 / 2.54)'}), '(nrows=1, ncols=2, sharey=True, figsize=(14 / 2.54, 10 / 2.54))\n', (410, 473), True, 'import matplotlib.pyplot as plt\n'), ((943, 961), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (959, 961), True, 'import matplotlib.pyplot as plt\n'), ((1044, 1054), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1052, 1054), True, 'import matplotlib.pyplot as plt\n'), ((320, 358), 'numpy.random.randint', 'np.random.randint', (['(10)', '(50)'], {'size': '(1, 6)'}), '(10, 50, size=(1, 6))\n', (337, 358), True, 'import numpy as np\n')]
|
import unittest
import numpy as np
import pandas as pd
import os
import pytz
from clairvoyant import History
dir_path = os.path.dirname(os.path.realpath(__file__))
class Test_History(unittest.TestCase):
def setUp(self):
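        # Map History's canonical column names to the headers used in the sample CSV.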
column_map = {
'Date': 'Unnamed: 0', 'Open': 'open', 'High': 'high', 'Low': 'low',
'Close': 'close', 'Volume': 'volume', 'Sentiment': 'sentiment',
'Influence': 'influence'
}
self.sample = History(
os.path.join(dir_path, 'tsla-sentiment.csv'), col_map=column_map
)
def test_get_data(self):
data = self.sample
self.assertTrue(isinstance(data._df, pd.DataFrame))
# KeyError happens if the column doesn't exist.
self.assertRaises(KeyError, data.__getitem__, 'Blah')
# You can get a column by name, returns a series.
self.assertTrue(isinstance(data['Close'], pd.Series))
# You can get a column by attribute, returns a series.
self.assertTrue(isinstance(data.close, pd.Series))
def test_rename(self):
data = self.sample
data.rename(columns={'date': 'Date', 'close': 'Close'})
self.assertEqual(data._col_map['Date'], 'Date')
self.assertEqual(data._col_map['Close'], 'Close')
self.assertTrue(isinstance(data.date, pd.Series))
self.assertTrue(isinstance(data.close, pd.Series))
def test_iteration(self):
data = self.sample
count = 0
for i in data:
count += 1
print(count)
self.assertEqual(count, 232)
def test_slicing_with_dates(self):
data = self.sample
tz = data._timezone
start = tz.localize(pd.to_datetime('2017-02-24 06:30:00'))
end = tz.localize(pd.to_datetime('2017-02-24 07:00:00'))
# slicing produces a new History object
cpy = data[start:end]
self.assertEqual(cpy.date.iloc[0], '2017-02-24 06:30:00')
self.assertEqual(cpy.date.iloc[-1], '2017-02-24 07:00:00')
# renaming will change the namedtuple attributes
data.rename(columns={'date': 'mydate'})
for row in data[start:end]:
self.assertTrue(hasattr(row, 'mydate'))
self.assertFalse(hasattr(row, 'date'))
def test_slicing_with_integers(self):
data = self.sample
# can also slice by integer index
for row in data[0:3]:
self.assertTrue(isinstance(row, tuple))
self.assertTrue(hasattr(row, 'date'))
def test_len(self):
data = self.sample
self.assertEqual(len(data), 232)
def test_features(self):
data = self.sample
self.assertEqual(data.features, ['Sentiment', 'Influence'])
data.features = ['Volume']
self.assertEqual(data.features, ['Volume'])
self.assertRaises(KeyError, setattr, data, 'features', ['test'])
def test_getting_rows(self):
data = self.sample
print(data[-1])
# You can get by index, returns a series
self.assertTrue(isinstance(data[0], pd.Series))
self.assertEqual(data.date.iloc[-1], '2017-03-10 13:00:00')
print(data['2017-03-10 13:00:00'])
# You can also get by date, returns a dataframe
self.assertTrue(isinstance(data['2017-03-10 13:00:00'], pd.DataFrame))
self.assertEqual(data['2017-03-10 13:00:00'].index[0], 231)
def test_rate_of_return(self):
data = self.sample
self.assertTrue(np.isclose(
data.return_rate[1], -0.00061491160645644951)
)
|
[
"os.path.realpath",
"pandas.to_datetime",
"os.path.join",
"numpy.isclose"
] |
[((136, 162), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (152, 162), False, 'import os\n'), ((499, 543), 'os.path.join', 'os.path.join', (['dir_path', '"""tsla-sentiment.csv"""'], {}), "(dir_path, 'tsla-sentiment.csv')\n", (511, 543), False, 'import os\n'), ((1708, 1745), 'pandas.to_datetime', 'pd.to_datetime', (['"""2017-02-24 06:30:00"""'], {}), "('2017-02-24 06:30:00')\n", (1722, 1745), True, 'import pandas as pd\n'), ((1773, 1810), 'pandas.to_datetime', 'pd.to_datetime', (['"""2017-02-24 07:00:00"""'], {}), "('2017-02-24 07:00:00')\n", (1787, 1810), True, 'import pandas as pd\n'), ((3482, 3537), 'numpy.isclose', 'np.isclose', (['data.return_rate[1]', '(-0.0006149116064564495)'], {}), '(data.return_rate[1], -0.0006149116064564495)\n', (3492, 3537), True, 'import numpy as np\n')]
|
import numpy as np
numNodes = 891
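# nodes lie on an 81 x 11 grid with 0.1 spacing (891 nodes, 800 four-node quad elements)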
coord = np.zeros((numNodes,2))
K = np.zeros((numNodes*2,numNodes*2))
for i in range(numNodes):
coord[i,0] = int(i / 11) / 10
coord[i,1] = i % 11 / 10
gaussxi = np.array([-1,1,1,-1]) / np.sqrt(3)
gausseta = np.array([-1,-1,1,1]) / np.sqrt(3)
numEle = 800
E = 1e5
nu = 0.25
D = E / (1 - nu**2)*np.array([[1,nu,0],[nu,1,0],[0,0,(1-nu)/2]])
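# plane-stress elasticity matrix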
for e in range(numEle):
Ke = np.zeros((8,8))
start = int((e // 10) * 11 + e % 10)
xe = np.zeros((4,2))
xe[0,:] = coord[start,:]
xe[1,:] = coord[start+11,:]
xe[2,:] = coord[start+12,:]
xe[3,:] = coord[start+1,:]
for igauss in range(4):
xi = gaussxi[igauss]
eta = gausseta[igauss]
Jpar = np.zeros((2,4))
Jpar[0,0] = -(1 - eta)/4
Jpar[0,1] = (1 - eta)/4
Jpar[0,2] = (1 + eta)/4
Jpar[0,3] = -(1 + eta)/4
Jpar[1,0] = -(1 - xi)/4
Jpar[1,1] = -(1 + xi)/4
Jpar[1,2] = (1 + xi)/4
Jpar[1,3] = (1 - xi)/4
J = np.dot(Jpar,xe)
Jinv = np.linalg.inv(J)
Npar = np.zeros((2,4))
B = np.zeros((3,8))
for i in range(4):
Npar[0,i] = Jinv[0,0] * Jpar[0,i] + Jinv[0,1] * Jpar[1,i]
Npar[1,i] = Jinv[1,0] * Jpar[0,i] + Jinv[1,1] * Jpar[1,i]
B[0,2*i] = Npar[0,i]
B[1,2*i+1] = Npar[1,i]
B[2,2*i] = Npar[1,i]
B[2,2*i+1] = Npar[0,i]
temp = np.dot(np.transpose(B),D)
detJ = np.linalg.det(J)
Ke = Ke + np.dot(temp,B)*detJ
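    # The loop above only builds the element matrix Ke; a minimal assembly step
    # (assumed, not in the original) would scatter it into the global matrix K
    # using the element's node numbering and two DOFs (x, y) per node:
    nodes = [start, start + 11, start + 12, start + 1]
    dofs = np.array([[2 * n, 2 * n + 1] for n in nodes]).reshape(-1)
    K[np.ix_(dofs, dofs)] += Ke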
|
[
"numpy.zeros",
"numpy.transpose",
"numpy.linalg.det",
"numpy.array",
"numpy.linalg.inv",
"numpy.dot",
"numpy.sqrt"
] |
[((43, 66), 'numpy.zeros', 'np.zeros', (['(numNodes, 2)'], {}), '((numNodes, 2))\n', (51, 66), True, 'import numpy as np\n'), ((70, 108), 'numpy.zeros', 'np.zeros', (['(numNodes * 2, numNodes * 2)'], {}), '((numNodes * 2, numNodes * 2))\n', (78, 108), True, 'import numpy as np\n'), ((208, 232), 'numpy.array', 'np.array', (['[-1, 1, 1, -1]'], {}), '([-1, 1, 1, -1])\n', (216, 232), True, 'import numpy as np\n'), ((232, 242), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (239, 242), True, 'import numpy as np\n'), ((254, 278), 'numpy.array', 'np.array', (['[-1, -1, 1, 1]'], {}), '([-1, -1, 1, 1])\n', (262, 278), True, 'import numpy as np\n'), ((278, 288), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (285, 288), True, 'import numpy as np\n'), ((341, 397), 'numpy.array', 'np.array', (['[[1, nu, 0], [nu, 1, 0], [0, 0, (1 - nu) / 2]]'], {}), '([[1, nu, 0], [nu, 1, 0], [0, 0, (1 - nu) / 2]])\n', (349, 397), True, 'import numpy as np\n'), ((420, 436), 'numpy.zeros', 'np.zeros', (['(8, 8)'], {}), '((8, 8))\n', (428, 436), True, 'import numpy as np\n'), ((486, 502), 'numpy.zeros', 'np.zeros', (['(4, 2)'], {}), '((4, 2))\n', (494, 502), True, 'import numpy as np\n'), ((729, 745), 'numpy.zeros', 'np.zeros', (['(2, 4)'], {}), '((2, 4))\n', (737, 745), True, 'import numpy as np\n'), ((1031, 1047), 'numpy.dot', 'np.dot', (['Jpar', 'xe'], {}), '(Jpar, xe)\n', (1037, 1047), True, 'import numpy as np\n'), ((1062, 1078), 'numpy.linalg.inv', 'np.linalg.inv', (['J'], {}), '(J)\n', (1075, 1078), True, 'import numpy as np\n'), ((1094, 1110), 'numpy.zeros', 'np.zeros', (['(2, 4)'], {}), '((2, 4))\n', (1102, 1110), True, 'import numpy as np\n'), ((1122, 1138), 'numpy.zeros', 'np.zeros', (['(3, 8)'], {}), '((3, 8))\n', (1130, 1138), True, 'import numpy as np\n'), ((1525, 1541), 'numpy.linalg.det', 'np.linalg.det', (['J'], {}), '(J)\n', (1538, 1541), True, 'import numpy as np\n'), ((1491, 1506), 'numpy.transpose', 'np.transpose', (['B'], {}), '(B)\n', (1503, 1506), True, 'import numpy as np\n'), ((1560, 1575), 'numpy.dot', 'np.dot', (['temp', 'B'], {}), '(temp, B)\n', (1566, 1575), True, 'import numpy as np\n')]
|
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.markers as mk
import matplotlib.pylab as plt
def sp_plot(df, x_col, y_col, color_col, ci=None, domain_range=[0, 20, 0, 20],
            ax=None, aggplot=True, x_jitter=0, height=3, legend=True):
"""
    create SP visualization plot from 2 columns of a df
"""
# # create axes if not passed
# if ax is None:
# fig = plt.figure()
# ax = fig.add_subplot(111)
all_markers = list(mk.MarkerStyle.markers.keys())
n_markers = df[color_col].unique().shape[0] # number unique
cur_markers = all_markers[:n_markers]
    sns.lmplot(x=x_col, y=y_col, data=df, hue=color_col, ci=ci,
               markers=cur_markers, palette="Set1", x_jitter=x_jitter,
               height=height, legend=legend)
if aggplot:
        # add a whole-data regression line, but don't cover the scatter data
        sns.regplot(x=x_col, y=y_col, data=df, color='black', scatter=False, ci=ci)
plt.axis(domain_range)
def plot_clustermat(z,fmt=None):
"""
black and white matshow for clustering and feat allocation matrices
Parameters
-----------
z : nparray, square to be plotted
    fmt : str naming the input format when z is not already a plottable matrix
fmt options:
'crplist' : a list of values from zero to k
'ibplist' : a list of lists of varying lengths
'list' : a list, but not nparray otherwise ready to plot
"""
processing = {'crplist': lambda x: list_to_mat(x),
'ibplist': lambda x: make_square(x),
'list': lambda x: np.asarray(x),
None: lambda x: x}
z_mat = processing[fmt](z)
# print(z_mat)
N,K = z_mat.shape
# no white grid
sns.set_style("whitegrid", {'axes.grid' : False})
# plot the data
plt.matshow(z_mat,cmap=plt.cm.gray_r)
# make the tick marks at the ints
ax = plt.gca()
ax.set_xticks(np.arange(0, K, 1))
ax.set_yticks(np.arange(0, N, 1))
# Labels for major ticks
ax.set_xticklabels(np.arange(0, K, 1))
ax.set_yticklabels(np.arange(0, N, 1))
# Minor ticks at 1/2 marks
ax.set_xticks(np.arange(-.5, K, 1), minor=True)
ax.set_yticks(np.arange(-.5, N, 1), minor=True)
# Gridlines based on minor ticks
plt.grid(which='minor', color='k', linestyle='-', linewidth=3)
def make_square(z):
"""
convert a list of lists of varying sizes to a square matrix
"""
D = len(z[-1])
return np.asarray([np.concatenate((z_i,np.zeros([D-len(z_i)]))) for z_i in z])
def list_to_mat(z):
"""
    make a list of length N with values 0 to K into an Nx(K+1) binary matrix
"""
K = np.max(z)
tmp = np.eye(K+1)
return np.asarray([tmp[z_i] for z_i in z])
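# --- Added usage sketch (illustrative values): plot a CRP-style assignment vector.
# plot_clustermat([0, 1, 1, 2, 0], fmt='crplist')
# plt.show()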
|
[
"seaborn.set_style",
"seaborn.lmplot",
"matplotlib.markers.MarkerStyle.markers.keys",
"numpy.asarray",
"matplotlib.pylab.axis",
"matplotlib.pylab.gca",
"seaborn.regplot",
"numpy.max",
"numpy.arange",
"numpy.eye",
"matplotlib.pylab.grid",
"matplotlib.pylab.matshow"
] |
[((636, 781), 'seaborn.lmplot', 'sns.lmplot', (['x_col', 'y_col'], {'data': 'df', 'hue': 'color_col', 'ci': 'ci', 'markers': 'cur_markers', 'palette': '"""Set1"""', 'x_jitter': 'x_jitter', 'height': 'height', 'legend': 'legend'}), "(x_col, y_col, data=df, hue=color_col, ci=ci, markers=cur_markers,\n palette='Set1', x_jitter=x_jitter, height=height, legend=legend)\n", (646, 781), True, 'import seaborn as sns\n'), ((993, 1015), 'matplotlib.pylab.axis', 'plt.axis', (['domain_range'], {}), '(domain_range)\n', (1001, 1015), True, 'import matplotlib.pylab as plt\n'), ((1742, 1790), 'seaborn.set_style', 'sns.set_style', (['"""whitegrid"""', "{'axes.grid': False}"], {}), "('whitegrid', {'axes.grid': False})\n", (1755, 1790), True, 'import seaborn as sns\n'), ((1817, 1855), 'matplotlib.pylab.matshow', 'plt.matshow', (['z_mat'], {'cmap': 'plt.cm.gray_r'}), '(z_mat, cmap=plt.cm.gray_r)\n', (1828, 1855), True, 'import matplotlib.pylab as plt\n'), ((1904, 1913), 'matplotlib.pylab.gca', 'plt.gca', ([], {}), '()\n', (1911, 1913), True, 'import matplotlib.pylab as plt\n'), ((2284, 2346), 'matplotlib.pylab.grid', 'plt.grid', ([], {'which': '"""minor"""', 'color': '"""k"""', 'linestyle': '"""-"""', 'linewidth': '(3)'}), "(which='minor', color='k', linestyle='-', linewidth=3)\n", (2292, 2346), True, 'import matplotlib.pylab as plt\n'), ((2669, 2678), 'numpy.max', 'np.max', (['z'], {}), '(z)\n', (2675, 2678), True, 'import numpy as np\n'), ((2689, 2702), 'numpy.eye', 'np.eye', (['(K + 1)'], {}), '(K + 1)\n', (2695, 2702), True, 'import numpy as np\n'), ((2712, 2747), 'numpy.asarray', 'np.asarray', (['[tmp[z_i] for z_i in z]'], {}), '([tmp[z_i] for z_i in z])\n', (2722, 2747), True, 'import numpy as np\n'), ((490, 519), 'matplotlib.markers.MarkerStyle.markers.keys', 'mk.MarkerStyle.markers.keys', ([], {}), '()\n', (517, 519), True, 'import matplotlib.markers as mk\n'), ((915, 986), 'seaborn.regplot', 'sns.regplot', (['x_col', 'y_col'], {'data': 'df', 'color': '"""black"""', 'scatter': '(False)', 'ci': 'ci'}), "(x_col, y_col, data=df, color='black', scatter=False, ci=ci)\n", (926, 986), True, 'import seaborn as sns\n'), ((1932, 1950), 'numpy.arange', 'np.arange', (['(0)', 'K', '(1)'], {}), '(0, K, 1)\n', (1941, 1950), True, 'import numpy as np\n'), ((1970, 1988), 'numpy.arange', 'np.arange', (['(0)', 'N', '(1)'], {}), '(0, N, 1)\n', (1979, 1988), True, 'import numpy as np\n'), ((2043, 2061), 'numpy.arange', 'np.arange', (['(0)', 'K', '(1)'], {}), '(0, K, 1)\n', (2052, 2061), True, 'import numpy as np\n'), ((2086, 2104), 'numpy.arange', 'np.arange', (['(0)', 'N', '(1)'], {}), '(0, N, 1)\n', (2095, 2104), True, 'import numpy as np\n'), ((2156, 2177), 'numpy.arange', 'np.arange', (['(-0.5)', 'K', '(1)'], {}), '(-0.5, K, 1)\n', (2165, 2177), True, 'import numpy as np\n'), ((2208, 2229), 'numpy.arange', 'np.arange', (['(-0.5)', 'N', '(1)'], {}), '(-0.5, N, 1)\n', (2217, 2229), True, 'import numpy as np\n'), ((1592, 1605), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1602, 1605), True, 'import numpy as np\n')]
|
# Copyright (C) 2017-2018 Intel Corporation
#
# SPDX-License-Identifier: MIT
import run_utils as utils
import numpy as np
import sys, os
import dpctl, dpctl.tensor as dpt
from dpbench_python.pairwise_distance.pairwise_distance_python import (
pairwise_distance_python,
)
from dpbench_datagen.pairwise_distance import gen_rand_data, gen_data_to_file
######################################################
# GLOBAL DECLARATIONS THAT WILL BE USED IN ALL FILES #
######################################################
# make xrange available in python 3
try:
xrange
except NameError:
xrange = range
def gen_data(nopt, dims):
X, Y = gen_rand_data(nopt, dims)
return (X, Y, np.empty((nopt, nopt)))
def run(name, sizes=5, step=2, nopt=2 ** 10):
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--steps", required=False, default=sizes, help="Number of steps"
)
parser.add_argument(
"--step", required=False, default=step, help="Factor for each step"
)
parser.add_argument(
"--size", required=False, default=nopt, help="Initial data size"
)
parser.add_argument(
"--repeat", required=False, default=1, help="Iterations inside measured region"
)
parser.add_argument(
"--text", required=False, default="", help="Print with each result"
)
parser.add_argument("-d", type=int, default=3, help="Dimensions")
parser.add_argument(
"--usm",
required=False,
action="store_true",
help="Use USM Shared or pure numpy",
)
parser.add_argument(
"--test",
required=False,
action="store_true",
help="Check for correctness by comparing output with naieve Python version",
)
args = parser.parse_args()
sizes = int(args.steps)
step = int(args.step)
nopt = int(args.size)
repeat = int(args.repeat)
dims = int(args.d)
clean_string = ["make", "clean"]
utils.run_command(clean_string, verbose=True)
if args.usm:
build_string = ["make", "comp"]
utils.run_command(build_string, verbose=True)
exec_name = "./pairwise_distance_comp"
else:
build_string = ["make"]
utils.run_command(build_string, verbose=True)
exec_name = "./pairwise_distance"
if args.test:
X, Y, p_D = gen_data(nopt, dims)
pairwise_distance_python(X, Y, p_D)
# run dpcpp
gen_data_to_file(nopt, dims)
# run the C program
run_cmd = [exec_name, str(nopt), str(1), "-t"]
utils.run_command(run_cmd, verbose=True)
# read output of dpcpp
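        # (assumption: D.bin holds float64 values, np.fromfile's default dtype)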
n_D = np.fromfile("D.bin").reshape(nopt, nopt)
if np.allclose(n_D, p_D):
print("Test succeeded\n")
else:
print("Test failed\n")
return
if os.path.isfile("runtimes.csv"):
os.remove("runtimes.csv")
for i in xrange(sizes):
gen_data_to_file(nopt, dims)
# run the C program
run_cmd = [exec_name, str(nopt), str(repeat)]
utils.run_command(run_cmd, verbose=True)
nopt *= step
repeat -= step
if repeat < 1:
repeat = 1
if __name__ == "__main__":
run("Pairwise distance dpcpp")
|
[
"os.remove",
"argparse.ArgumentParser",
"numpy.fromfile",
"run_utils.run_command",
"numpy.empty",
"numpy.allclose",
"dpbench_python.pairwise_distance.pairwise_distance_python.pairwise_distance_python",
"os.path.isfile",
"dpbench_datagen.pairwise_distance.gen_data_to_file",
"dpbench_datagen.pairwise_distance.gen_rand_data"
] |
[((649, 674), 'dpbench_datagen.pairwise_distance.gen_rand_data', 'gen_rand_data', (['nopt', 'dims'], {}), '(nopt, dims)\n', (662, 674), False, 'from dpbench_datagen.pairwise_distance import gen_rand_data, gen_data_to_file\n'), ((799, 824), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (822, 824), False, 'import argparse\n'), ((1976, 2021), 'run_utils.run_command', 'utils.run_command', (['clean_string'], {'verbose': '(True)'}), '(clean_string, verbose=True)\n', (1993, 2021), True, 'import run_utils as utils\n'), ((2845, 2875), 'os.path.isfile', 'os.path.isfile', (['"""runtimes.csv"""'], {}), "('runtimes.csv')\n", (2859, 2875), False, 'import sys, os\n'), ((693, 715), 'numpy.empty', 'np.empty', (['(nopt, nopt)'], {}), '((nopt, nopt))\n', (701, 715), True, 'import numpy as np\n'), ((2088, 2133), 'run_utils.run_command', 'utils.run_command', (['build_string'], {'verbose': '(True)'}), '(build_string, verbose=True)\n', (2105, 2133), True, 'import run_utils as utils\n'), ((2231, 2276), 'run_utils.run_command', 'utils.run_command', (['build_string'], {'verbose': '(True)'}), '(build_string, verbose=True)\n', (2248, 2276), True, 'import run_utils as utils\n'), ((2387, 2422), 'dpbench_python.pairwise_distance.pairwise_distance_python.pairwise_distance_python', 'pairwise_distance_python', (['X', 'Y', 'p_D'], {}), '(X, Y, p_D)\n', (2411, 2422), False, 'from dpbench_python.pairwise_distance.pairwise_distance_python import pairwise_distance_python\n'), ((2452, 2480), 'dpbench_datagen.pairwise_distance.gen_data_to_file', 'gen_data_to_file', (['nopt', 'dims'], {}), '(nopt, dims)\n', (2468, 2480), False, 'from dpbench_datagen.pairwise_distance import gen_rand_data, gen_data_to_file\n'), ((2572, 2612), 'run_utils.run_command', 'utils.run_command', (['run_cmd'], {'verbose': '(True)'}), '(run_cmd, verbose=True)\n', (2589, 2612), True, 'import run_utils as utils\n'), ((2712, 2733), 'numpy.allclose', 'np.allclose', (['n_D', 'p_D'], {}), '(n_D, p_D)\n', (2723, 2733), True, 'import numpy as np\n'), ((2885, 2910), 'os.remove', 'os.remove', (['"""runtimes.csv"""'], {}), "('runtimes.csv')\n", (2894, 2910), False, 'import sys, os\n'), ((2948, 2976), 'dpbench_datagen.pairwise_distance.gen_data_to_file', 'gen_data_to_file', (['nopt', 'dims'], {}), '(nopt, dims)\n', (2964, 2976), False, 'from dpbench_datagen.pairwise_distance import gen_rand_data, gen_data_to_file\n'), ((3068, 3108), 'run_utils.run_command', 'utils.run_command', (['run_cmd'], {'verbose': '(True)'}), '(run_cmd, verbose=True)\n', (3085, 3108), True, 'import run_utils as utils\n'), ((2659, 2679), 'numpy.fromfile', 'np.fromfile', (['"""D.bin"""'], {}), "('D.bin')\n", (2670, 2679), True, 'import numpy as np\n')]
|
import numpy as np
import matplotlib.pyplot as plt
from rbm import RBM
import click
import gzip
import pickle
@click.group(context_settings={"help_option_names": ['-h', '--help']})
def cli():
"""Simple tool for training an RBM"""
pass
# @click.option('--target-path', type=click.Path(exists=True),
# help="path to the wavefunction data")
@cli.command("train")
@click.option('--train-path', default='../data/Ising2d_L4.pkl.gz',
show_default=True, type=click.Path(exists=True),
help="path to the training data")
@click.option('--save', type=click.Path(),
help="where to save the trained RBM parameters (if at all)")
@click.option('-n', '--num-hidden', default=None, type=int,
help=("number of hidden units in the RBM; defaults to "
"number of visible units"))
@click.option('-e', '--epochs', default=1000, show_default=True, type=int)
@click.option('-b', '--batch-size', default=100, show_default=True, type=int)
@click.option('-k', default=1, show_default=True, type=int,
help="number of Contrastive Divergence steps")
@click.option('-l', '--learning-rate', default=1e-3,
show_default=True, type=float)
@click.option('-m', '--momentum', default=0.5, show_default=True, type=float,
help=("value of the momentum parameter; ignored if "
"using SGD or Adam optimization"))
@click.option('--l1', default=0, show_default=True, type=float,
help="L1 regularization parameter")
@click.option('--l2', default=0, show_default=True, type=float,
help="L2 regularization parameter")
@click.option('--log-every', default=0, show_default=True, type=int,
help=("how often the validation statistics are recorded, "
"in epochs; 0 means no logging"))
@click.option('--seed', default=1234, show_default=True, type=int,
help="random seed to initialize the RBM with")
@click.option('-p', '--persistent', is_flag=True,
help="use Persistent Contrastive Divergence (PCD)")
@click.option('--persist-from', default=0, show_default=True, type=int,
help=("if PCD flag is given, use vanilla CD until the given "
"epoch, then switch to PCD"))
@click.option('--plot', is_flag=True)
@click.option('--no-prog', is_flag=True)
@click.option('--method', default='momentum', show_default=True,
type=click.Choice(["nesterov", "momentum", "sgd", "adam"]),
help="the optimization method to use")
def train(train_path, save, num_hidden, epochs, batch_size,
k, persistent, persist_from, learning_rate, momentum, l1, l2,
method, seed, log_every, plot, no_prog):
"""Train an RBM"""
# train_set = np.loadtxt(train_path)
# target_psi = np.loadtxt(target_path) if target_path is not None else None
with gzip.open(train_path) as f:
train_set = pickle.load(f, encoding='bytes')
num_hidden = train_set.shape[-1] if num_hidden is None else num_hidden
rbm = RBM(num_visible=train_set.shape[-1],
num_hidden=num_hidden,
seed=seed)
# learning_rate = schedulers.bounded_exponential_decay(0.1, 1e-6, epochs)
# momentum = schedulers.bounded_exponential_decay(0.5, 0.99, epochs)
nll_list = rbm.train(train_set, None, epochs,
batch_size, k=k,
persistent=persistent,
persist_from=persist_from,
lr=learning_rate,
momentum=momentum,
l1_reg=l1, l2_reg=l2,
beta1=0.9, beta2=0.999, epsilon=1e-8,
method=method,
log_every=log_every,
progbar=(not no_prog))
if save:
rbm.save(save)
if plot and nll_list:
fig, ax1 = plt.subplots(figsize=(10, 10))
ax1.plot(log_every * np.arange(len(nll_list)),
np.array(nll_list) / len(train_set), 'b')
ax1.set_xlabel("Epoch")
ax1.set_ylabel("NLL per training example", color='b')
ax1.tick_params('y', colors='b')
ax1.set_xlim(0, epochs)
if persistent and persist_from > 0:
# mark starting point of PCD if enabled and not zero
ax1.axvline(x=persist_from, linestyle=':', color='g')
# ax2 = ax1.twinx()
# ax2.plot(log_every * np.arange(len(overlap_list)),
# overlap_list, 'r')
# ax2.set_ylabel('Overlap', color='r')
# ax2.tick_params('y', colors='r')
# ax2.axhline(y=1, xmin=0, xmax=len(overlap_list),
# linestyle=':', color='r') # plot maximum overlap
plt.show()
# @cli.command("test")
# @click.option('--train-path', default='../c++/training_data.txt',
# show_default=True)
# @click.option('--target-path', default='../c++/target_psi.txt',
# show_default=True)
# @click.option('-n', '--num-hidden', default=None, type=int,
# help=("number of hidden units in the RBM; defaults to "
# "number of visible units"))
# @click.option('-k', default=1, show_default=True, type=int,
# help="number of Contrastive Divergence steps")
# @click.option('-e', '--epsilon', default=1e-8, show_default=True, type=float)
# @click.option('--seed', default=1234, show_default=True, type=int,
# help="random seed to initialize the RBM with")
# def test(train_path, target_path, num_hidden, k, epsilon, seed):
# """Tests the RBM's gradient computations"""
# train_set = np.loadtxt(train_path)
# target_psi = np.loadtxt(target_path)
# num_hidden = train_set.shape[-1] if num_hidden is None else num_hidden
# rbm = RBM(num_visible=train_set.shape[-1],
# num_hidden=num_hidden,
# seed=seed)
# rbm.test_gradients(train_set, target_psi, k, epsilon)
if __name__ == '__main__':
cli()
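# --- Added usage note (script name is illustrative; the options are defined above):
#   python train_rbm.py train --train-path ../data/Ising2d_L4.pkl.gz -e 500 -b 100 --plot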
|
[
"gzip.open",
"matplotlib.pyplot.show",
"click.option",
"click.Choice",
"pickle.load",
"numpy.array",
"click.Path",
"rbm.RBM",
"click.group",
"matplotlib.pyplot.subplots"
] |
[((113, 182), 'click.group', 'click.group', ([], {'context_settings': "{'help_option_names': ['-h', '--help']}"}), "(context_settings={'help_option_names': ['-h', '--help']})\n", (124, 182), False, 'import click\n'), ((684, 826), 'click.option', 'click.option', (['"""-n"""', '"""--num-hidden"""'], {'default': 'None', 'type': 'int', 'help': '"""number of hidden units in the RBM; defaults to number of visible units"""'}), "('-n', '--num-hidden', default=None, type=int, help=\n 'number of hidden units in the RBM; defaults to number of visible units')\n", (696, 826), False, 'import click\n'), ((862, 935), 'click.option', 'click.option', (['"""-e"""', '"""--epochs"""'], {'default': '(1000)', 'show_default': '(True)', 'type': 'int'}), "('-e', '--epochs', default=1000, show_default=True, type=int)\n", (874, 935), False, 'import click\n'), ((937, 1013), 'click.option', 'click.option', (['"""-b"""', '"""--batch-size"""'], {'default': '(100)', 'show_default': '(True)', 'type': 'int'}), "('-b', '--batch-size', default=100, show_default=True, type=int)\n", (949, 1013), False, 'import click\n'), ((1015, 1125), 'click.option', 'click.option', (['"""-k"""'], {'default': '(1)', 'show_default': '(True)', 'type': 'int', 'help': '"""number of Contrastive Divergence steps"""'}), "('-k', default=1, show_default=True, type=int, help=\n 'number of Contrastive Divergence steps')\n", (1027, 1125), False, 'import click\n'), ((1136, 1223), 'click.option', 'click.option', (['"""-l"""', '"""--learning-rate"""'], {'default': '(0.001)', 'show_default': '(True)', 'type': 'float'}), "('-l', '--learning-rate', default=0.001, show_default=True,\n type=float)\n", (1148, 1223), False, 'import click\n'), ((1234, 1407), 'click.option', 'click.option', (['"""-m"""', '"""--momentum"""'], {'default': '(0.5)', 'show_default': '(True)', 'type': 'float', 'help': '"""value of the momentum parameter; ignored if using SGD or Adam optimization"""'}), "('-m', '--momentum', default=0.5, show_default=True, type=float,\n help=\n 'value of the momentum parameter; ignored if using SGD or Adam optimization'\n )\n", (1246, 1407), False, 'import click\n'), ((1434, 1537), 'click.option', 'click.option', (['"""--l1"""'], {'default': '(0)', 'show_default': '(True)', 'type': 'float', 'help': '"""L1 regularization parameter"""'}), "('--l1', default=0, show_default=True, type=float, help=\n 'L1 regularization parameter')\n", (1446, 1537), False, 'import click\n'), ((1548, 1651), 'click.option', 'click.option', (['"""--l2"""'], {'default': '(0)', 'show_default': '(True)', 'type': 'float', 'help': '"""L2 regularization parameter"""'}), "('--l2', default=0, show_default=True, type=float, help=\n 'L2 regularization parameter')\n", (1560, 1651), False, 'import click\n'), ((1662, 1827), 'click.option', 'click.option', (['"""--log-every"""'], {'default': '(0)', 'show_default': '(True)', 'type': 'int', 'help': '"""how often the validation statistics are recorded, in epochs; 0 means no logging"""'}), "('--log-every', default=0, show_default=True, type=int, help=\n 'how often the validation statistics are recorded, in epochs; 0 means no logging'\n )\n", (1674, 1827), False, 'import click\n'), ((1858, 1975), 'click.option', 'click.option', (['"""--seed"""'], {'default': '(1234)', 'show_default': '(True)', 'type': 'int', 'help': '"""random seed to initialize the RBM with"""'}), "('--seed', default=1234, show_default=True, type=int, help=\n 'random seed to initialize the RBM with')\n", (1870, 1975), False, 'import click\n'), ((1986, 2091), 'click.option', 'click.option', (['"""-p"""', '"""--persistent"""'], {'is_flag': '(True)', 'help': '"""use Persistent Contrastive Divergence (PCD)"""'}), "('-p', '--persistent', is_flag=True, help=\n 'use Persistent Contrastive Divergence (PCD)')\n", (1998, 2091), False, 'import click\n'), ((2102, 2274), 'click.option', 'click.option', (['"""--persist-from"""'], {'default': '(0)', 'show_default': '(True)', 'type': 'int', 'help': '"""if PCD flag is given, use vanilla CD until the given epoch, then switch to PCD"""'}), "('--persist-from', default=0, show_default=True, type=int, help\n =\n 'if PCD flag is given, use vanilla CD until the given epoch, then switch to PCD'\n )\n", (2114, 2274), False, 'import click\n'), ((2300, 2336), 'click.option', 'click.option', (['"""--plot"""'], {'is_flag': '(True)'}), "('--plot', is_flag=True)\n", (2312, 2336), False, 'import click\n'), ((2338, 2377), 'click.option', 'click.option', (['"""--no-prog"""'], {'is_flag': '(True)'}), "('--no-prog', is_flag=True)\n", (2350, 2377), False, 'import click\n'), ((3074, 3144), 'rbm.RBM', 'RBM', ([], {'num_visible': 'train_set.shape[-1]', 'num_hidden': 'num_hidden', 'seed': 'seed'}), '(num_visible=train_set.shape[-1], num_hidden=num_hidden, seed=seed)\n', (3077, 3144), False, 'from rbm import RBM\n'), ((2906, 2927), 'gzip.open', 'gzip.open', (['train_path'], {}), '(train_path)\n', (2915, 2927), False, 'import gzip\n'), ((2954, 2986), 'pickle.load', 'pickle.load', (['f'], {'encoding': '"""bytes"""'}), "(f, encoding='bytes')\n", (2965, 2986), False, 'import pickle\n'), ((3932, 3962), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (3944, 3962), True, 'import matplotlib.pyplot as plt\n'), ((4778, 4788), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4786, 4788), True, 'import matplotlib.pyplot as plt\n'), ((492, 515), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (502, 515), False, 'import click\n'), ((594, 606), 'click.Path', 'click.Path', ([], {}), '()\n', (604, 606), False, 'import click\n'), ((2462, 2515), 'click.Choice', 'click.Choice', (["['nesterov', 'momentum', 'sgd', 'adam']"], {}), "(['nesterov', 'momentum', 'sgd', 'adam'])\n", (2474, 2515), False, 'import click\n'), ((4035, 4053), 'numpy.array', 'np.array', (['nll_list'], {}), '(nll_list)\n', (4043, 4053), True, 'import numpy as np\n')]
|
# Copyright 2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To see moto-server logs
# pytest -s -p no:logging tests/test_aio_s3fs.py
import pytest
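# Note: `aio_s3_bucket` and `aio_s3fs` are pytest fixtures assumed to be
# provided by the project's conftest.py (e.g. backed by a moto S3 server).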
@pytest.mark.asyncio
async def test_pandas_s3_io(
aio_s3_bucket, aio_s3fs
):
import numpy as np
import pandas as pd
s3_file = f"s3://{aio_s3_bucket}/data.csv"
print(s3_file)
data = {"1": np.random.rand(5)}
df = pd.DataFrame(data=data)
df.to_csv(s3_file)
s3_df = pd.read_csv(s3_file, index_col=0)
assert isinstance(s3_df, pd.DataFrame)
pd.testing.assert_frame_equal(df, s3_df)
@pytest.mark.asyncio
async def test_zarr_s3_io(
aio_s3_bucket, aio_s3fs
):
import numpy as np
import pandas as pd
import s3fs
import xarray as xr
fmap = s3fs.S3Map(f"s3://{aio_s3_bucket}/test_datasets/test.zarr", s3=s3fs.S3FileSystem())
print(fmap.root)
ds = xr.Dataset(
{"foo": (("x", "y"), np.random.rand(4, 5))},
coords={
"x": [10, 20, 30, 40],
"y": pd.date_range("2000-01-01", periods=5),
"z": ("x", list("abcd")),
},
)
ds.to_zarr(fmap, consolidated=True)
s3_ds = xr.open_zarr(fmap, consolidated=True)
assert isinstance(s3_ds, xr.Dataset)
xr.testing.assert_equal(ds, s3_ds)
|
[
"pandas.DataFrame",
"pandas.testing.assert_frame_equal",
"xarray.testing.assert_equal",
"pandas.date_range",
"pandas.read_csv",
"s3fs.S3FileSystem",
"xarray.open_zarr",
"numpy.random.rand"
] |
[((902, 925), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'data'}), '(data=data)\n', (914, 925), True, 'import pandas as pd\n'), ((961, 994), 'pandas.read_csv', 'pd.read_csv', (['s3_file'], {'index_col': '(0)'}), '(s3_file, index_col=0)\n', (972, 994), True, 'import pandas as pd\n'), ((1042, 1082), 'pandas.testing.assert_frame_equal', 'pd.testing.assert_frame_equal', (['df', 's3_df'], {}), '(df, s3_df)\n', (1071, 1082), True, 'import pandas as pd\n'), ((1658, 1695), 'xarray.open_zarr', 'xr.open_zarr', (['fmap'], {'consolidated': '(True)'}), '(fmap, consolidated=True)\n', (1670, 1695), True, 'import xarray as xr\n'), ((1741, 1775), 'xarray.testing.assert_equal', 'xr.testing.assert_equal', (['ds', 's3_ds'], {}), '(ds, s3_ds)\n', (1764, 1775), True, 'import xarray as xr\n'), ((874, 891), 'numpy.random.rand', 'np.random.rand', (['(5)'], {}), '(5)\n', (888, 891), True, 'import numpy as np\n'), ((1326, 1345), 's3fs.S3FileSystem', 's3fs.S3FileSystem', ([], {}), '()\n', (1343, 1345), False, 'import s3fs\n'), ((1418, 1438), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)'], {}), '(4, 5)\n', (1432, 1438), True, 'import numpy as np\n'), ((1511, 1549), 'pandas.date_range', 'pd.date_range', (['"""2000-01-01"""'], {'periods': '(5)'}), "('2000-01-01', periods=5)\n", (1524, 1549), True, 'import pandas as pd\n')]
|
import numpy as np
import multiprocessing
from abito.lib.stats.weighted import _quantile_sorted, _sort_obs
__all__ = ['generate_bootstrap_estimates']
def _do_bootstrap_plain(obs, stat_func, stat_args, n_iters, seed):
np.random.seed(seed)
nobs = obs.shape[0]
result = []
for i in range(n_iters):
new_ind = np.random.choice(nobs, nobs, replace=True)
obs_new = obs[new_ind]
result.append(stat_func(obs_new, **stat_args))
return result
def _do_bootstrap_weighted(obs, weights, stat_func, stat_args, n_iters, seed):
np.random.seed(seed)
if stat_func.__name__ == 'quantile':
obs, weights = _sort_obs(obs, weights)
stat_func = _quantile_sorted
nobs = weights.sum()
ps = weights / nobs
result = []
for i in range(n_iters):
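        # drawing new multinomial counts (total nobs, probabilities ps) is the
        # weighted analogue of resampling rows with replacement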
new_weights = np.random.multinomial(nobs, ps)
weights = new_weights
result.append(stat_func(weights=weights, obs=obs, **stat_args))
return result
def _prepare_bootstrap_procedure(obs, weights, stat_func, n_iters, **stat_args):
if weights.shape[0] == 0:
func = _do_bootstrap_plain
args = (obs, stat_func, stat_args, n_iters)
else:
func = _do_bootstrap_weighted
args = (obs, weights, stat_func, stat_args, n_iters)
return func, args
def generate_bootstrap_estimates(obs, stat_func, n_iters, weights=np.empty(0), n_threads=1, **stat_args):
n_threads = multiprocessing.cpu_count() if n_threads == -1 else n_threads
if n_threads <= 1:
func, args = _prepare_bootstrap_procedure(obs, weights, stat_func, n_iters, **stat_args)
seed = np.random.randint(2**32)
results = np.asarray(func(*args, seed))
else:
with multiprocessing.Pool(n_threads) as pool:
n_iters_per_thread = int(n_iters / n_threads)
pool_results = []
seeds = np.random.randint(2**32, size=n_threads)
for seed in seeds:
func, args = _prepare_bootstrap_procedure(obs, weights, stat_func, n_iters_per_thread, **stat_args)
r = pool.apply_async(func, (*args, seed))
pool_results.append(r)
results = []
[results.extend(r.get()) for r in pool_results]
results = np.asarray(results)
return results
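# --- Added usage sketch (illustrative values, not from the original module):
# bootstrap standard error of the mean for 1000 standard-normal observations.
if __name__ == '__main__':
    sample = np.random.normal(size=1000)
    estimates = generate_bootstrap_estimates(sample, np.mean, n_iters=500)
    print(estimates.std())  # approximates the standard error of the mean (~0.032)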
|
[
"numpy.random.seed",
"abito.lib.stats.weighted._sort_obs",
"numpy.empty",
"numpy.random.multinomial",
"numpy.asarray",
"multiprocessing.Pool",
"numpy.random.randint",
"numpy.random.choice",
"multiprocessing.cpu_count"
] |
[((225, 245), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (239, 245), True, 'import numpy as np\n'), ((566, 586), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (580, 586), True, 'import numpy as np\n'), ((1382, 1393), 'numpy.empty', 'np.empty', (['(0)'], {}), '(0)\n', (1390, 1393), True, 'import numpy as np\n'), ((334, 376), 'numpy.random.choice', 'np.random.choice', (['nobs', 'nobs'], {'replace': '(True)'}), '(nobs, nobs, replace=True)\n', (350, 376), True, 'import numpy as np\n'), ((652, 675), 'abito.lib.stats.weighted._sort_obs', '_sort_obs', (['obs', 'weights'], {}), '(obs, weights)\n', (661, 675), False, 'from abito.lib.stats.weighted import _quantile_sorted, _sort_obs\n'), ((831, 862), 'numpy.random.multinomial', 'np.random.multinomial', (['nobs', 'ps'], {}), '(nobs, ps)\n', (852, 862), True, 'import numpy as np\n'), ((1438, 1465), 'multiprocessing.cpu_count', 'multiprocessing.cpu_count', ([], {}), '()\n', (1463, 1465), False, 'import multiprocessing\n'), ((1635, 1661), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32)'], {}), '(2 ** 32)\n', (1652, 1661), True, 'import numpy as np\n'), ((1731, 1762), 'multiprocessing.Pool', 'multiprocessing.Pool', (['n_threads'], {}), '(n_threads)\n', (1751, 1762), False, 'import multiprocessing\n'), ((1880, 1922), 'numpy.random.randint', 'np.random.randint', (['(2 ** 32)'], {'size': 'n_threads'}), '(2 ** 32, size=n_threads)\n', (1897, 1922), True, 'import numpy as np\n'), ((2272, 2291), 'numpy.asarray', 'np.asarray', (['results'], {}), '(results)\n', (2282, 2291), True, 'import numpy as np\n')]
|
from ising import *
import os
import numpy as np
Ns = [10, 20, 50, 100, 1000] # System Size
T_Tcs = np.linspace(0.5, 1.7, 30) # T/Tc
Tc = 2.268 # Onsager's Tc
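# (exact value: 2/ln(1 + sqrt(2)) ~= 2.2692)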
for n in Ns:
for i, T_Tc in enumerate(T_Tcs):
T = T_Tc*Tc
wd = 'magnetization/size-{0}/temp-{1}'.format(n, i)
if not os.path.exists(wd):
os.makedirs(wd)
if n !=1000:
write_job_script(wd=wd, n=n, s= n * 1000000, T=T, i=i)
else:
write_job_script(wd=wd, n=n, s= n * 1000000, T=T, i=i, nprocs = 1, q ='long')
run_job(wd)
|
[
"os.path.exists",
"os.makedirs",
"numpy.linspace"
] |
[((102, 127), 'numpy.linspace', 'np.linspace', (['(0.5)', '(1.7)', '(30)'], {}), '(0.5, 1.7, 30)\n', (113, 127), True, 'import numpy as np\n'), ((309, 327), 'os.path.exists', 'os.path.exists', (['wd'], {}), '(wd)\n', (323, 327), False, 'import os\n'), ((342, 357), 'os.makedirs', 'os.makedirs', (['wd'], {}), '(wd)\n', (353, 357), False, 'import os\n')]
|
import numpy as np
import scipy.stats
from functools import partial
from ..util.math import flattengrid
from ..comp.codata import ILR, close
from .log import Handle
logger = Handle(__name__)
def get_scaler(*fs):
"""
Generate a function which will transform columns of an array
based on input functions (e.g. :code:`np.log` will log-transform the x values,
:code:`None, np.log` will log-transform the y values but not the x).
Parameters
------------
fs
A series of functions to apply to subsequent axes of an array.
"""
def scaler(arr, fs=fs):
A = arr.copy()
for ix, f in enumerate(fs):
if f is not None:
A[:, ix] = f(A[:, ix])
return A
return partial(scaler, fs=fs)
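# --- Added usage sketch (illustrative): log-transform the first column only.
# scale_x = get_scaler(np.log, None)
# scale_x(np.array([[1., 2.], [10., 3.]]))  # -> [[0., 2.], [~2.303, 3.]]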
def sample_kde(data, samples, renorm=False, transform=lambda x: x, bw_method=None):
"""
Sample a Kernel Density Estimate at points or a grid defined.
Parameters
------------
data : :class:`numpy.ndarray`
Source data to estimate the kernel density estimate; observations should be
in rows (:code:`npoints, ndim`).
samples : :class:`numpy.ndarray`
Coordinates to sample the KDE estimate at (:code:`npoints, ndim`).
transform
Transformation used prior to kernel density estimate.
bw_method : :class:`str`, :class:`float`, callable
Method used to calculate the estimator bandwidth.
See :func:`scipy.stats.kde.gaussian_kde`.
Returns
----------
:class:`numpy.ndarray`
"""
# check shape info first
data = np.atleast_2d(data)
if data.shape[0] == 1: # single row which should be a column
logger.debug("Transposing data row to column format for KDE.")
data = data.T
tdata = transform(data)
tdata = tdata[np.isfinite(tdata).all(axis=1), :] # filter rows with nans
K = scipy.stats.gaussian_kde(tdata.T, bw_method=bw_method)
if isinstance(samples, list) and isinstance(samples[0], np.ndarray): # meshgrid
logger.debug("Sampling with meshgrid.")
zshape = samples[0].shape
ksamples = transform(flattengrid(samples))
else:
zshape = samples.shape[0]
ksamples = transform(samples)
# ensures shape is fine even if row is passed
ksamples = ksamples.reshape(-1, tdata.shape[1])
# samples shouldnt typically contain nans
# ksamples = ksamples[np.isfinite(ksamples).all(axis=1), :]
if not tdata.shape[1] == ksamples.shape[1]:
logger.warn("Dimensions of data and samples do not match.")
zi = K(ksamples.T)
zi = zi.reshape(zshape)
if renorm:
logger.debug("Normalising KDE sample.")
zi = zi / np.nanmax(zi)
return zi
def sample_ternary_kde(data, samples, transform=ILR):
"""
Sample a Kernel Density Estimate in ternary space points or a grid defined by
samples.
Parameters
------------
data : :class:`numpy.ndarray`
Source data to estimate the kernel density estimate (:code:`npoints, ndim`).
samples : :class:`numpy.ndarray`
Coordinates to sample the KDE estimate at (:code:`npoints, ndim`)..
transform
Log-transformation used prior to kernel density estimate.
Returns
----------
:class:`numpy.ndarray`
"""
return sample_kde(data, samples, transform=lambda x: transform(close(x)))
def lognorm_to_norm(mu, s):
"""
Calculate mean and variance for a normal random variable from the lognormal
parameters :code:`mu` and :code:`s`.
Parameters
-----------
mu : :class:`float`
Parameter :code:`mu` for the lognormal distribution.
s : :class:`float`
:code:`sigma` for the lognormal distribution.
Returns
--------
mean : :class:`float`
Mean of the normal distribution.
sigma : :class:`float`
Variance of the normal distribution.
"""
mean = np.exp(mu + 0.5 * s ** 2)
variance = (np.exp(s ** 2) - 1) * np.exp(2 * mu + s ** 2)
return mean, np.sqrt(variance)
def norm_to_lognorm(mean, sigma, exp=True):
"""
Calculate :code:`mu` and :code:`sigma` parameters for a lognormal random variable
with a given mean and variance. Lognormal with parameters
:code:`mean` and :code:`sigma`.
Parameters
-----------
mean : :class:`float`
Mean of the normal distribution.
sigma : :class:`float`
:code:`sigma` of the normal distribution.
exp : :class:`bool`
If using the :mod:`scipy.stats` parameterisation; this uses
:code:`scale = np.exp(mu)`.
Returns
--------
mu : :class:`float`
Parameter :code:`mu` for the lognormal distribution.
s : :class:`float`
:code:`sigma` of the lognormal distribution.
"""
mu = np.log(mean / np.sqrt(1 + sigma ** 2 / (mean ** 2)))
v = np.log(1 + sigma ** 2 / (mean ** 2))
if exp: # scipy parameterisation of lognormal uses scale = np.exp(mu) !
mu = np.exp(mu)
return mu, np.sqrt(v)
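# --- Added round-trip sketch (illustrative values): with exp=True the result
# matches the scipy.stats parameterisation scale = np.exp(mu).
# mean, sd = lognorm_to_norm(1.0, 0.5)
# scale, s = norm_to_lognorm(mean, sd)  # scale == np.exp(1.0), s == 0.5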
|
[
"numpy.atleast_2d",
"functools.partial",
"numpy.log",
"numpy.isfinite",
"numpy.exp",
"numpy.nanmax",
"numpy.sqrt"
] |
[((749, 771), 'functools.partial', 'partial', (['scaler'], {'fs': 'fs'}), '(scaler, fs=fs)\n', (756, 771), False, 'from functools import partial\n'), ((1578, 1597), 'numpy.atleast_2d', 'np.atleast_2d', (['data'], {}), '(data)\n', (1591, 1597), True, 'import numpy as np\n'), ((3903, 3928), 'numpy.exp', 'np.exp', (['(mu + 0.5 * s ** 2)'], {}), '(mu + 0.5 * s ** 2)\n', (3909, 3928), True, 'import numpy as np\n'), ((4833, 4867), 'numpy.log', 'np.log', (['(1 + sigma ** 2 / mean ** 2)'], {}), '(1 + sigma ** 2 / mean ** 2)\n', (4839, 4867), True, 'import numpy as np\n'), ((3967, 3990), 'numpy.exp', 'np.exp', (['(2 * mu + s ** 2)'], {}), '(2 * mu + s ** 2)\n', (3973, 3990), True, 'import numpy as np\n'), ((4008, 4025), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (4015, 4025), True, 'import numpy as np\n'), ((4960, 4970), 'numpy.exp', 'np.exp', (['mu'], {}), '(mu)\n', (4966, 4970), True, 'import numpy as np\n'), ((4986, 4996), 'numpy.sqrt', 'np.sqrt', (['v'], {}), '(v)\n', (4993, 4996), True, 'import numpy as np\n'), ((2692, 2705), 'numpy.nanmax', 'np.nanmax', (['zi'], {}), '(zi)\n', (2701, 2705), True, 'import numpy as np\n'), ((3945, 3959), 'numpy.exp', 'np.exp', (['(s ** 2)'], {}), '(s ** 2)\n', (3951, 3959), True, 'import numpy as np\n'), ((4786, 4821), 'numpy.sqrt', 'np.sqrt', (['(1 + sigma ** 2 / mean ** 2)'], {}), '(1 + sigma ** 2 / mean ** 2)\n', (4793, 4821), True, 'import numpy as np\n'), ((1804, 1822), 'numpy.isfinite', 'np.isfinite', (['tdata'], {}), '(tdata)\n', (1815, 1822), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
import numpy as np
a = np.array([2, 0, 1, 5])
print(a)
print(a[:3])
print(a.min())
# sort in ascending order
a.sort()
print(a)
# 2-D matrix
b = np.array([[1,2,3], [4,5,6]])
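# element-wise product of b with itself (use b @ b.T for matrix multiplication)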
print(b*b)
|
[
"numpy.array"
] |
[((47, 69), 'numpy.array', 'np.array', (['[2, 0, 1, 5]'], {}), '([2, 0, 1, 5])\n', (55, 69), True, 'import numpy as np\n'), ((149, 181), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (157, 181), True, 'import numpy as np\n')]
|
import sys
from vispy import scene
from vispy.scene import SceneCanvas
from vispy.visuals import transforms
from PyQt5 import QtWidgets, QtCore
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, QGridLayout, QPushButton, QCheckBox, QSlider
#from MyWidget import *
from vispy import io
#from vispy.visuals.filters import Alpha
from vispy.visuals.transforms import STTransform, MatrixTransform, ChainTransform
import numpy as np
from xml.dom.minidom import parse
import pickle
import random
from bvh import Bvh
import trimesh  # preserves vertex ordering when loading OBJ files (see marker code below)
from util import *
from myMath import *
#scaleMatDict=pickle.load(open('personalAdjustmentMatrix.pkl','rb'))
personalDict=pickle.load(open('output/personalDict.pkl','rb'))
scaleMatDict=personalDict['scaleMatDict']
headH_headM_personal=personalDict['headH_headM_personal']
######################################################
dataFolder='template/'
objPath=dataFolder+'OBJ/'
#bvhPath=dataFolder+'motion/'
######################################################
appQt = QtWidgets.QApplication(sys.argv)
win = QMainWindow()
win.resize(700, 500)
win.setWindowTitle('Component Tester')
canvas=SceneCanvas()
canvas.create_native()
canvas.native.setParent(win)
canvas.unfreeze()
view=canvas.central_widget.add_view() #add view to the canvas
view.camera='turntable'
view.camera.up='+y'
canvas.freeze()
#transparent surface must come last (order is important)
originAxis=scene.visuals.XYZAxis(parent=view.scene) #axis length=1
print(originAxis.pos)
################ marker set ####################
#markerDict={}
#f=open(objPath+'markerVertex.txt','r')
#markerCount=0
#for line in f.readlines():
# markerName,objName,vertexIndex=line.strip().split(' ')
# objName=objName+'.obj'
# vertexIndex=int(vertexIndex)
# if objName not in markerDict:
# markerDict[objName]=[]
#
# markerDict[objName].append((vertexIndex,markerName,))
# markerCount+=1
#
#f.close()
#print(markerDict.keys())
######################################################
human = parse(dataFolder+"human.xml")
nodes=human.getElementsByTagName('Node')
humanJointPositionList=[]
boxCenterList=[]
for aNode in nodes:
name=aNode.getAttribute('name')
parent=aNode.getAttribute('parent')
body=aNode.getElementsByTagName('Body')[0]
joint=aNode.getElementsByTagName('Joint')[0]
#print(name,parent,body,joint) #ok
mass=body.getAttribute('mass')
size=floatList(body.getAttribute('size')) #*
size*=np.diag(scaleMatDict[name])[:3]
#print(size)
contact=body.getAttribute('contact') #Off or On
obj=body.getAttribute('obj')
bodyT=getTransformationXML(body) #*
jointT=getTransformationXML(joint) #*
#boxCenterList.append(bodyT[0:3,3])
#humanJointPositionList.append(jointT[0:3,3])
modBoxCenter=(scaleMatDict[name]@bodyT[:,3:4])[0:3,0]
bodyT[:3,3]=modBoxCenter
#bodyT rotation should be adjusted a little bit too
#but keeping orientation the same should not be that bad
boxCenterList.append(modBoxCenter)
humanJointPositionList.append((scaleMatDict[name]@jointT[:,3:4])[0:3,0])
#print(bodyT) #ok
#print(jointT) #ok
    verts, faces, normals, nothin = io.read_mesh(objPath+obj) # keeps the vertex count, but not the original vertex order
verts*=0.01
vertexMod = scaleMatDict[name] @ np.vstack([verts.transpose(),np.ones([1,verts.shape[0]])]) #(4,n)
verts = vertexMod[:3,:].transpose()
objMesh = scene.visuals.Mesh(parent=view.scene,vertices=verts, shading='flat', faces=faces,color=(0.8, 0.8, 0.8,0.2)) #'flat' is much faster than 'smooth', None removes lighting
objMesh.set_gl_state('translucent', cull_face=False,depth_test=False)
aBox=scene.visuals.Box(parent=view.scene,width=size[0],depth=size[1],height=size[2],color=(0.8,0.1,0.1,0.2))
aBox.transform=MatrixTransform(bodyT.transpose()) #transpose to match openGL format
aBox.set_gl_state('translucent', cull_face=False,depth_test=False)
#if(obj in markerDict):
    #    v=trimesh.load(objPath+obj, process=False).vertices # trimesh is used because it preserves the vertex order
# v*=0.01
# for tup in markerDict[obj]:
# vertexIndex,markerName=tup #markerDict[obj][i]
# markerPosition.append(v[vertexIndex,:])
# print(obj,markerName,vertexIndex,v[vertexIndex,:])
#else:
# print(obj)
humanJointPositionList=np.stack(humanJointPositionList)
humanJointMarker=scene.visuals.Markers(parent=view.scene,size=8,pos=humanJointPositionList,face_color='green')
humanJointMarker.set_gl_state('translucent', cull_face=False,depth_test=False)
boxCenterList=np.stack(boxCenterList)
boxCenterMarker=scene.visuals.Markers(parent=view.scene,size=8,pos=boxCenterList,face_color='red')
boxCenterMarker.set_gl_state('translucent', cull_face=False,depth_test=False)
###################### marker position ######################
'''
markerPosition=[] #np.zeros([markerCount,3]) #global frame
finalMarkerDict=pickle.load(open(dataFolder+"markerPosition.pkl",'rb')) #processed from 01 genTemplateMarkerPosition
for markerName in finalMarkerDict:
markerPosition.append(finalMarkerDict[markerName])
markerPosition=np.stack(markerPosition)
markerMarker=scene.visuals.Markers(parent=view.scene,size=8,pos=markerPosition,face_color='orange')
markerMarker.set_gl_state('translucent', cull_face=False,depth_test=False)
'''
###################################################################
muscles = parse(dataFolder+"muscle284.xml").getElementsByTagName('Unit')
wp=[]
muscleColor=[]
indexPair=[]
random.seed(a=0)
for e in muscles:
name=e.getAttribute('name')
f0=e.getAttribute('f0')
lm=e.getAttribute('lm')
lt=e.getAttribute('lt')
pen_angle=e.getAttribute('pen_angle')
lmax=e.getAttribute('lmax')
#print(name)
#Random a bright color
while True:
cr=random.random()
cg=random.random()
cb=random.random()
if(max([cr,cg,cb])>0.3):
break
for i,w in enumerate(e.getElementsByTagName('Waypoint')):
belongTo=w.getAttribute('body')
p=np.array(floatList(w.getAttribute('p'))) #*
#print(belongTo,p)
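        # homo() (star-imported from myMath) is assumed to append a homogeneous
        # 1 to p so the 4x4 personal scale matrix can be applied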
pMod=scaleMatDict[belongTo] @ homo(p)
wp.append(pMod[:3])
muscleColor.append([cr,cg,cb,1.0])
if(i>0):
indexPair.append([len(wp)-2,len(wp)-1])
#aLine=scene.visuals.Line(parent=view.scene,pos=wp,width=1,connect='strip',color='yellow') # method='agg'
#aLine.set_gl_state('translucent', cull_face=False,depth_test=True)
wp=np.stack(wp)
muscleColor=np.stack(muscleColor)
allLine=scene.visuals.Line(parent=view.scene,pos=wp,color=muscleColor,width=1,connect=np.array(indexPair))
allLine.set_gl_state('translucent', cull_face=False,depth_test=True)
'''
################# BVH ####################################
bvhMultiplier=0.01
with open(bvhPath+'SNFAT_walking.bvh') as f:
mocap = Bvh(f.read())
rootName='Character1_Hips'
rootOffset=np.array([float(e) for e in next(mocap.root.filter('ROOT'))['OFFSET']])*bvhMultiplier
rootOffset=humanJointPositionList[0,:] #np.zeros(3) #hack
#turn everything I need to dict of dict
s={
'':{
'parent':'',
'relOffset':np.zeros(3), #relative offset from parent joint
'absOffset':np.zeros(3) #absolute offset in rest pose (T-pose)
},
rootName:{
'parent':'',
'relOffset':rootOffset,
'absOffset':rootOffset,
}}
bvhJointList=mocap.get_joints_names()
print(bvhJointList)
for p in bvhJointList:
s[p]['children']=[str(e).split(' ',1)[1] for e in mocap.joint_direct_children(p)]
for c in s[p]['children']:
relativeOffset=np.array(mocap.joint_offset(c))*bvhMultiplier
s[c]={
'parent':p,
'relOffset':relativeOffset,
'absOffset':s[p]['absOffset']+relativeOffset,
}
bvhJointPosition=np.zeros([len(bvhJointList),3])
for i,p in enumerate(bvhJointList):
bvhJointPosition[i,:]=s[p]['absOffset']
#print(p,s[p]['absOffset'])
if('RIGMESH' in p):
bvhJointPosition[i,:]=0
#print(bvhJointPosition)
bvhMarker=scene.visuals.Markers(parent=view.scene,size=8,pos=bvhJointPosition,face_color='blue')
bvhMarker.set_gl_state('translucent', cull_face=False,depth_test=False)
'''
#=================================
rightPanel=QWidget()
gbox = QtWidgets.QGridLayout()
testButton=QPushButton()
testButton.setText("test")
gbox.addWidget(testButton,0,1)
def test():
#sk.set_data(connect=np.array([[0,1],[0,2],[0,3]],dtype=int))
print('click')
testButton.clicked.connect(test)
rightPanel.setLayout(gbox)
splitter=QtWidgets.QSplitter(QtCore.Qt.Horizontal)
splitter.addWidget(canvas.native) #add canvas to splitter
splitter.addWidget(rightPanel)
win.setCentralWidget(splitter) #add splitter to main window
#========================
#========================
win.show()
appQt.exec_()
|
[
"vispy.scene.visuals.Mesh",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QPushButton",
"numpy.ones",
"vispy.scene.visuals.Markers",
"PyQt5.QtWidgets.QApplication",
"numpy.diag",
"vispy.scene.SceneCanvas",
"vispy.scene.visuals.XYZAxis",
"PyQt5.QtWidgets.QWidget",
"random.seed",
"numpy.stack",
"PyQt5.QtWidgets.QMainWindow",
"vispy.scene.visuals.Box",
"random.random",
"vispy.io.read_mesh",
"xml.dom.minidom.parse",
"numpy.array",
"PyQt5.QtWidgets.QSplitter"
] |
[((1039, 1071), 'PyQt5.QtWidgets.QApplication', 'QtWidgets.QApplication', (['sys.argv'], {}), '(sys.argv)\n', (1061, 1071), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((1079, 1092), 'PyQt5.QtWidgets.QMainWindow', 'QMainWindow', ([], {}), '()\n', (1090, 1092), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, QGridLayout, QPushButton, QCheckBox, QSlider\n'), ((1161, 1174), 'vispy.scene.SceneCanvas', 'SceneCanvas', ([], {}), '()\n', (1172, 1174), False, 'from vispy.scene import SceneCanvas\n'), ((1438, 1478), 'vispy.scene.visuals.XYZAxis', 'scene.visuals.XYZAxis', ([], {'parent': 'view.scene'}), '(parent=view.scene)\n', (1459, 1478), False, 'from vispy import scene\n'), ((2042, 2073), 'xml.dom.minidom.parse', 'parse', (["(dataFolder + 'human.xml')"], {}), "(dataFolder + 'human.xml')\n", (2047, 2073), False, 'from xml.dom.minidom import parse\n'), ((4451, 4483), 'numpy.stack', 'np.stack', (['humanJointPositionList'], {}), '(humanJointPositionList)\n', (4459, 4483), True, 'import numpy as np\n'), ((4501, 4601), 'vispy.scene.visuals.Markers', 'scene.visuals.Markers', ([], {'parent': 'view.scene', 'size': '(8)', 'pos': 'humanJointPositionList', 'face_color': '"""green"""'}), "(parent=view.scene, size=8, pos=humanJointPositionList,\n face_color='green')\n", (4522, 4601), False, 'from vispy import scene\n'), ((4689, 4712), 'numpy.stack', 'np.stack', (['boxCenterList'], {}), '(boxCenterList)\n', (4697, 4712), True, 'import numpy as np\n'), ((4729, 4818), 'vispy.scene.visuals.Markers', 'scene.visuals.Markers', ([], {'parent': 'view.scene', 'size': '(8)', 'pos': 'boxCenterList', 'face_color': '"""red"""'}), "(parent=view.scene, size=8, pos=boxCenterList,\n face_color='red')\n", (4750, 4818), False, 'from vispy import scene\n'), ((5625, 5641), 'random.seed', 'random.seed', ([], {'a': '(0)'}), '(a=0)\n', (5636, 5641), False, 'import random\n'), ((6604, 6616), 'numpy.stack', 'np.stack', (['wp'], {}), '(wp)\n', (6612, 6616), True, 'import numpy as np\n'), ((6629, 6650), 'numpy.stack', 'np.stack', (['muscleColor'], {}), '(muscleColor)\n', (6637, 6650), True, 'import numpy as np\n'), ((8371, 8380), 'PyQt5.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (8378, 8380), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, QGridLayout, QPushButton, QCheckBox, QSlider\n'), ((8389, 8412), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', ([], {}), '()\n', (8410, 8412), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((8427, 8440), 'PyQt5.QtWidgets.QPushButton', 'QPushButton', ([], {}), '()\n', (8438, 8440), False, 'from PyQt5.QtWidgets import QMainWindow, QWidget, QLabel, QGridLayout, QPushButton, QCheckBox, QSlider\n'), ((8669, 8710), 'PyQt5.QtWidgets.QSplitter', 'QtWidgets.QSplitter', (['QtCore.Qt.Horizontal'], {}), '(QtCore.Qt.Horizontal)\n', (8688, 8710), False, 'from PyQt5 import QtWidgets, QtCore\n'), ((3221, 3248), 'vispy.io.read_mesh', 'io.read_mesh', (['(objPath + obj)'], {}), '(objPath + obj)\n', (3233, 3248), False, 'from vispy import io\n'), ((3492, 3607), 'vispy.scene.visuals.Mesh', 'scene.visuals.Mesh', ([], {'parent': 'view.scene', 'vertices': 'verts', 'shading': '"""flat"""', 'faces': 'faces', 'color': '(0.8, 0.8, 0.8, 0.2)'}), "(parent=view.scene, vertices=verts, shading='flat', faces\n =faces, color=(0.8, 0.8, 0.8, 0.2))\n", (3510, 3607), False, 'from vispy import scene\n'), ((3748, 3863), 'vispy.scene.visuals.Box', 'scene.visuals.Box', ([], {'parent': 'view.scene', 'width': 'size[0]', 'depth': 'size[1]', 'height': 'size[2]', 'color': '(0.8, 0.1, 0.1, 0.2)'}), '(parent=view.scene, width=size[0], depth=size[1], height=\n size[2], color=(0.8, 0.1, 0.1, 0.2))\n', (3765, 3863), False, 'from vispy import scene\n'), ((2486, 2513), 'numpy.diag', 'np.diag', (['scaleMatDict[name]'], {}), '(scaleMatDict[name])\n', (2493, 2513), True, 'import numpy as np\n'), ((5528, 5563), 'xml.dom.minidom.parse', 'parse', (["(dataFolder + 'muscle284.xml')"], {}), "(dataFolder + 'muscle284.xml')\n", (5533, 5563), False, 'from xml.dom.minidom import parse\n'), ((5925, 5940), 'random.random', 'random.random', ([], {}), '()\n', (5938, 5940), False, 'import random\n'), ((5952, 5967), 'random.random', 'random.random', ([], {}), '()\n', (5965, 5967), False, 'import random\n'), ((5979, 5994), 'random.random', 'random.random', ([], {}), '()\n', (5992, 5994), False, 'import random\n'), ((6737, 6756), 'numpy.array', 'np.array', (['indexPair'], {}), '(indexPair)\n', (6745, 6756), True, 'import numpy as np\n'), ((3400, 3428), 'numpy.ones', 'np.ones', (['[1, verts.shape[0]]'], {}), '([1, verts.shape[0]])\n', (3407, 3428), True, 'import numpy as np\n')]
|
import numpy as np
import copy
from memory_profiler import profile
# physical/external base state of all entities
def isNear(box,landmark,threshold=0.05):
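    # note: compares the *squared* distance against `threshold`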
if (np.sum(np.square(box.state.p_pos-landmark.state.p_pos)) <= threshold):
return True
else:
return False
def calcDistance(entity1,entity2):
return np.sqrt(np.sum(np.square(entity1.state.p_pos - entity2.state.p_pos)))
class EntityState(object):
def __init__(self):
# physical position
self.p_pos = None
# physical velocity
self.p_vel = None
# state of agents (including communication and internal/mental state)
class AgentState(EntityState):
def __init__(self):
super(AgentState, self).__init__()
# communication utterance
self.c = None
# action of the agent
class Action(object):
def __init__(self):
# physical action
self.u = None
# communication action
self.c = None
# pickup Action
self.pickup = False
# drop Action
self.drop = False
class Wall(object):
def __init__(self, orient='H', axis_pos=0.0, endpoints=(-1, 1), width=0.1,
hard=True):
# orientation: 'H'orizontal or 'V'ertical
self.orient = orient
        # position along axis which wall lies on (y-axis for H, x-axis for V)
self.axis_pos = axis_pos
# endpoints of wall (x-coords for H, y-coords for V)
self.endpoints = np.array(endpoints)
# width of wall
self.width = width
# whether wall is impassable to all agents
self.hard = hard
# color of wall
self.color = np.array([0.0, 0.0, 0.0])
# properties and state of physical world entity
class Entity(object):
def __init__(self):
# index among all entities (important to set for distance caching)
self.i = 0
# name
self.name = ''
# properties:
self.size = 0.050
# entity can move / be pushed
self.movable = False
# entity collides with others
self.collide = True
# entity can pass through non-hard walls
self.ghost = False
# material density (affects mass)
self.density = 25.0
# color
self.color = None
# max speed and accel
self.max_speed = None
self.accel = None
# state
self.state = EntityState()
# mass
self.initial_mass = 1.0
@property
def mass(self):
return self.initial_mass
# properties of landmark entities
class Landmark(Entity):
def __init__(self):
super(Landmark, self).__init__()
# Properties of the load that we wish to carry
class LoadBox(Entity):
"""
The load boxes have to reach the corresponding landmarks
"""
def __init__(self):
super(LoadBox, self).__init__()
# Agent which will get reward once this task is done
self.agentAssigned = None
# Reward agent will get once the Box Reaches its target
self.rewardAssigned = None
# Determines whether any agent has picked up this box
self.pickedUp = False
# Agent which is handling this box
self.agentHandling = None
#Boxes are pickup but but do not collide
self.collide = False
        # Initial distance of the box from its goal landmark (set by farInit)
self.goalDistInit = None
#To store the previous state of goalDist of the Box
self.prevGoalDist = None
def farInit(self,landmark):
self.goalDistInit = calcDistance(self,landmark)
self.prevGoalDist = copy.deepcopy(self.goalDistInit)
def rewardDist(self,landmark,rewardMultiplier=2.0,negativeRewardMultiplier=-2.0,stagnantReward = -5.0, nearRewardConstant = 10.0):
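        # Reward scheme: fixed penalty if the box did not move, fixed bonus once
        # it sits at the goal, otherwise reward proportional to the fraction of
        # the initial goal distance covered so far.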
threshold = 0.05
boxReached = isNear(self,landmark,threshold=threshold)
#! Heavily penalizing taking the box away
distancePrev = copy.deepcopy(self.prevGoalDist)
distanceNow = calcDistance(self,landmark)
#? Updating the goal distance in the memory
self.prevGoalDist = distanceNow
#! Rewarding negative if the box is stagnant
if distanceNow == distancePrev and not boxReached:
return stagnantReward
elif distanceNow == distancePrev and boxReached:
return nearRewardConstant
#! Rewarding negative if the box has been moved away from the target
# elif distanceNow > distancePrev:
# # print("Box Moved Away")
# return negativeRewardMultiplier*((distanceNow-distancePrev)/self.goalDistInit)
#How much is the box nearer to the goal compared to where it was initially
else:
return rewardMultiplier*(1.0-(distanceNow/self.goalDistInit))
# properties of agent entities
class Agent(Entity):
def __init__(self):
super(Agent, self).__init__()
#Names of Assigned Boxes
self.assignedBoxes = []
#Stores all of the boxes that the agent has handled on his way
self.extraBoxesHandled = []
# agents are movable by default
self.movable = True
        # a silent agent cannot send communication signals
        self.silent = False
        # a blind agent cannot observe the world
        self.blind = False
# is agent from a warehouse task
self.warehouse = False
# physical motor noise amount
self.u_noise = None
# communication noise amount
self.c_noise = None
# control range
self.u_range = 1.0
# state
self.state = AgentState()
# action
self.action = Action()
# script behavior to execute
self.action_callback = None
# multi-agent world
class World(object):
def __init__(self):
# list of agents and entities (can change at execution-time!)
self.agents = []
# list of all of the landmarks initialized
self.landmarks = []
self.walls = []
#To add the boxes that needs to be transported
self.boxes = []
#Reward Associated with the boxes
self.boxRewards = []
#Max number of agents expected
self.maxAgents = 5
#Max number of boxes expected
self.maxBoxes = 5
# communication channel dimensionality
self.dim_c = 0
# position dimensionality
self.dim_p = 2
# color dimensionality
self.dim_color = 3
# simulation timestep
self.dt = 0.1
# physical damping
self.damping = 0.25
# contact response parameters
self.contact_force = 1e+2
self.contact_margin = 1e-3
# cache distances between all agents (not calculated by default)
self.cache_dists = False
self.cached_dist_vect = None
self.cached_dist_mag = None
# return all entities in the world
@property
def entities(self):
return self.agents + self.boxes + self.landmarks
# return all agents controllable by external policies
@property
def policy_agents(self):
return [agent for agent in self.agents if agent.action_callback is None]
# return all agents controlled by world scripts
@property
def scripted_agents(self):
return [agent for agent in self.agents if agent.action_callback is not None]
#! We have not been using this distance which is being calculated every step
def calculate_distances(self):
if self.cached_dist_vect is None:
# initialize distance data structure
self.cached_dist_vect = np.zeros((len(self.entities),
len(self.entities),
self.dim_p))
# calculate minimum distance for a collision between all entities
self.min_dists = np.zeros((len(self.entities), len(self.entities)))
for ia, entity_a in enumerate(self.entities):
for ib in range(ia + 1, len(self.entities)):
entity_b = self.entities[ib]
min_dist = entity_a.size + entity_b.size
self.min_dists[ia, ib] = min_dist
self.min_dists[ib, ia] = min_dist
for ia, entity_a in enumerate(self.entities):
for ib in range(ia + 1, len(self.entities)):
entity_b = self.entities[ib]
delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
self.cached_dist_vect[ia, ib, :] = delta_pos
self.cached_dist_vect[ib, ia, :] = -delta_pos
self.cached_dist_mag = np.linalg.norm(self.cached_dist_vect, axis=2)
self.cached_collisions = (self.cached_dist_mag <= self.min_dists)
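        # boolean matrix: True wherever two entities are within collision range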
def boxRewardCalc(self):
for i, box in enumerate(self.boxes):
self.boxRewards[i] = box.rewardDist(self.landmarks[i])
# update state of the world
def step(self):
# set actions for scripted agents
for agent in self.scripted_agents:
agent.action = agent.action_callback(agent, self)
# update boxes states
self.agent_pick_drop()
# Calculate all of the rewards
self.boxRewardCalc()
# gather forces applied to entities
p_force = [None] * len(self.entities)
# apply agent physical controls
p_force = self.apply_action_force(p_force)
# apply environment forces
p_force = self.apply_environment_force(p_force)
# integrate physical state
self.integrate_state(p_force)
# update agent state
for agent in self.agents:
self.update_agent_state(agent)
# calculate and store distances between all entities
if self.cache_dists:
self.calculate_distances()
def agent_pick_drop(self):
threshold = 0.25
busy_agents = []
        # Calculate which agents are busy carrying boxes
for box in self.boxes:
if box.agentHandling:
busy_agents.append(box.agentHandling)
        # Drop boxes and remove the corresponding agents from busy_agents
for i, agent in enumerate(self.agents):
            # TODO: allow the agent to put down the box only when it is stationary
if agent.action.drop:
for box in self.boxes:
if box.agentHandling == agent.name: # and (np.linalg.norm(agent.state.p_vel) < 0.5):
agent.color = agent.color + np.ones(agent.color.size)*.5
busy_agents.remove(box.agentHandling)
box.movable = False
box.pickedUp = False
box.agentHandling = None
box.state.p_pos = copy.deepcopy(agent.state.p_pos)
box.state.p_vel = np.zeros(self.dim_p)
break
        # TODO: improve assignment (an agent should pick up the box closest to it, while a box should be picked up by the agent closest to it)
for box in self.boxes:
closest_agent = []
for agent in self.agents:
                # An agent already carrying a box cannot pick up another
if agent.name in busy_agents: # or (np.linalg.norm(agent.state.p_vel) > 0.5):
continue
                # An agent within the threshold distance competes to pick up the box
                # TODO: allow pickup only when the agent is stationary
                dist = calcDistance(agent, box)
                if agent.action.pickup and dist <= threshold:
                    closest_agent.append((dist, agent))
# Select the closest among all agents in threshold distance
if len(closest_agent) > 0:
                # sort by distance only; Agent objects do not define an ordering
                closest_agent.sort(key=lambda pair: pair[0])
                #! darken the color of the agent that picks up the box
closest_agent[0][1].color = closest_agent[0][1].color - np.ones(agent.color.size)*.5
busy_agents.append(closest_agent[0][1].name)
                #? If handling this box is extra work beyond the agent's assignment, record it
if box.name not in closest_agent[0][1].assignedBoxes and box.name not in closest_agent[0][1].extraBoxesHandled:
# print("{} picked extra {}".format(closest_agent[0][1].name,box.name))
closest_agent[0][1].extraBoxesHandled.append(box.name)
box.movable = True
box.pickedUp = True
box.agentHandling = closest_agent[0][1].name
                box.state.p_pos = copy.deepcopy(closest_agent[0][1].state.p_pos)  ## aliasing issue with direct assignment (temporary fix using deepcopy)
                box.state.p_vel = copy.deepcopy(closest_agent[0][1].state.p_vel)  ## aliasing issue with direct assignment (temporary fix using deepcopy)
# gather agent action forces
def apply_action_force(self, p_force):
# set applied forces
for i,agent in enumerate(self.agents):
if agent.movable:
noise = np.random.randn(*agent.action.u.shape) * agent.u_noise if agent.u_noise else 0.0
p_force[i] = (agent.mass * agent.accel if agent.accel is not None else agent.mass) * agent.action.u + noise
# apply forces to the pickedup boxes
for i,box in enumerate(self.boxes):
if box.pickedUp:
                assert box.agentHandling is not None
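                # box.agentHandling stores the carrying agent's name, assumed
                # to follow the 'agent <idx>' convention, so the agent's index
                # into p_force can be recovered from it. The control force is
                # split between agent and box in proportion to their masses,
                # giving F_a/m_a = F_b/m_b = F_total/(m_a + m_b): the pair
                # accelerates together as one rigid body.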
agent_id = int(box.agentHandling.split(' ')[1])
total_force = p_force[agent_id]
p_force[agent_id] = total_force * (self.agents[agent_id].mass / (box.mass + self.agents[agent_id].mass))
p_force[i + len(self.agents)] = total_force * (box.mass / (box.mass + self.agents[agent_id].mass))
return p_force
# gather physical forces acting on entities
def apply_environment_force(self, p_force):
# simple (but inefficient) collision response
for a,entity_a in enumerate(self.entities):
for b,entity_b in enumerate(self.entities):
if(b <= a): continue
[f_a, f_b] = self.get_collision_force(entity_a, entity_b)
if(f_a is not None):
if(p_force[a] is None): p_force[a] = 0.0
p_force[a] = f_a + p_force[a]
if(f_b is not None):
if(p_force[b] is None): p_force[b] = 0.0
p_force[b] = f_b + p_force[b]
if entity_a.movable:
for wall in self.walls:
wf = self.get_wall_collision_force(entity_a, wall)
if wf is not None:
if p_force[a] is None:
p_force[a] = 0.0
p_force[a] = p_force[a] + wf
return p_force
# integrate physical state
def integrate_state(self, p_force):
# Compute pairs of boxes and agents
pairs = {}
for i,box in enumerate(self.boxes):
if box.agentHandling:
agent_id = int(box.agentHandling.split(' ')[1])
pairs[agent_id] = len(self.agents) + i
pairs[len(self.agents) + i] = agent_id
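        # pairs maps an agent's entity index to the index of the box it
        # carries and vice versa; boxes start at offset len(self.agents)
        # because entities are ordered agents -> boxes -> landmarks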
        # Update the state, taking damping and applied forces into account
for i,entity in enumerate(self.entities):
if not entity.movable: continue
entity.state.p_vel = entity.state.p_vel * (1 - self.damping)
if i in pairs.keys():
if (p_force[i] is not None):
if (p_force[pairs[i]] is not None):
net_force = p_force[i] + p_force[pairs[i]]
else:
net_force = p_force[i]
else:
if (p_force[pairs[i]] is not None):
net_force = p_force[pairs[i]]
else:
net_force = 0
net_mass = entity.mass + self.entities[pairs[i]].mass
entity.state.p_vel += (net_force / net_mass) * self.dt
else:
if (p_force[i] is not None):
entity.state.p_vel += (p_force[i] / entity.mass) * self.dt
# set max allowed speed to minimum of max allowed speed of box and agent
if i in pairs.keys():
if self.entities[pairs[i]].max_speed is not None:
if entity.max_speed is not None:
max_speed = min(self.entities[pairs[i]].max_speed, entity.max_speed)
else:
max_speed = self.entities[pairs[i]].max_speed
else:
max_speed = entity.max_speed
else:
max_speed = entity.max_speed
            if max_speed is not None:
                speed = np.sqrt(np.square(entity.state.p_vel[0]) + np.square(entity.state.p_vel[1]))
                # clamp to the effective max speed (min of agent and carried box)
                if speed > max_speed:
                    entity.state.p_vel = entity.state.p_vel / speed * max_speed
entity.state.p_pos += entity.state.p_vel * self.dt
def update_agent_state(self, agent):
# set communication state (directly for now)
if agent.silent:
            agent.state.c = np.zeros(self.dim_c)  #! the message state is zeroed out when the agent is silent
else:
noise = np.random.randn(*agent.action.c.shape) * agent.c_noise if agent.c_noise else 0.0
agent.state.c = agent.action.c + noise
# get collision forces for any contact between two entities
def get_collision_force(self, entity_a, entity_b):
if (not entity_a.collide) or (not entity_b.collide):
return [None, None] # not a collider
if (not entity_a.movable) and (not entity_b.movable):
return [None, None] # neither entity moves
if (entity_a is entity_b):
return [None, None] # don't collide against itself
if isinstance(entity_a, Agent) and isinstance(entity_b, LoadBox):
if entity_b.pickedUp and (entity_b.agentHandling == entity_a.name):
                return [None, None]  # the agent does not collide with the box it is carrying
        if self.cache_dists:
            # recover the entity indices into the cached distance arrays
            ia = self.entities.index(entity_a)
            ib = self.entities.index(entity_b)
            delta_pos = self.cached_dist_vect[ia, ib]
            dist = self.cached_dist_mag[ia, ib]
            dist_min = self.min_dists[ia, ib]
else:
# compute actual distance between entities
delta_pos = entity_a.state.p_pos - entity_b.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
# minimum allowable distance
dist_min = entity_a.size + entity_b.size
# softmax penetration
k = self.contact_margin
penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
force = self.contact_force * delta_pos / dist * penetration
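        # np.logaddexp(0, -(dist - dist_min)/k)*k equals
        # k*log(1 + exp((dist_min - dist)/k)), a smooth (softplus)
        # approximation of max(0, dist_min - dist): near zero while the
        # entities are apart and roughly the overlap depth once they
        # interpenetrate, with k = contact_margin setting the sharpness.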
if entity_a.movable and entity_b.movable:
# consider mass in collisions
force_ratio = entity_b.mass / entity_a.mass
force_a = force_ratio * force
force_b = -(1 / force_ratio) * force
else:
force_a = +force if entity_a.movable else None
force_b = -force if entity_b.movable else None
return [force_a, force_b]
# get collision forces for contact between an entity and a wall
def get_wall_collision_force(self, entity, wall):
if entity.ghost and not wall.hard:
return None # ghost passes through soft walls
if wall.orient == 'H':
prll_dim = 0
perp_dim = 1
else:
prll_dim = 1
perp_dim = 0
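        # prll_dim indexes the coordinate running along the wall,
        # perp_dim the coordinate normal to it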
ent_pos = entity.state.p_pos
if (ent_pos[prll_dim] < wall.endpoints[0] - entity.size or
ent_pos[prll_dim] > wall.endpoints[1] + entity.size):
return None # entity is beyond endpoints of wall
elif (ent_pos[prll_dim] < wall.endpoints[0] or
ent_pos[prll_dim] > wall.endpoints[1]):
# part of entity is beyond wall
if ent_pos[prll_dim] < wall.endpoints[0]:
dist_past_end = ent_pos[prll_dim] - wall.endpoints[0]
else:
dist_past_end = ent_pos[prll_dim] - wall.endpoints[1]
theta = np.arcsin(dist_past_end / entity.size)
dist_min = np.cos(theta) * entity.size + 0.5 * wall.width
else: # entire entity lies within bounds of wall
theta = 0
dist_past_end = 0
dist_min = entity.size + 0.5 * wall.width
# only need to calculate distance in relevant dim
delta_pos = ent_pos[perp_dim] - wall.axis_pos
dist = np.abs(delta_pos)
# softmax penetration
k = self.contact_margin
penetration = np.logaddexp(0, -(dist - dist_min)/k)*k
force_mag = self.contact_force * delta_pos / dist * penetration
force = np.zeros(2)
force[perp_dim] = np.cos(theta) * force_mag
force[prll_dim] = np.sin(theta) * np.abs(force_mag)
return force
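# Minimal usage sketch (illustrative only; assumes the Entity/LoadBox classes
# and the isNear/calcDistance helpers defined earlier in this module):
#
#     world = World()
#     world.agents = [Agent() for _ in range(2)]
#     for i, agent in enumerate(world.agents):
#         # the 'agent <idx>' name format is relied on by agent_pick_drop
#         # and apply_action_force
#         agent.name = 'agent %d' % i
#     # populate world.boxes and world.landmarks, then advance the simulation
#     world.step()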
|
[
"copy.deepcopy",
"numpy.abs",
"numpy.random.randn",
"numpy.square",
"numpy.zeros",
"numpy.arcsin",
"numpy.ones",
"numpy.sin",
"numpy.linalg.norm",
"numpy.array",
"numpy.logaddexp",
"numpy.cos"
] |
[((1463, 1482), 'numpy.array', 'np.array', (['endpoints'], {}), '(endpoints)\n', (1471, 1482), True, 'import numpy as np\n'), ((1655, 1680), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (1663, 1680), True, 'import numpy as np\n'), ((3590, 3622), 'copy.deepcopy', 'copy.deepcopy', (['self.goalDistInit'], {}), '(self.goalDistInit)\n', (3603, 3622), False, 'import copy\n'), ((3948, 3980), 'copy.deepcopy', 'copy.deepcopy', (['self.prevGoalDist'], {}), '(self.prevGoalDist)\n', (3961, 3980), False, 'import copy\n'), ((8676, 8721), 'numpy.linalg.norm', 'np.linalg.norm', (['self.cached_dist_vect'], {'axis': '(2)'}), '(self.cached_dist_vect, axis=2)\n', (8690, 8721), True, 'import numpy as np\n'), ((20909, 20926), 'numpy.abs', 'np.abs', (['delta_pos'], {}), '(delta_pos)\n', (20915, 20926), True, 'import numpy as np\n'), ((21139, 21150), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (21147, 21150), True, 'import numpy as np\n'), ((171, 220), 'numpy.square', 'np.square', (['(box.state.p_pos - landmark.state.p_pos)'], {}), '(box.state.p_pos - landmark.state.p_pos)\n', (180, 220), True, 'import numpy as np\n'), ((348, 400), 'numpy.square', 'np.square', (['(entity1.state.p_pos - entity2.state.p_pos)'], {}), '(entity1.state.p_pos - entity2.state.p_pos)\n', (357, 400), True, 'import numpy as np\n'), ((17528, 17548), 'numpy.zeros', 'np.zeros', (['self.dim_c'], {}), '(self.dim_c)\n', (17536, 17548), True, 'import numpy as np\n'), ((19008, 19047), 'numpy.logaddexp', 'np.logaddexp', (['(0)', '(-(dist - dist_min) / k)'], {}), '(0, -(dist - dist_min) / k)\n', (19020, 19047), True, 'import numpy as np\n'), ((21011, 21050), 'numpy.logaddexp', 'np.logaddexp', (['(0)', '(-(dist - dist_min) / k)'], {}), '(0, -(dist - dist_min) / k)\n', (21023, 21050), True, 'import numpy as np\n'), ((21177, 21190), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (21183, 21190), True, 'import numpy as np\n'), ((21229, 21242), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (21235, 21242), True, 'import numpy as np\n'), ((21245, 21262), 'numpy.abs', 'np.abs', (['force_mag'], {}), '(force_mag)\n', (21251, 21262), True, 'import numpy as np\n'), ((12733, 12779), 'copy.deepcopy', 'copy.deepcopy', (['closest_agent[0][1].state.p_pos'], {}), '(closest_agent[0][1].state.p_pos)\n', (12746, 12779), False, 'import copy\n'), ((12868, 12914), 'copy.deepcopy', 'copy.deepcopy', (['closest_agent[0][1].state.p_vel'], {}), '(closest_agent[0][1].state.p_vel)\n', (12881, 12914), False, 'import copy\n'), ((20508, 20546), 'numpy.arcsin', 'np.arcsin', (['(dist_past_end / entity.size)'], {}), '(dist_past_end / entity.size)\n', (20517, 20546), True, 'import numpy as np\n'), ((17631, 17669), 'numpy.random.randn', 'np.random.randn', (['*agent.action.c.shape'], {}), '(*agent.action.c.shape)\n', (17646, 17669), True, 'import numpy as np\n'), ((18807, 18827), 'numpy.square', 'np.square', (['delta_pos'], {}), '(delta_pos)\n', (18816, 18827), True, 'import numpy as np\n'), ((10851, 10883), 'copy.deepcopy', 'copy.deepcopy', (['agent.state.p_pos'], {}), '(agent.state.p_pos)\n', (10864, 10883), False, 'import copy\n'), ((10926, 10946), 'numpy.zeros', 'np.zeros', (['self.dim_p'], {}), '(self.dim_p)\n', (10934, 10946), True, 'import numpy as np\n'), ((12103, 12128), 'numpy.ones', 'np.ones', (['agent.color.size'], {}), '(agent.color.size)\n', (12110, 12128), True, 'import numpy as np\n'), ((13176, 13214), 'numpy.random.randn', 'np.random.randn', (['*agent.action.u.shape'], {}), '(*agent.action.u.shape)\n', (13191, 13214), 
True, 'import numpy as np\n'), ((16979, 17011), 'numpy.square', 'np.square', (['entity.state.p_vel[0]'], {}), '(entity.state.p_vel[0])\n', (16988, 17011), True, 'import numpy as np\n'), ((17014, 17046), 'numpy.square', 'np.square', (['entity.state.p_vel[1]'], {}), '(entity.state.p_vel[1])\n', (17023, 17046), True, 'import numpy as np\n'), ((20570, 20583), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (20576, 20583), True, 'import numpy as np\n'), ((10580, 10605), 'numpy.ones', 'np.ones', (['agent.color.size'], {}), '(agent.color.size)\n', (10587, 10605), True, 'import numpy as np\n'), ((17163, 17195), 'numpy.square', 'np.square', (['entity.state.p_vel[0]'], {}), '(entity.state.p_vel[0])\n', (17172, 17195), True, 'import numpy as np\n'), ((17264, 17296), 'numpy.square', 'np.square', (['entity.state.p_vel[1]'], {}), '(entity.state.p_vel[1])\n', (17273, 17296), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ndex2.client` package."""
import os
import sys
import io
import decimal
import unittest
import numpy as np
import json
import requests_mock
from unittest.mock import MagicMock
from requests.exceptions import HTTPError
from ndex2 import client
from ndex2.client import Ndex2
from ndex2.client import DecimalEncoder
from ndex2 import __version__
from ndex2.exceptions import NDExInvalidCXError
from ndex2.exceptions import NDExNotFoundError
from ndex2.exceptions import NDExUnauthorizedError
from ndex2.exceptions import NDExInvalidParameterError
from ndex2.exceptions import NDExUnsupportedCallError
from ndex2.exceptions import NDExError
SKIP_REASON = 'NDEX2_TEST_SERVER environment variable detected, ' \
              'skipping for integration tests'
@unittest.skipIf(os.getenv('NDEX2_TEST_SERVER') is not None, SKIP_REASON)
class TestClient(unittest.TestCase):
def get_rest_admin_status_dict(self, version='2.1'):
return {"networkCount": 1321,
"userCount": 12,
"groupCount": 0,
"message": "Online",
"properties": {"ServerVersion": version,
"ServerResultLimit": "10000"}}
def get_rest_admin_v1_empty_dict(self):
return {}
def get_rest_admin_status_url(self):
return client.DEFAULT_SERVER + '/rest/admin/status'
def setUp(self):
"""Set up test fixtures, if any."""
pass
def tearDown(self):
"""Tear down test fixtures, if any."""
pass
def test_decimalencoder(self):
dec = DecimalEncoder()
if sys.version_info.major >= 3:
# test bytes is returned as string
res = dec.default(bytes('hello', 'utf-8'))
self.assertTrue(isinstance(res, str))
# test decimal.Decimal is float
res = dec.default(decimal.Decimal(5))
self.assertTrue(isinstance(res, float))
# test numpy.int64 is int
res = dec.default(np.int64(12))
self.assertTrue(isinstance(res, int))
            # test np.int32, which may fall through to the base class and raise TypeError
try:
res = dec.default(np.int32(1))
self.assertEqual(res, int(1))
except TypeError:
pass
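        # Illustrative usage: the encoder is normally handed to json.dumps,
        # e.g. json.dumps({'n': np.int64(12)}, cls=DecimalEncoder) == '{"n": 12}'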
def test_ndex2_constructor_with_localhost(self):
# this is invasive, but there isn't really a good way
# to test the constructor
# try with nothing set
ndex = Ndex2(host='localhost')
self.assertEqual(ndex.debug, False)
self.assertEqual(ndex.version, 1.3)
self.assertEqual(ndex.status, {})
self.assertEqual(ndex.username, None)
self.assertEqual(ndex.password, None)
self.assertEqual(ndex.user_agent, '')
self.assertEqual(ndex.host, 'http://localhost:8080/ndexbio-rest')
self.assertTrue(ndex.s is not None)
        self.assertEqual(ndex.timeout, 30)
        ndex.set_request_timeout(10)
        self.assertEqual(ndex.timeout, 10)
        # try with username, password and user_agent set, plus a host
        # with extra text prepended to localhost
ndex = Ndex2(host='xxxlocalhost', username='bob',
password='<PASSWORD>', user_agent='yo', debug=True,
timeout=1)
self.assertEqual(ndex.debug, True)
self.assertEqual(ndex.version, 1.3)
self.assertEqual(ndex.status, {})
self.assertEqual(ndex.username, 'bob')
self.assertEqual(ndex.password, '<PASSWORD>')
self.assertEqual(ndex.user_agent, ' yo')
self.assertEqual(ndex.host, 'http://localhost:8080/ndexbio-rest')
self.assertTrue(ndex.s is not None)
        self.assertEqual(ndex.timeout, 1)
        # try with user_agent set to None (Issue #34)
with requests_mock.mock() as m:
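            # requests_mock intercepts HTTP at the transport layer, so the
            # constructor's version-discovery GET to /rest/admin/status is
            # answered by the stub registered below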
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2(user_agent=None)
self.assertEqual(ndex.user_agent, '')
def test_ndex2_constructor_that_raises_httperror(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
text='uhoh',
reason='some error',
status_code=404)
ndex = Ndex2()
self.assertEqual(ndex.debug, False)
self.assertEqual(ndex.version, '1.3')
self.assertEqual(ndex.status, {})
self.assertEqual(ndex.username, None)
self.assertEqual(ndex.password, None)
self.assertEqual(ndex.user_agent, '')
self.assertEqual(ndex.host, client.DEFAULT_SERVER + '/rest')
self.assertTrue(ndex.s is not None)
def test_ndex2_constructor_with_defaulthost_serverversionnone(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json={"networkCount": 1321,
"userCount": 12,
"groupCount": 0,
"message": "Online",
"properties": {"ServerVersion": None}})
ndex = Ndex2()
self.assertEqual(ndex.debug, False)
self.assertEqual(ndex.version, '1.3')
self.assertEqual(ndex.status, {})
self.assertEqual(ndex.username, None)
self.assertEqual(ndex.password, None)
self.assertEqual(ndex.user_agent, '')
self.assertEqual(ndex.host, client.DEFAULT_SERVER + '/rest')
self.assertTrue(ndex.s is not None)
def test_ndex2_constructor_with_defaulthost_properties_is_none(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json={"networkCount": 1321,
"userCount": 12,
"groupCount": 0,
"message": "Online"})
ndex = Ndex2()
self.assertEqual(ndex.debug, False)
self.assertEqual(ndex.version, '1.3')
self.assertEqual(ndex.status, {})
self.assertEqual(ndex.username, None)
self.assertEqual(ndex.password, None)
self.assertEqual(ndex.user_agent, '')
self.assertEqual(ndex.host, client.DEFAULT_SERVER + '/rest')
self.assertTrue(ndex.s is not None)
def test_ndex2_constructor_with_defaulthost_thatisversionone(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json={"networkCount": 1321,
"userCount": 12,
"groupCount": 0,
"message": "Online",
"properties": {"ServerVersion": "1.1",
"ServerResultLimit": "10000"}})
try:
Ndex2()
self.fail('Expected exception')
except Exception as e:
self.assertEqual(str(e),
'This release only supports NDEx 2.x server.')
def test_ndex2_constructor_with_defaulthost_thatisversiontwo(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
self.assertEqual(ndex.debug, False)
self.assertEqual(ndex.version, '2.1')
self.assertEqual(ndex.status, {})
self.assertEqual(ndex.username, None)
self.assertEqual(ndex.password, None)
self.assertEqual(ndex.user_agent, '')
self.assertEqual(ndex.host, client.DEFAULT_SERVER + '/v2')
self.assertTrue(ndex.s is not None)
def test_ndex2_require_auth(self):
ndex = Ndex2(host='localhost')
try:
ndex._require_auth()
self.fail('Expected exception')
except Exception as e:
self.assertEqual(str(e),
'This method requires user authentication')
def test_ndex2_get_user_agent(self):
ndex = Ndex2(host='localhost')
# try with default
res = ndex._get_user_agent()
self.assertEqual(res, 'NDEx2-Python/' + __version__)
ndex = Ndex2(host='localhost', user_agent='hi')
# try with user_agent set
res = ndex._get_user_agent()
self.assertEqual(res, 'NDEx2-Python/' + __version__ + ' hi')
def test_convert_exception_to_ndex_error(self):
# try passing none
ndex = Ndex2(host='localhost')
try:
ndex._convert_exception_to_ndex_error(None)
except NDExError as ne:
self.assertEqual('Caught unknown error', str(ne))
# try passing in a ValueError
try:
ndex._convert_exception_to_ndex_error(ValueError('hi'))
except NDExError as ne:
self.assertEqual('Caught ValueError: hi', str(ne))
def test_convert_requests_http_error_to_ndex_error(self):
# try passing none
ndex = Ndex2(host='localhost')
try:
ndex._convert_requests_http_error_to_ndex_error(None)
except NDExError as ne:
self.assertEqual('Caught unknown server error', str(ne))
error = MagicMock()
error.response = MagicMock()
error.response.status_code = 404
error.response.text = 'hi'
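        # the MagicMock stands in for requests' HTTPError, carrying a response
        # object with the status_code and text that the converter inspects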
# try passing in a mock HTTPError
try:
ndex._convert_requests_http_error_to_ndex_error(error)
self.fail('Expected NDExNotFoundError')
except NDExNotFoundError as ne:
self.assertEqual('Caught 404 from server: hi', str(ne))
# try passing in a 500 error
error.response.status_code = 500
try:
ndex._convert_requests_http_error_to_ndex_error(error)
self.fail('Expected NDExError')
except NDExError as ne:
self.assertEqual('Caught 500 from server: hi', str(ne))
def test_ndex2_put_no_json_empty_resp_code_204(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/hi', status_code=204)
ndex = Ndex2()
res = ndex.put('/hi')
self.assertEqual(res, '')
def test_ndex2_put_no_json_empty_code_200(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/hi',
status_code=200,
text='hehe',
request_headers={'Content-Type': 'application/'
'json;charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.put('/hi')
self.assertEqual(res, 'hehe')
def test_ndex2_put_returns_code_401(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/hi',
status_code=401,
text='hehe',
request_headers={'Content-Type': 'application/'
'json;charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'})
ndex = Ndex2()
ndex.set_debug_mode(True)
try:
ndex.put('/hi')
self.fail('Expected HTTPError')
except HTTPError as he:
self.assertEqual(he.response.status_code, 401)
self.assertEqual(he.response.text, 'hehe')
def test_ndex2_put_returns_code_500(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/hi',
status_code=500,
text='hehe',
request_headers={'Content-Type': 'application/'
'json;charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'})
ndex = Ndex2()
ndex.set_debug_mode(True)
try:
ndex.put('/hi')
self.fail('Expected HTTPError')
except HTTPError as he:
self.assertEqual(he.response.status_code, 500)
self.assertEqual(he.response.text, 'hehe')
def test_ndex2_put_with_json_and_json_resp(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/hi',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.put('/hi', put_json='{"x": "y"}')
self.assertEqual(res, {'hi': 'bye'})
def test_ndex2_post_with_json_and_json_resp(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/hi',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.post('/hi', post_json='{"x": "y"}')
self.assertEqual(res, {'hi': 'bye'})
def test_ndex2_delete_with_json_and_json_resp(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/hi',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.delete('/hi', data='{"x": "y"}')
self.assertEqual(res, {'hi': 'bye'})
def test_ndex2_delete_no_data(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/hi',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.delete('/hi')
self.assertEqual(res, {'hi': 'bye'})
def test_ndex2_get_with_json_and_json_resp(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(client.DEFAULT_SERVER + '/v2/hi?x=y',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get('/hi', get_params={"x": "y"})
self.assertEqual(res, {'hi': 'bye'})
def test_ndex2_get_stream_withparams(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(client.DEFAULT_SERVER + '/v2/hi?x=y',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_stream('/hi', get_params={"x": "y"})
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_ndex2_post_stream_withparams(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/hi',
status_code=200,
json={'hi': 'bye'},
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.post_stream('/hi', post_json={"x": "y"})
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_ndex2_put_multipart(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/hi',
request_headers={'Connection': 'close'},
status_code=200)
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.put_multipart('/hi', fields={"x": "y"})
self.assertEqual(res, '')
def test_ndex2_post_multipart(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/hi',
request_headers={'Connection': 'close'},
status_code=200)
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.post_multipart('/hi', fields={"x": "y"})
self.assertEqual(res, '')
def test_ndex2_post_multipart_with_querystring(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/hi?yo=1',
request_headers={'Connection': 'close'},
status_code=200)
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.post_multipart('/hi', {"x": "y"}, query_string='yo=1')
self.assertEqual(res, '')
def test_get_id_for_user_invalid_param(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
            # try where None is passed in and the client was constructed
            # with an anonymous connection, so there is no username to
            # fall back on
try:
ndex.get_id_for_user(None)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertTrue('None passed in this method' in str(ne))
# try where username is not of type str
try:
ndex.get_id_for_user(44)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('Username must be of type str', str(ne))
# try where username is empty str
try:
ndex.get_id_for_user('')
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('Username cannot be empty str', str(ne))
def test_get_user_by_username(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl, json={'userName': 'bob'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
res = ndex.get_user_by_username('bob')
self.assertEqual('bob', res['userName'])
def test_get_id_for_user_success(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl, json={'externalId': '12345'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
self.assertEqual('12345', ndex.get_id_for_user('bob'))
def test_get_id_for_user_httperror(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
status_code=404,
text='error',
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
try:
ndex.get_id_for_user('bob')
self.fail('Expected NDExNotFoundError')
except NDExNotFoundError as ne:
self.assertEqual('Caught 404 from server: error',
str(ne))
def test_get_id_for_user_exception(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
try:
ndex.get_id_for_user('bob')
self.fail('Expected NDExError')
except NDExError as ne:
self.assertTrue('Caught JSONDecodeError:' in str(ne))
def test_get_id_for_user_no_external_id(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
json={'uhoh': 'missing'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
try:
ndex.get_id_for_user('bob')
self.fail('Expected NDExError')
except NDExError as ne:
                self.assertTrue('Unable to get user id '
                                'for user: bob' in str(ne))
def test_get_user_by_id_invalid_user_id(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
# try passing None
try:
ndex.get_user_by_id(None)
                self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('user_id must be a str', str(ne))
# try passing empty string
try:
ndex.get_user_by_id('')
                self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('user_id cannot be an empty str', str(ne))
def test_get_user_by_id_success(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
json={'userName': 'foo'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
res = ndex.get_user_by_id('foo')
self.assertEqual('foo', res['userName'])
def test_get_user_by_id_404_error(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
text='some error', status_code=404)
ndex = Ndex2()
try:
res = ndex.get_user_by_id('foo')
self.fail('Expected NDExNotFoundError: ' + str(res))
except NDExNotFoundError as ne:
self.assertEqual('Caught 404 from server: some error',
str(ne))
def test_get_user_by_id_500_error(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
text='error', status_code=500)
ndex = Ndex2()
try:
res = ndex.get_user_by_id('foo')
                self.fail('Expected NDExError: ' + str(res))
except NDExError as ne:
self.assertEqual('Caught 500 from server: error',
str(ne))
def test_get_user_by_id_random_exception(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
try:
res = ndex.get_user_by_id('foo')
self.fail('Expected NDExError: ' + str(res))
except NDExError as ne:
self.assertTrue('Caught JSONDecodeError' in str(ne))
def test_get_networksets_for_user_invalid_userid(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
# try where user id is None
try:
ndex.get_networksets_for_user_id(None)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('user_id must be of type str',
str(ne))
# try where user id is not of type str
try:
ndex.get_networksets_for_user_id(4)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('user_id must be of type str',
str(ne))
def test_get_networksets_for_user_invalid_limit(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
# try where limit is not int or str
try:
ndex.get_networksets_for_user_id('foo', limit=3.5)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertTrue('limit parameter must be of type '
'int ' in str(ne))
def test_get_networksets_for_user_invalid_offset_type(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
            # try where offset is not int or str
try:
ndex.get_networksets_for_user_id('foo', offset=3.5)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertTrue('offset parameter must be of type '
'int ' in str(ne))
def test_get_networksets_for_user_invalid_offset(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
            # try where offset is given without a positive limit
try:
ndex.get_networksets_for_user_id('foo', offset=5)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertTrue('limit (0) parameter must be set to '
'positive ' in str(ne))
def test_get_networksets_for_user_success(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo/networksets'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl, json={'hi': 'there'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
res = ndex.get_networksets_for_user_id('foo',
summary_only=False,
showcase=True)
self.assertEqual('there', res['hi'])
def test_get_networksets_for_user_httperror(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo/networksets'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl, status_code=404)
ndex = Ndex2()
try:
ndex.get_networksets_for_user_id('foo')
except NDExNotFoundError as ne:
self.assertEqual('Caught 404 from server: ', str(ne))
def test_get_networksets_for_user_exception(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/user/foo/networksets'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(resurl,
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
try:
ndex.get_networksets_for_user_id('foo')
except NDExError as ne:
self.assertTrue('Caught JSONDecodeError: ' in str(ne))
def test_save_new_network_none_as_cx(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.save_new_network(None)
self.fail('expected NDExInvalidCXError')
except NDExInvalidCXError as ice:
self.assertEqual(str(ice), 'CX is None')
def test_save_new_network_invalid_as_cx(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.save_new_network('hi')
self.fail('expected NDExInvalidCXError')
except NDExInvalidCXError as ice:
self.assertEqual(str(ice), 'CX is not a list')
def test_save_new_network_empty_cx(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.save_new_network([])
self.fail('expected NDExInvalidCXError')
except NDExInvalidCXError as ice:
self.assertEqual(str(ice), 'CX appears to be empty')
def test_save_new_network_cx_with_no_status(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/network/asdf'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/network',
request_headers={'Connection': 'close'},
status_code=1,
text=resurl)
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.save_new_network([{'foo': '123'}])
self.assertEqual(res, resurl)
decode_txt = m.last_request.text.read().decode('UTF-8')
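            # the client appends a status aspect when the input CX lacks one,
            # hence the status/error/success fragments asserted below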
self.assertTrue('Content-Disposition: form-data; '
'name="CXNetworkStream"; '
'filename="filename"' in decode_txt)
self.assertTrue('Content-Type: application/'
'octet-stream' in decode_txt)
self.assertTrue('{"foo": "123"}' in decode_txt)
self.assertTrue('{"status": [{"' in decode_txt)
self.assertTrue('"error": ""' in decode_txt)
self.assertTrue('"success": true' in decode_txt)
def test_save_new_network_cx_with_no_status_ndexv1(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/rest/network/asdf'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.post(client.DEFAULT_SERVER + '/rest/network/asCX',
request_headers={'Connection': 'close'},
status_code=1,
text=resurl)
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.save_new_network([{'foo': '123'}])
self.assertEqual(res, resurl)
decode_txt = m.last_request.text.read().decode('UTF-8')
self.assertTrue('Content-Disposition: form-data; '
'name="CXNetworkStream"; '
'filename="filename"' in decode_txt)
self.assertTrue('Content-Type: application/'
'octet-stream' in decode_txt)
self.assertTrue('{"foo": "123"}' in decode_txt)
self.assertTrue('{"status": [{"' in decode_txt)
self.assertTrue('"error": ""' in decode_txt)
self.assertTrue('"success": true' in decode_txt)
def test_save_new_network_cx_with_emptystatus_and_publicvisibility(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/network/asdf'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/network?visibility=PUBLIC',
request_headers={'Connection': 'close'},
status_code=1,
text=resurl)
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.save_new_network([{'foo': '123'},
{"status": []}],
visibility='PUBLIC')
self.assertEqual(res, resurl)
decode_txt = m.last_request.text.read().decode('UTF-8')
self.assertTrue('Content-Disposition: form-data; '
'name="CXNetworkStream"; '
'filename="filename"' in decode_txt)
self.assertTrue('Content-Type: application/'
'octet-stream' in decode_txt)
self.assertTrue('{"foo": "123"}' in decode_txt)
self.assertTrue('{"status": [{"' in decode_txt)
self.assertTrue('"error": ""' in decode_txt)
self.assertTrue('"success": true' in decode_txt)
def test_save_new_network_cx_with_status(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/network/asdf'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/network',
request_headers={'Connection': 'close'},
status_code=1,
text=resurl)
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.save_new_network([{'foo': '123'},
{"status": [{"error": "",
"success": True}]}])
self.assertEqual(res, resurl)
decode_txt = m.last_request.text.read().decode('UTF-8')
self.assertTrue('Content-Disposition: '
'form-data; name="CXNetworkStream"; '
'filename="filename"' in decode_txt)
self.assertTrue('Content-Type: application/'
'octet-stream' in decode_txt)
self.assertTrue('{"foo": "123"}' in decode_txt)
self.assertTrue('{"status": [{"' in decode_txt)
self.assertTrue('"error": ""' in decode_txt)
self.assertTrue('"success": true' in decode_txt)
def test_update_cx_network_success(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/v2/network/asdf'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/someid',
request_headers={'Connection': 'close'},
status_code=1,
text=resurl)
ndex = Ndex2(username='bob', password='<PASSWORD>')
cx = [{'foo': '123'},
{"status": [{"error": "", "success": True}]}]
if sys.version_info.major == 3:
stream = io.BytesIO(json.dumps(cx,
cls=DecimalEncoder)
.encode('utf-8'))
else:
stream = io.BytesIO(json.dumps(cx, cls=DecimalEncoder))
res = ndex.update_cx_network(stream, 'someid')
self.assertEqual(res, resurl)
decode_txt = m.last_request.text.read().decode('UTF-8')
self.assertTrue('Content-Disposition: form-data; '
'name="CXNetworkStream"; '
'filename="filename"' in decode_txt)
self.assertTrue('Content-Type: application/'
'octet-stream' in decode_txt)
self.assertTrue('{"foo": "123"}' in decode_txt)
self.assertTrue('{"status": [{"' in decode_txt)
self.assertTrue('"error": ""' in decode_txt)
self.assertTrue('"success": true' in decode_txt)
def test_update_cx_network_success_ndexv1(self):
with requests_mock.mock() as m:
resurl = client.DEFAULT_SERVER + '/rest/network/asdf'
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.put(client.DEFAULT_SERVER + '/rest/network/asCX/someid',
request_headers={'Connection': 'close'},
status_code=1,
text=resurl)
ndex = Ndex2(username='bob', password='<PASSWORD>')
cx = [{'foo': '123'},
{"status": [{"error": "", "success": True}]}]
if sys.version_info.major == 3:
stream = io.BytesIO(json.dumps(cx,
cls=DecimalEncoder)
.encode('utf-8'))
else:
stream = io.BytesIO(json.dumps(cx, cls=DecimalEncoder))
res = ndex.update_cx_network(stream, 'someid')
self.assertEqual(res, resurl)
decode_txt = m.last_request.text.read().decode('UTF-8')
self.assertTrue('Content-Disposition: form-data; '
'name="CXNetworkStream"; '
'filename="filename"' in decode_txt)
self.assertTrue('Content-Type: application/'
'octet-stream' in decode_txt)
self.assertTrue('{"foo": "123"}' in decode_txt)
self.assertTrue('{"status": [{"' in decode_txt)
self.assertTrue('"error": ""' in decode_txt)
self.assertTrue('"success": true' in decode_txt)
def test_validate_network_system_properties(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
# try passing none
try:
ndex._validate_network_system_properties(None)
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('network_properties must be a '
'string or a dict', str(ne))
# try passing empty string
try:
ndex._validate_network_system_properties('')
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertTrue('Error parsing json string' in str(ne))
# try passing empty dict
try:
ndex._validate_network_system_properties({})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertTrue('network_properties appears to '
'be empty' in str(ne))
# try passing invalid property
try:
ndex._validate_network_system_properties({'showcase': True,
'foo': 'blah'})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('foo is not a valid network system '
'property', str(ne))
# try passing invalid readOnly property
try:
ndex._validate_network_system_properties({'showcase': True,
'readOnly': 'blah'})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('readOnly value must be a bool '
'set to True or False', str(ne))
# try passing invalid showcase property
try:
ndex._validate_network_system_properties({'showcase': 'haha'})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('showcase value must be a bool '
'set to True or False', str(ne))
# try passing invalid index_level property as bool
try:
ndex._validate_network_system_properties({'index_level':
False})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('index_level value must be '
'a string set to NONE, META, or ALL', str(ne))
# try passing invalid index_level property
try:
ndex._validate_network_system_properties({'index_level':
'blah'})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('index_level value must be '
'a string set to NONE, META, or ALL', str(ne))
# try passing invalid visibility property bool
try:
ndex._validate_network_system_properties({'visibility': True})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('visibility value must be '
'a string set to PUBLIC or PRIVATE',
str(ne))
# try passing invalid visibility property
try:
ndex._validate_network_system_properties({'visibility':
'ha'})
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('visibility value must be '
'a string set to PUBLIC or PRIVATE',
str(ne))
# try passing valid dict
valid_dict = {'showcase': True,
'visibility': 'PUBLIC',
'index_level': 'ALL',
'readOnly': True}
res = ndex._validate_network_system_properties(valid_dict)
check_dict = json.loads(res)
self.assertEqual(valid_dict, check_dict)
# try passing dict with validation off
res = ndex._validate_network_system_properties({},
skipvalidation=True)
self.assertEqual('{}', res)
def test_set_network_system_properties_test_no_auth(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.set_network_system_properties('236ecfce-be48-4652-'
'b488-b08f0cc9c795',
{'visibility': 'PUBLIC'})
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('This method requires user '
'authentication', str(ne))
def test_set_network_system_properties_invalid_propertytype(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2(username='bob', password='<PASSWORD>')
try:
ndex.set_network_system_properties('236ecfce-be48-4652-b488-'
'b08f0cc9c795',
True)
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('network_properties '
'must be a string or a dict', str(ne))
def test_set_network_system_properties_ndexv1(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
ndex = Ndex2(username='bob', password='<PASSWORD>')
valid_dict = {'showcase': True,
'visibility': 'PUBLIC',
'index_level': 'ALL',
'readOnly': True}
try:
ndex.set_network_system_properties(theuuid,
valid_dict)
self.fail('Expected NDExUnsupportedCallError')
except NDExUnsupportedCallError as ne:
self.assertEqual('This call only works with NDEx 2+',
str(ne))
def test_set_network_system_properties_success(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/' +
theuuid + '/systemproperty',
request_headers={'Content-Type': 'application/json;'
'charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'},
status_code=200,
text='')
ndex = Ndex2(username='bob', password='<PASSWORD>')
valid_dict = {'showcase': True,
'visibility': 'PUBLIC',
'index_level': 'ALL',
'readOnly': True}
res = ndex.set_network_system_properties(theuuid,
valid_dict)
self.assertEqual('', res)
checkdict = json.loads(m.last_request.text)
self.assertEqual(valid_dict, checkdict)
def test_make_network_public_noauth(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.make_network_public('236ecfce-be48-4652-b488-'
'b08f0cc9c795')
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('This method requires user authentication',
str(ne))
def test_make_network_public_success(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/' +
theuuid + '/systemproperty',
request_headers={'Content-Type': 'application/json;'
'charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'},
status_code=200,
text='')
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.make_network_public(theuuid)
self.assertEqual('', res)
checkdict = json.loads(m.last_request.text)
self.assertEqual({'visibility': 'PUBLIC'}, checkdict)
def test_make_network_private_noauth(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.make_network_private('236ecfce-be48-4652-b488-'
'b08f0cc9c795')
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('This method requires user authentication',
str(ne))
def test_make_network_private_success(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/' +
theuuid + '/systemproperty',
request_headers={'Content-Type': 'application/json;'
'charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'},
status_code=200,
text='')
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.make_network_private(theuuid)
self.assertEqual('', res)
checkdict = json.loads(m.last_request.text)
self.assertEqual({'visibility': 'PRIVATE'}, checkdict)
def test_make_network_public_indexed_noauth(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex._make_network_public_indexed('236ecfce-be48-4652-'
'b488-b08f0cc9c795')
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('This method requires user authentication',
str(ne))
def test_make_network_public_indexed_success(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/' +
theuuid + '/systemproperty',
request_headers={'Content-Type': 'application/json;'
'charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'},
status_code=200,
text='')
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex._make_network_public_indexed(theuuid)
self.assertEqual('', res)
checkdict = json.loads(m.last_request.text)
self.assertEqual({'visibility': 'PUBLIC',
'index_level': 'ALL',
'showcase': True}, checkdict)
def test_make_network_public_indexed_ndexv1(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
ndex = Ndex2(username='bob', password='<PASSWORD>')
try:
ndex._make_network_public_indexed(theuuid)
self.fail('Expected NDExUnsupportedCallError')
except NDExUnsupportedCallError as ne:
self.assertEqual('Only 2+ of NDEx supports '
'setting/changing index level', str(ne))
def test_set_read_only_noauth(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.set_read_only('236ecfce-be48-4652-b488-b08f0cc9c795',
True)
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('This method requires user authentication',
str(ne))
def test_set_read_only_true_success(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/' +
theuuid + '/systemproperty',
request_headers={'Content-Type': 'application/json;'
'charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'},
status_code=200,
text='')
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.set_read_only(theuuid, True)
self.assertEqual('', res)
checkdict = json.loads(m.last_request.text)
self.assertEqual({'readOnly': True}, checkdict)
def test_set_read_only_false_success(self):
theuuid = '236ecfce-be48-4652-b488-b08f0cc9c795'
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.put(client.DEFAULT_SERVER + '/v2/network/' +
theuuid + '/systemproperty',
request_headers={'Content-Type': 'application/json;'
'charset=UTF-8',
'Accept': 'application/json',
'User-Agent': client.userAgent},
headers={'Content-Type': 'application/foo'},
status_code=200,
text='')
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.set_read_only(theuuid, False)
self.assertEqual('', res)
checkdict = json.loads(m.last_request.text)
self.assertEqual({'readOnly': False}, checkdict)
def test_get_network_as_cx_stream_success(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(client.DEFAULT_SERVER + '/v2/network/someid',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_network_as_cx_stream('someid')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_network_as_cx_stream_success_ndexv1(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.get(client.DEFAULT_SERVER + '/rest/network/someid/asCX',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_network_as_cx_stream('someid')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_network_aspect_as_cx_stream_success(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(client.DEFAULT_SERVER + '/v2/network/someid/aspect/sa',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_network_aspect_as_cx_stream('someid', 'sa')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_network_aspect_as_cx_stream_success_ndexv1(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.get(client.DEFAULT_SERVER + '/rest/network/someid/asCX',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_network_aspect_as_cx_stream('someid', 'sa')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_neighborhood_as_cx_stream(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/search/network/someid/query',
status_code=200,
json={'hi': 'bye'},
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_neighborhood_as_cx_stream('someid',
'query')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_neighborhood_as_cx_stream_ndexv1(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.post(client.DEFAULT_SERVER + '/rest/network/someid/query',
status_code=200,
json={'hi': 'bye'},
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_neighborhood_as_cx_stream('someid',
'query')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_neighborhood(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/search/network/someid/query',
status_code=200,
json={'data': [{'hi': 'bye'}]},
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_neighborhood('someid', 'query')
self.assertEqual(res, [{'hi': 'bye'}])
def test_get_neighborhood_list_return(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/search/network/someid/query',
status_code=200,
json=[{'hi': 'bye'}],
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_neighborhood('someid', 'query')
self.assertEqual(res, [{'hi': 'bye'}])
def test_get_neighborhood_str_return(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/search/network/someid/query',
status_code=200,
json='blah',
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_neighborhood('someid', 'query')
self.assertEqual(res, 'blah')
def test_get_neighborhood_ndexv1(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
ndex = Ndex2()
ndex.set_debug_mode(True)
try:
ndex.get_neighborhood('someid', 'query')
self.fail('Expected Exception')
except Exception as e:
self.assertEqual('get_neighborhood is not supported for '
'versions prior to 2.0, use '
'get_neighborhood_as_cx_stream', str(e))
def test_upload_file(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.upload_file('foo')
self.fail('Expected NDExError')
except NDExError:
pass
def test_get_interconnectquery_as_cx_stream(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER +
'/v2/search/network/someid/interconnectquery',
status_code=200,
json={'hi': 'bye'},
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_interconnectquery_as_cx_stream('someid',
'query')
self.assertEqual(res.json(), {'hi': 'bye'})
self.assertEqual(res.status_code, 200)
def test_get_interconnectquery(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER +
'/v2/search/network/someid/interconnectquery',
status_code=200,
json={'data': [{'hi': 'bye'}]},
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_interconnectquery('someid', 'query')
self.assertEqual(res, [{'hi': 'bye'}])
def test_get_interconnectquery_as_list(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER +
'/v2/search/network/someid/interconnectquery',
status_code=200,
json=[{'hi': 'bye'}],
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_interconnectquery('someid', 'query')
self.assertEqual(res, [{'hi': 'bye'}])
def test_get_interconnectquery_as_str(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER +
'/v2/search/network/someid/interconnectquery',
status_code=200,
json='foo',
request_headers={'Connection': 'close'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_interconnectquery('someid', 'query')
self.assertEqual(res, 'foo')
def test_search_networks(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER +
'/v2/search/network?start=0&size=100',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.search_networks(search_string='hi',
account_name='bob',
include_groups=True)
self.assertEqual(res, {'hi': 'bye'})
def test_search_networks_ndexv1(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.post(client.DEFAULT_SERVER + '/rest/network/search/0/100',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.search_networks(search_string='hi',
account_name='bob',
include_groups=True)
self.assertEqual(res, {'hi': 'bye'})
def test_search_networks_by_property_filter(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.search_networks_by_property_filter()
self.fail('Expected Exception')
except Exception:
pass
def test_get_network_summary(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(client.DEFAULT_SERVER + '/v2/network/someid/summary',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_network_summary('someid')
self.assertEqual(res, {'hi': 'bye'})
def test_get_network_summary_ndexv1(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict(version=None))
m.get(client.DEFAULT_SERVER + '/rest/network/someid',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
ndex.set_debug_mode(True)
res = ndex.get_network_summary('someid')
self.assertEqual(res, {'hi': 'bye'})
def test_delete_networkset_none_passed_in(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
ndex.set_debug_mode(True)
try:
ndex.delete_networkset(None)
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('networkset id cannot be None',
str(ne))
def test_delete_networkset_non_string_passed_in(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
ndex.set_debug_mode(True)
try:
ndex.delete_networkset(True)
self.fail('Expected exception')
except NDExInvalidParameterError as ne:
self.assertEqual('networkset id must be a string',
str(ne))
def test_delete_networkset_not_authorized(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.delete_networkset('someid')
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('This method requires user authentication',
str(ne))
def test_delete_networkset_success(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/networkset/someid',
status_code=204,
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
self.assertEqual(None, ndex.delete_networkset('someid'))
def test_delete_networkset_server_says_not_authorized(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/networkset/someid',
status_code=401,
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
try:
ndex.delete_networkset('someid')
self.fail('Expected exception')
except NDExUnauthorizedError as ne:
self.assertEqual('Not authorized', str(ne))
def test_delete_networkset_server_says_not_found(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/networkset/someid',
status_code=404,
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
try:
ndex.delete_networkset('someid')
self.fail('Expected exception')
except NDExNotFoundError as ne:
self.assertEqual('Network set with id: someid not found',
str(ne))
def test_delete_networkset_server_500_error_no_json(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/networkset/someid',
status_code=500,
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
try:
ndex.delete_networkset('someid')
self.fail('Expected exception')
except NDExError as ne:
self.assertEqual('Unknown error server returned '
'status code: 500',
str(ne))
def test_delete_networkset_server_503_with_json(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.delete(client.DEFAULT_SERVER + '/v2/networkset/someid',
status_code=503,
json={"errorCode": "string",
"message": "string",
"description": "string",
"stackTrace": "string",
"threadId": "string",
"timeStamp": "2019-09-09T16:36:25.699Z"},
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
try:
ndex.delete_networkset('someid')
self.fail('Expected exception')
except NDExError as ne:
self.assertTrue('Unknown error server returned '
'status code: 503 : ' in str(ne))
def test_get_task_by_id_no_auth(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.get_task_by_id('someid')
self.fail('Expected Exception')
except NDExUnauthorizedError:
pass
def test_get_task_by_id_success(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.get(client.DEFAULT_SERVER + '/v2/task/someid',
status_code=200,
json={'hi': 'bye'},
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.get_task_by_id('someid')
self.assertEqual('bye', res['hi'])
def test_add_networks_to_networkset(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
m.post(client.DEFAULT_SERVER + '/v2/networkset/aid/members',
status_code=200,
json='',
headers={'Content-Type': 'application/json'})
ndex = Ndex2(username='bob', password='<PASSWORD>')
res = ndex.add_networks_to_networkset('aid', ['someid'])
self.assertEqual('', res)
def test_get_network_ids_for_user_invalid_offset_limit(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
ndex = Ndex2()
try:
ndex.get_network_ids_for_user('bob', limit=None, offset=5)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('Limit must be set to a positive '
'number to use offset', str(ne))
# try where limit is str and offset is None
try:
ndex.get_network_ids_for_user('bob', limit='ha', offset=None)
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('Limit must be an int', str(ne))
            # try where offset is a str
try:
ndex.get_network_ids_for_user('bob', offset='3')
self.fail('Expected NDExInvalidParameterError')
except NDExInvalidParameterError as ne:
self.assertEqual('Offset must be an int', str(ne))
def test_get_network_ids_for_user_success_no_ids(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(resurl, json={'externalId': '12345'},
headers={'Content-Type': 'application/json'})
resurl = client.DEFAULT_SERVER + '/v2/user/12345/networksummary?offset=0&limit=1000'
m.get(resurl,
json=[],
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
res = ndex.get_network_ids_for_user('bob')
self.assertEqual([], res)
def test_get_network_ids_for_user_success_with_ids(self):
with requests_mock.mock() as m:
m.get(self.get_rest_admin_status_url(),
json=self.get_rest_admin_status_dict())
resurl = client.DEFAULT_SERVER + '/v2/user?username=bob'
m.get(resurl, json={'externalId': '12345'},
headers={'Content-Type': 'application/json'})
resurl = client.DEFAULT_SERVER + '/v2/user/12345/networksummary?offset=0&limit=1000'
m.get(resurl,
json=[{'externalId': '1'}, {'externalId': '2'}],
headers={'Content-Type': 'application/json'})
ndex = Ndex2()
res = ndex.get_network_ids_for_user('bob')
self.assertEqual(2, len(res))
self.assertTrue('1' in res)
self.assertTrue('2' in res)
|
[
"ndex2.client.DecimalEncoder",
"unittest.mock.MagicMock",
"json.loads",
"decimal.Decimal",
"requests_mock.mock",
"json.dumps",
"ndex2.client.Ndex2",
"numpy.int32",
"numpy.int64",
"os.getenv"
] |
[((1625, 1641), 'ndex2.client.DecimalEncoder', 'DecimalEncoder', ([], {}), '()\n', (1639, 1641), False, 'from ndex2.client import DecimalEncoder\n'), ((2484, 2507), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (2489, 2507), False, 'from ndex2.client import Ndex2\n'), ((3141, 3250), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""xxxlocalhost"""', 'username': '"""bob"""', 'password': '"""<PASSWORD>"""', 'user_agent': '"""yo"""', 'debug': '(True)', 'timeout': '(1)'}), "(host='xxxlocalhost', username='bob', password='<PASSWORD>',\n user_agent='yo', debug=True, timeout=1)\n", (3146, 3250), False, 'from ndex2.client import Ndex2\n'), ((7785, 7808), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (7790, 7808), False, 'from ndex2.client import Ndex2\n'), ((8097, 8120), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (8102, 8120), False, 'from ndex2.client import Ndex2\n'), ((8262, 8302), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""localhost"""', 'user_agent': '"""hi"""'}), "(host='localhost', user_agent='hi')\n", (8267, 8302), False, 'from ndex2.client import Ndex2\n'), ((8538, 8561), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (8543, 8561), False, 'from ndex2.client import Ndex2\n'), ((9045, 9068), 'ndex2.client.Ndex2', 'Ndex2', ([], {'host': '"""localhost"""'}), "(host='localhost')\n", (9050, 9068), False, 'from ndex2.client import Ndex2\n'), ((9266, 9277), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (9275, 9277), False, 'from unittest.mock import MagicMock\n'), ((9303, 9314), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (9312, 9314), False, 'from unittest.mock import MagicMock\n'), ((834, 864), 'os.getenv', 'os.getenv', (['"""NDEX2_TEST_SERVER"""'], {}), "('NDEX2_TEST_SERVER')\n", (843, 864), False, 'import os\n'), ((1902, 1920), 'decimal.Decimal', 'decimal.Decimal', (['(5)'], {}), '(5)\n', (1917, 1920), False, 'import decimal\n'), ((2031, 2043), 'numpy.int64', 'np.int64', (['(12)'], {}), '(12)\n', (2039, 2043), True, 'import numpy as np\n'), ((3793, 3813), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (3811, 3813), False, 'import requests_mock\n'), ((3949, 3971), 'ndex2.client.Ndex2', 'Ndex2', ([], {'user_agent': 'None'}), '(user_agent=None)\n', (3954, 3971), False, 'from ndex2.client import Ndex2\n'), ((4096, 4116), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (4114, 4116), False, 'import requests_mock\n'), ((4299, 4306), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (4304, 4306), False, 'from ndex2.client import Ndex2\n'), ((4809, 4829), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (4827, 4829), False, 'import requests_mock\n'), ((5144, 5151), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (5149, 5151), False, 'from ndex2.client import Ndex2\n'), ((5655, 5675), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (5673, 5675), False, 'import requests_mock\n'), ((5927, 5934), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (5932, 5934), False, 'from ndex2.client import Ndex2\n'), ((6436, 6456), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (6454, 6456), False, 'import requests_mock\n'), ((7153, 7173), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (7171, 7173), False, 'import requests_mock\n'), ((7309, 7316), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (7314, 7316), False, 'from 
ndex2.client import Ndex2\n'), ((10049, 10069), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (10067, 10069), False, 'import requests_mock\n'), ((10274, 10281), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (10279, 10281), False, 'from ndex2.client import Ndex2\n'), ((10421, 10441), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (10439, 10441), False, 'import requests_mock\n'), ((11030, 11037), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (11035, 11037), False, 'from ndex2.client import Ndex2\n'), ((11213, 11233), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (11231, 11233), False, 'import requests_mock\n'), ((11822, 11829), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (11827, 11829), False, 'from ndex2.client import Ndex2\n'), ((12184, 12204), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (12202, 12204), False, 'import requests_mock\n'), ((12793, 12800), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (12798, 12800), False, 'from ndex2.client import Ndex2\n'), ((13162, 13182), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (13180, 13182), False, 'import requests_mock\n'), ((13507, 13514), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (13512, 13514), False, 'from ndex2.client import Ndex2\n'), ((13728, 13748), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (13746, 13748), False, 'import requests_mock\n'), ((14077, 14084), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (14082, 14084), False, 'from ndex2.client import Ndex2\n'), ((14302, 14322), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (14320, 14322), False, 'import requests_mock\n'), ((14659, 14666), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (14664, 14666), False, 'from ndex2.client import Ndex2\n'), ((14865, 14885), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (14883, 14885), False, 'import requests_mock\n'), ((15222, 15229), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (15227, 15229), False, 'from ndex2.client import Ndex2\n'), ((15422, 15442), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (15440, 15442), False, 'import requests_mock\n'), ((15771, 15778), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (15776, 15778), False, 'from ndex2.client import Ndex2\n'), ((15985, 16005), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (16003, 16005), False, 'import requests_mock\n'), ((16334, 16341), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (16339, 16341), False, 'from ndex2.client import Ndex2\n'), ((16614, 16634), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (16632, 16634), False, 'import requests_mock\n'), ((17023, 17030), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (17028, 17030), False, 'from ndex2.client import Ndex2\n'), ((17294, 17314), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (17312, 17314), False, 'import requests_mock\n'), ((17596, 17603), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (17601, 17603), False, 'from ndex2.client import Ndex2\n'), ((17798, 17818), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (17816, 17818), False, 'import requests_mock\n'), ((18103, 18110), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (18108, 18110), False, 'from ndex2.client import Ndex2\n'), ((18323, 18343), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (18341, 18343), False, 'import requests_mock\n'), ((18633, 18640), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (18638, 
18640), False, 'from ndex2.client import Ndex2\n'), ((18859, 18879), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (18877, 18879), False, 'import requests_mock\n'), ((19014, 19021), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (19019, 19021), False, 'from ndex2.client import Ndex2\n'), ((20062, 20082), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (20080, 20082), False, 'import requests_mock\n'), ((20403, 20410), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (20408, 20410), False, 'from ndex2.client import Ndex2\n'), ((20573, 20593), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (20591, 20593), False, 'import requests_mock\n'), ((20918, 20925), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (20923, 20925), False, 'from ndex2.client import Ndex2\n'), ((21053, 21073), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (21071, 21073), False, 'import requests_mock\n'), ((21435, 21442), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (21440, 21442), False, 'from ndex2.client import Ndex2\n'), ((21772, 21792), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (21790, 21792), False, 'import requests_mock\n'), ((22087, 22094), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (22092, 22094), False, 'from ndex2.client import Ndex2\n'), ((22375, 22395), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (22393, 22395), False, 'import requests_mock\n'), ((22734, 22741), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (22739, 22741), False, 'from ndex2.client import Ndex2\n'), ((23064, 23084), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (23082, 23084), False, 'import requests_mock\n'), ((23220, 23227), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (23225, 23227), False, 'from ndex2.client import Ndex2\n'), ((23843, 23863), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (23861, 23863), False, 'import requests_mock\n'), ((24194, 24201), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (24199, 24201), False, 'from ndex2.client import Ndex2\n'), ((24359, 24379), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (24377, 24379), False, 'import requests_mock\n'), ((24656, 24663), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (24661, 24663), False, 'from ndex2.client import Ndex2\n'), ((25015, 25035), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (25033, 25035), False, 'import requests_mock\n'), ((25307, 25314), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (25312, 25314), False, 'from ndex2.client import Ndex2\n'), ((25660, 25680), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (25678, 25680), False, 'import requests_mock\n'), ((25967, 25974), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (25972, 25974), False, 'from ndex2.client import Ndex2\n'), ((26281, 26301), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (26299, 26301), False, 'import requests_mock\n'), ((26437, 26444), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (26442, 26444), False, 'from ndex2.client import Ndex2\n'), ((27196, 27216), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (27214, 27216), False, 'import requests_mock\n'), ((27352, 27359), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (27357, 27359), False, 'from ndex2.client import Ndex2\n'), ((27806, 27826), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (27824, 27826), False, 'import requests_mock\n'), ((27962, 27969), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), 
'()\n', (27967, 27969), False, 'from ndex2.client import Ndex2\n'), ((28413, 28433), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (28431, 28433), False, 'import requests_mock\n'), ((28569, 28576), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (28574, 28576), False, 'from ndex2.client import Ndex2\n'), ((29018, 29038), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (29036, 29038), False, 'import requests_mock\n'), ((29358, 29365), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (29363, 29365), False, 'from ndex2.client import Ndex2\n'), ((29680, 29700), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (29698, 29700), False, 'import requests_mock\n'), ((29951, 29958), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (29956, 29958), False, 'from ndex2.client import Ndex2\n'), ((30216, 30236), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (30234, 30236), False, 'import requests_mock\n'), ((30534, 30541), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (30539, 30541), False, 'from ndex2.client import Ndex2\n'), ((30785, 30805), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (30803, 30805), False, 'import requests_mock\n'), ((30941, 30948), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (30946, 30948), False, 'from ndex2.client import Ndex2\n'), ((31235, 31255), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (31253, 31255), False, 'import requests_mock\n'), ((31391, 31398), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (31396, 31398), False, 'from ndex2.client import Ndex2\n'), ((31686, 31706), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (31704, 31706), False, 'import requests_mock\n'), ((31842, 31849), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (31847, 31849), False, 'from ndex2.client import Ndex2\n'), ((32150, 32170), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (32168, 32170), False, 'import requests_mock\n'), ((32554, 32598), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (32559, 32598), False, 'from ndex2.client import Ndex2\n'), ((33379, 33399), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (33397, 33399), False, 'import requests_mock\n'), ((33804, 33848), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (33809, 33848), False, 'from ndex2.client import Ndex2\n'), ((34645, 34665), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (34663, 34665), False, 'import requests_mock\n'), ((35067, 35111), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (35072, 35111), False, 'from ndex2.client import Ndex2\n'), ((36000, 36020), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (36018, 36020), False, 'import requests_mock\n'), ((36404, 36448), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (36409, 36448), False, 'from ndex2.client import Ndex2\n'), ((37354, 37374), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (37372, 37374), False, 'import requests_mock\n'), ((37761, 37805), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (37766, 37805), False, 
'from ndex2.client import Ndex2\n'), ((38983, 39003), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (39001, 39003), False, 'import requests_mock\n'), ((39411, 39455), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (39416, 39455), False, 'from ndex2.client import Ndex2\n'), ((40635, 40655), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (40653, 40655), False, 'import requests_mock\n'), ((40791, 40798), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (40796, 40798), False, 'from ndex2.client import Ndex2\n'), ((45186, 45201), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (45196, 45201), False, 'import json\n'), ((45567, 45587), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (45585, 45587), False, 'import requests_mock\n'), ((45723, 45730), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (45728, 45730), False, 'from ndex2.client import Ndex2\n'), ((46273, 46293), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (46291, 46293), False, 'import requests_mock\n'), ((46429, 46473), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (46434, 46473), False, 'from ndex2.client import Ndex2\n'), ((47048, 47068), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (47066, 47068), False, 'import requests_mock\n'), ((47217, 47261), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (47222, 47261), False, 'from ndex2.client import Ndex2\n'), ((47944, 47964), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (47962, 47964), False, 'import requests_mock\n'), ((48603, 48647), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (48608, 48647), False, 'from ndex2.client import Ndex2\n'), ((49024, 49055), 'json.loads', 'json.loads', (['m.last_request.text'], {}), '(m.last_request.text)\n', (49034, 49055), False, 'import json\n'), ((49169, 49189), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (49187, 49189), False, 'import requests_mock\n'), ((49325, 49332), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (49330, 49332), False, 'from ndex2.client import Ndex2\n'), ((49809, 49829), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (49827, 49829), False, 'import requests_mock\n'), ((50468, 50512), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (50473, 50512), False, 'from ndex2.client import Ndex2\n'), ((50627, 50658), 'json.loads', 'json.loads', (['m.last_request.text'], {}), '(m.last_request.text)\n', (50637, 50658), False, 'import json\n'), ((50787, 50807), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (50805, 50807), False, 'import requests_mock\n'), ((50943, 50950), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (50948, 50950), False, 'from ndex2.client import Ndex2\n'), ((51430, 51450), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (51448, 51450), False, 'import requests_mock\n'), ((52089, 52133), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (52094, 52133), False, 'from ndex2.client import Ndex2\n'), ((52249, 
52280), 'json.loads', 'json.loads', (['m.last_request.text'], {}), '(m.last_request.text)\n', (52259, 52280), False, 'import json\n'), ((52417, 52437), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (52435, 52437), False, 'import requests_mock\n'), ((52573, 52580), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (52578, 52580), False, 'from ndex2.client import Ndex2\n'), ((53083, 53103), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (53101, 53103), False, 'import requests_mock\n'), ((53742, 53786), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (53747, 53786), False, 'from ndex2.client import Ndex2\n'), ((53910, 53941), 'json.loads', 'json.loads', (['m.last_request.text'], {}), '(m.last_request.text)\n', (53920, 53941), False, 'import json\n'), ((54234, 54254), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (54252, 54254), False, 'import requests_mock\n'), ((54403, 54447), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (54408, 54447), False, 'from ndex2.client import Ndex2\n'), ((54828, 54848), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (54846, 54848), False, 'import requests_mock\n'), ((54984, 54991), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (54989, 54991), False, 'from ndex2.client import Ndex2\n'), ((55458, 55478), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (55476, 55478), False, 'import requests_mock\n'), ((56117, 56161), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (56122, 56161), False, 'from ndex2.client import Ndex2\n'), ((56276, 56307), 'json.loads', 'json.loads', (['m.last_request.text'], {}), '(m.last_request.text)\n', (56286, 56307), False, 'import json\n'), ((56487, 56507), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (56505, 56507), False, 'import requests_mock\n'), ((57146, 57190), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (57151, 57190), False, 'from ndex2.client import Ndex2\n'), ((57306, 57337), 'json.loads', 'json.loads', (['m.last_request.text'], {}), '(m.last_request.text)\n', (57316, 57337), False, 'import json\n'), ((57466, 57486), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (57484, 57486), False, 'import requests_mock\n'), ((57823, 57830), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (57828, 57830), False, 'from ndex2.client import Ndex2\n'), ((58108, 58128), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (58126, 58128), False, 'import requests_mock\n'), ((58484, 58491), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (58489, 58491), False, 'from ndex2.client import Ndex2\n'), ((58769, 58789), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (58787, 58789), False, 'import requests_mock\n'), ((59136, 59143), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (59141, 59143), False, 'from ndex2.client import Ndex2\n'), ((59441, 59461), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (59459, 59461), False, 'import requests_mock\n'), ((59817, 59824), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (59822, 59824), False, 'from ndex2.client import Ndex2\n'), ((60105, 60125), 'requests_mock.mock', 'requests_mock.mock', ([], 
{}), '()\n', (60123, 60125), False, 'import requests_mock\n'), ((60539, 60546), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (60544, 60546), False, 'from ndex2.client import Ndex2\n'), ((60888, 60908), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (60906, 60908), False, 'import requests_mock\n'), ((61329, 61336), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (61334, 61336), False, 'from ndex2.client import Ndex2\n'), ((61658, 61678), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (61676, 61678), False, 'import requests_mock\n'), ((62104, 62111), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (62109, 62111), False, 'from ndex2.client import Ndex2\n'), ((62323, 62343), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (62341, 62343), False, 'import requests_mock\n'), ((62759, 62766), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (62764, 62766), False, 'from ndex2.client import Ndex2\n'), ((62977, 62997), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (62995, 62997), False, 'import requests_mock\n'), ((63404, 63411), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (63409, 63411), False, 'from ndex2.client import Ndex2\n'), ((63609, 63629), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (63627, 63629), False, 'import requests_mock\n'), ((63778, 63785), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (63783, 63785), False, 'from ndex2.client import Ndex2\n'), ((64238, 64258), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (64256, 64258), False, 'import requests_mock\n'), ((64394, 64401), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (64399, 64401), False, 'from ndex2.client import Ndex2\n'), ((64627, 64647), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (64645, 64647), False, 'import requests_mock\n'), ((65092, 65099), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (65097, 65099), False, 'from ndex2.client import Ndex2\n'), ((65436, 65456), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (65454, 65456), False, 'import requests_mock\n'), ((65913, 65920), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (65918, 65920), False, 'from ndex2.client import Ndex2\n'), ((66138, 66158), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (66156, 66158), False, 'import requests_mock\n'), ((66605, 66612), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (66610, 66612), False, 'from ndex2.client import Ndex2\n'), ((66829, 66849), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (66847, 66849), False, 'import requests_mock\n'), ((67286, 67293), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (67291, 67293), False, 'from ndex2.client import Ndex2\n'), ((67487, 67507), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (67505, 67507), False, 'import requests_mock\n'), ((67884, 67891), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (67889, 67891), False, 'from ndex2.client import Ndex2\n'), ((68214, 68234), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (68232, 68234), False, 'import requests_mock\n'), ((68595, 68602), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (68600, 68602), False, 'from ndex2.client import Ndex2\n'), ((68937, 68957), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (68955, 68957), False, 'import requests_mock\n'), ((69093, 69100), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (69098, 69100), False, 'from ndex2.client import Ndex2\n'), ((69329, 69349), 'requests_mock.mock', 
'requests_mock.mock', ([], {}), '()\n', (69347, 69349), False, 'import requests_mock\n'), ((69694, 69701), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (69699, 69701), False, 'from ndex2.client import Ndex2\n'), ((69903, 69923), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (69921, 69923), False, 'import requests_mock\n'), ((70274, 70281), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (70279, 70281), False, 'from ndex2.client import Ndex2\n'), ((70489, 70509), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (70507, 70509), False, 'import requests_mock\n'), ((70646, 70653), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (70651, 70653), False, 'from ndex2.client import Ndex2\n'), ((71034, 71054), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (71052, 71054), False, 'import requests_mock\n'), ((71191, 71198), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (71196, 71198), False, 'from ndex2.client import Ndex2\n'), ((71575, 71595), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (71593, 71595), False, 'import requests_mock\n'), ((71732, 71739), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (71737, 71739), False, 'from ndex2.client import Ndex2\n'), ((72081, 72101), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (72099, 72101), False, 'import requests_mock\n'), ((72412, 72456), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (72417, 72456), False, 'from ndex2.client import Ndex2\n'), ((72605, 72625), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (72623, 72625), False, 'import requests_mock\n'), ((72936, 72980), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (72941, 72980), False, 'from ndex2.client import Ndex2\n'), ((73277, 73297), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (73295, 73297), False, 'import requests_mock\n'), ((73608, 73652), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (73613, 73652), False, 'from ndex2.client import Ndex2\n'), ((74004, 74024), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (74022, 74024), False, 'import requests_mock\n'), ((74335, 74379), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (74340, 74379), False, 'from ndex2.client import Ndex2\n'), ((74764, 74784), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (74782, 74784), False, 'import requests_mock\n'), ((75414, 75458), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (75419, 75458), False, 'from ndex2.client import Ndex2\n'), ((75797, 75817), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (75815, 75817), False, 'import requests_mock\n'), ((75953, 75960), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (75958, 75960), False, 'from ndex2.client import Ndex2\n'), ((76192, 76212), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (76210, 76212), False, 'import requests_mock\n'), ((76546, 76590), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (76551, 76590), False, 'from ndex2.client 
import Ndex2\n'), ((76747, 76767), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (76765, 76767), False, 'import requests_mock\n'), ((77102, 77146), 'ndex2.client.Ndex2', 'Ndex2', ([], {'username': '"""bob"""', 'password': '"""<PASSWORD>"""'}), "(username='bob', password='<PASSWORD>')\n", (77107, 77146), False, 'from ndex2.client import Ndex2\n'), ((77334, 77354), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (77352, 77354), False, 'import requests_mock\n'), ((77491, 77498), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (77496, 77498), False, 'from ndex2.client import Ndex2\n'), ((78570, 78590), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (78588, 78590), False, 'import requests_mock\n'), ((79129, 79136), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (79134, 79136), False, 'from ndex2.client import Ndex2\n'), ((79306, 79326), 'requests_mock.mock', 'requests_mock.mock', ([], {}), '()\n', (79324, 79326), False, 'import requests_mock\n'), ((79905, 79912), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (79910, 79912), False, 'from ndex2.client import Ndex2\n'), ((2189, 2200), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (2197, 2200), True, 'import numpy as np\n'), ((6855, 6862), 'ndex2.client.Ndex2', 'Ndex2', ([], {}), '()\n', (6860, 6862), False, 'from ndex2.client import Ndex2\n'), ((38175, 38209), 'json.dumps', 'json.dumps', (['cx'], {'cls': 'DecimalEncoder'}), '(cx, cls=DecimalEncoder)\n', (38185, 38209), False, 'import json\n'), ((39825, 39859), 'json.dumps', 'json.dumps', (['cx'], {'cls': 'DecimalEncoder'}), '(cx, cls=DecimalEncoder)\n', (39835, 39859), False, 'import json\n'), ((37985, 38019), 'json.dumps', 'json.dumps', (['cx'], {'cls': 'DecimalEncoder'}), '(cx, cls=DecimalEncoder)\n', (37995, 38019), False, 'import json\n'), ((39635, 39669), 'json.dumps', 'json.dumps', (['cx'], {'cls': 'DecimalEncoder'}), '(cx, cls=DecimalEncoder)\n', (39645, 39669), False, 'import json\n')]
|
import ABCLogger, pygame as py, numpy, itertools
from Numerical import Numerical
from global_values import *
class CirclesLogger(ABCLogger.ABCLogger):
def log(self, foreignSelf):
return repr(foreignSelf._nextCollisionTime)
class Circles:
def expectedTimeCircles(self, circleA, circleB):
    # TODO: refactor this to work without self.time and move it to the Circle class as an unbound method.
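    # The circles touch when |dp + dv*tau| = r1 + r2, with tau measured from
    # self.time. Squaring both sides gives the quadratic
    #   |dv|^2 * tau^2 + 2*(dv . dp) * tau + (|dp|^2 - (r1 + r2)^2) = 0,
    # whose coefficients are assembled below. solveQuadraticPrune is assumed
    # to return the earliest admissible root tau, hence the final + self.time.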
positionDifference = circleA.currentPosition(self.time) - circleB.currentPosition(self.time)
velocityDifference = circleA.velocity - circleB.velocity
radiiSum = circleA.radius + circleB.radius
leadingCoefficient = velocityDifference[0]**2 + velocityDifference[1]**2
middleCoefficient = 2*(velocityDifference[0]*positionDifference[0] + velocityDifference[1]*positionDifference[1])
constantCoefficient = positionDifference[0]**2 + positionDifference[1]**2 - radiiSum**2
return Numerical.solveQuadraticPrune([leadingCoefficient, middleCoefficient, constantCoefficient]) + self.time
def expectedTimeWalls(self, circle): # the order is East, West, North, South
wallsHorizontal = [circle.radius, width - circle.radius]
wallsVertical = [circle.radius, height - circle.radius]
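    # A wall is hit when the relevant coordinate reaches the wall offset:
    # position + velocity * tau = wall, a linear equation in tau. solveLinear
    # is assumed to return the root of a*tau + b = 0; + self.time converts
    # back to absolute time.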
for horizontal in wallsHorizontal:
solution = Numerical.solveLinear([circle.velocity[0], circle.currentPosition(self.time)[0] - horizontal])
yield solution + self.time
for vertical in wallsVertical:
solution = Numerical.solveLinear([circle.velocity[1], circle.currentPosition(self.time)[1] - vertical])
yield solution + self.time
@staticmethod
def newVelocitiesCircles(circleA, circleB):
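    # Equal-mass elastic collision: the velocity components along the line of
    # centres (the collision normal) are exchanged, while the tangential
    # components are untouched. commonFactor carries the 1/|n|^2 factor
    # needed for the projections onto the normal.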
normalVector = circleB.position - circleA.position
commonFactor = normalVector/numpy.dot(normalVector, normalVector)
normalComponentA = numpy.dot(circleA.velocity, normalVector)*commonFactor
normalComponentB = numpy.dot(circleB.velocity, normalVector)*commonFactor
circleANewVelocity = circleA.velocity - normalComponentA + normalComponentB
circleBNewVelocity = circleB.velocity - normalComponentB + normalComponentA
return circleANewVelocity, circleBNewVelocity
@staticmethod
def naiveCollisionCheck(circleA, circleB):
positionDifference = circleB.position - circleA.position
distance = numpy.linalg.norm(positionDifference)
radiiSum = circleA.radius + circleB.radius
return distance < radiiSum
def isWithinCurrentTimeslice(self, time):
    return self.time < time < self.time + 1
@property
def circlesNo(self):
return len(self.circles)
def __init__(self, circles):
self.circles = circles
self.time = 0
    self.circleCircle = numpy.ndarray(shape=[self.circlesNo] * 2, dtype=float)
    self.circleWall = numpy.ndarray(shape=[self.circlesNo, wallsNumber], dtype=float)  # the order is East, West, North, South
self.circleCircle.fill(float("inf"))
    # Properties of the next collision. To be modified exclusively by self.whenNextCollision:
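    # Event-driven scheme: circleCircle and circleWall cache the predicted
    # absolute time of every possible next collision; whenNextCollision picks
    # the global minimum and records it in the three attributes below.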
self._nextCollisionTime = 0
self._isPairOfCircles = False
self._i = (None, None) # can be (int, int) or (int,)
self.allPairsCollisions()
self.allWallsCollisions()
self.whenNextCollision()
def allWallsCollisions(self):
for circleIndex in range(self.circlesNo):
self.updateCircleWallEntry(circleIndex)
def updateCircleWallEntry(self, circleIndex):
for wallIndex, time in enumerate(self.expectedTimeWalls(self.circles[circleIndex])):
self.circleWall[circleIndex][wallIndex] = time
def allPairsCollisions(self):
for indices in self.yieldPairsIndices():
self.updateCircleCircleEntry(indices)
def updateCircleCircleEntry(self, circlesIndices):
self.circleCircle[circlesIndices] = self.expectedTimeCircles(*(self.circles[index] for index in circlesIndices))
def yieldPairsIndices(self):
for xIndex in range(0, self.circlesNo - 1):
for yIndex in range(xIndex + 1, self.circlesNo):
yield xIndex, yIndex
def soonestCircleCircleCollision(self):
minimum = float("+inf")
indices = None
for pair in self.yieldPairsIndices():
time = float(self.circleCircle[pair])
if time < minimum:
minimum = time
indices = pair
return indices, minimum
def soonestCircleWallCollision(self):
minimum = float("+inf")
index = None
for circleIndex in range(self.circlesNo):
for wallIndex in range(wallsNumber):
time = float(self.circleWall[circleIndex, wallIndex])
if time < minimum:
minimum = time
index = circleIndex, wallIndex
return index, minimum
  def whenNextCollision(self):  # Side effects :(. This method should be the only one that changes these attributes.
circles = self.soonestCircleCircleCollision()
wall = self.soonestCircleWallCollision()
if circles[1] < wall[1]:
self._isPairOfCircles = True
self._i = circles[0]
self._nextCollisionTime = circles[1]
else:
self._isPairOfCircles = False
self._i = wall[0]
self._nextCollisionTime = wall[1]
def carryOutCircleCollision(self):
    assert self._isPairOfCircles
circles = tuple(self.circles[i] for i in self._i)
newVelocities = self.newVelocitiesCircles(*circles)
for i, circle in enumerate(circles):
circle.position = circle.currentPosition(self._nextCollisionTime)
circle.velocity = newVelocities[i]
circle.time = float(self._nextCollisionTime)
for i in self._i:
self.updateCircleWallEntry(i)
self.circleCircle[self._i] = float("+inf")
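    # Both circles changed velocity, so every cached pair time involving
    # either of them (other than this pair itself, reset above) is stale
    # and must be recomputed.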
for pairIndex in itertools.chain(self.yieldPairsForIndex(*self._i), self.yieldPairsForIndex(*self._i[::-1])):
self.updateCircleCircleEntry(pairIndex)
def carryOutWallCollision(self):
    assert not self._isPairOfCircles
if self._i[1] in [0, 1]:
component = 0
else:
component = 1
circle = self.circles[self._i[0]]
circle.position = circle.currentPosition(self._nextCollisionTime)
circle.velocity[component] *= -1
circle.time = float(self._nextCollisionTime)
self.updateCircleWallEntry(self._i[0])
self.circleWall[self._i] = float("+inf")
for pairIndex in self.yieldPairsForIndex(*[self._i[0]]*2):
self.updateCircleCircleEntry(pairIndex)
def carryOutCollision(self):
assert self.isWithinCurrentTimeslice(self._nextCollisionTime)
if self._isPairOfCircles:
self.carryOutCircleCollision()
else:
self.carryOutWallCollision()
def animationStep(self):
if self.isWithinCurrentTimeslice(self._nextCollisionTime):
self.carryOutCollision()
self.whenNextCollision()
else:
self.time += 1
def yieldPairsForIndex(self, index, withoutIndex):
for i in range(index):
if i != withoutIndex:
yield i, index
for i in range(index + 1, self.circlesNo):
if i != withoutIndex:
yield index, i
def animate(self):
while True:
queue = py.event.get()
for event in queue:
if event.type == py.QUIT:
quit()
screen.fill([0,0,0])
      for circle in self.circles:
        circle.plot(self.time)
py.display.update()
self.animationStep()
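
# A minimal sanity check for the equal-mass collision response above, not part
# of the original module; the SimpleNamespace stand-ins for Circle objects are
# an assumption of this sketch.
if __name__ == "__main__":
  from types import SimpleNamespace
  a = SimpleNamespace(position=numpy.array([0.0, 0.0]), velocity=numpy.array([1.0, 0.0]))
  b = SimpleNamespace(position=numpy.array([1.0, 0.0]), velocity=numpy.array([-1.0, 0.0]))
  va, vb = Circles.newVelocitiesCircles(a, b)
  # A head-on collision between equal masses simply swaps the velocities.
  assert numpy.allclose(va, [-1.0, 0.0]) and numpy.allclose(vb, [1.0, 0.0])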
|
[
"pygame.event.get",
"Numerical.Numerical.solveQuadraticPrune",
"pygame.display.update",
"numpy.linalg.norm",
"numpy.dot",
"numpy.ndarray"
] |
[((2195, 2232), 'numpy.linalg.norm', 'numpy.linalg.norm', (['positionDifference'], {}), '(positionDifference)\n', (2212, 2232), False, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((2562, 2616), 'numpy.ndarray', 'numpy.ndarray', ([], {'shape': '([self.circlesNo] * 2)', 'dtype': 'float'}), '(shape=[self.circlesNo] * 2, dtype=float)\n', (2575, 2616), False, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((2639, 2702), 'numpy.ndarray', 'numpy.ndarray', ([], {'shape': '[self.circlesNo, wallsNumber]', 'dtype': 'float'}), '(shape=[self.circlesNo, wallsNumber], dtype=float)\n', (2652, 2702), False, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((878, 973), 'Numerical.Numerical.solveQuadraticPrune', 'Numerical.solveQuadraticPrune', (['[leadingCoefficient, middleCoefficient, constantCoefficient]'], {}), '([leadingCoefficient, middleCoefficient,\n constantCoefficient])\n', (907, 973), False, 'from Numerical import Numerical\n'), ((1668, 1705), 'numpy.dot', 'numpy.dot', (['normalVector', 'normalVector'], {}), '(normalVector, normalVector)\n', (1677, 1705), False, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((1727, 1768), 'numpy.dot', 'numpy.dot', (['circleA.velocity', 'normalVector'], {}), '(circleA.velocity, normalVector)\n', (1736, 1768), False, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((1803, 1844), 'numpy.dot', 'numpy.dot', (['circleB.velocity', 'normalVector'], {}), '(circleB.velocity, normalVector)\n', (1812, 1844), False, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((6631, 6645), 'pygame.event.get', 'py.event.get', ([], {}), '()\n', (6643, 6645), True, 'import ABCLogger, pygame as py, numpy, itertools\n'), ((6793, 6812), 'pygame.display.update', 'py.display.update', ([], {}), '()\n', (6810, 6812), True, 'import ABCLogger, pygame as py, numpy, itertools\n')]
|
from enum import IntEnum
import numpy as np
class Cell(IntEnum):
Empty = 0
O = -1 # player 2
X = 1 # player 1
class Result(IntEnum):
X_Wins = 1
O_Wins = -1
Draw = 0
Incomplete = 2
SIZE = 3
class Board(object):
"""docstring for Board"""
def __init__(self, cells=None):
super(Board, self).__init__()
if cells is None:
self.cells = np.array([Cell.Empty] * SIZE ** 2)
else:
self.cells = cells.copy()
def cells_2d(self):
return self.cells.reshape(SIZE, SIZE)
def execute_turn(self, move):
assert self.cells[move] == Cell.Empty, "Cell is not empty"
self.cells[move] = self.whose_turn()
return
def whose_turn(self):
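        # X always moves first, so it is X's turn whenever an even number of
        # cells are occupied.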
non_zero_count = np.count_nonzero(self.cells)
return Cell.X if (non_zero_count % 2 == 0) else Cell.O
def get_valid_moves(self):
return [i for i in range(self.cells.size)
if self.cells[i] == Cell.Empty]
def get_invalid_moves(self):
return [i for i in range(self.cells.size)
if self.cells[i] != Cell.Empty]
def simulate_turn(self, move):
new_board = Board(self.cells)
new_board.execute_turn(move)
return new_board
def print(self):
rows, cols = self.cells_2d().shape
print('\n')
for row in range(rows):
print('|', end="")
for col in range(cols):
cell = self.cells_2d()[row][col]
print(" %s " % self.cell_to_char(cell), end="|")
if row < rows - 1:
print("\n-------------")
print('\n')
def cell_to_char(self, cell):
if cell == Cell.Empty:
return ' '
if cell == Cell.X:
return 'X'
if cell == Cell.O:
return 'O'
assert False, "Undefined tic tac toe cell"
def is_move_valid(self, move):
if move > (SIZE ** 2 - 1) or move < 0:
return False
if self.cells[move] == Cell.Empty:
return True
return False
def is_game_over(self):
return self.get_game_result() != Result.Incomplete
def get_game_result(self):
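        # With X == 1 and O == -1, a line sums to +SIZE only when X owns all
        # of it and to -SIZE only when O does; Empty cells contribute 0.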
rows_cols_and_diagonals = self.get_rows_cols_and_diagonals()
sums = list(map(sum, rows_cols_and_diagonals))
max_value = max(sums)
min_value = min(sums)
if max_value == SIZE:
return Result.X_Wins
if min_value == -SIZE:
return Result.O_Wins
if not self.get_valid_moves():
return Result.Draw
return Result.Incomplete
def get_rows_cols_and_diagonals(self):
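        # Rotating the board 90 degrees turns columns into rows and the
        # antidiagonal into the main diagonal, so one helper covers all lines.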
rows_and_diagonal = self.get_rows_and_diagonal(self.cells_2d())
cols_and_antidiagonal = self.get_rows_and_diagonal(np.rot90(self.cells_2d()))
return rows_and_diagonal + cols_and_antidiagonal
def get_rows_and_diagonal(self, cells_2d):
num_rows = cells_2d.shape[0]
return ([row for row in cells_2d[range(num_rows), :]]
+ [cells_2d.diagonal()])
def get_depth(self):
return sum(cell != Cell.Empty for cell in self.cells)
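
# A minimal usage sketch of the Board API above, not part of the original
# module; the move sequence is an arbitrary example.
if __name__ == "__main__":
    board = Board()
    for move in (0, 4, 1, 5, 2):  # X takes the whole top row
        board.execute_turn(move)
    board.print()
    assert board.get_game_result() == Result.X_Wins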
|
[
"numpy.array",
"numpy.count_nonzero"
] |
[((777, 805), 'numpy.count_nonzero', 'np.count_nonzero', (['self.cells'], {}), '(self.cells)\n', (793, 805), True, 'import numpy as np\n'), ((404, 438), 'numpy.array', 'np.array', (['([Cell.Empty] * SIZE ** 2)'], {}), '([Cell.Empty] * SIZE ** 2)\n', (412, 438), True, 'import numpy as np\n')]
|
import numpy as np
from mayavi import mlab as mayalab
def plot_pc_with_normal(pcs,pcs_n,scale_factor=1.0):
  mayalab.quiver3d(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1], pcs_n[:, 2], mode='arrow', scale_factor=scale_factor)
def plot_pc(pcs,color=None,scale_factor=.05,mode='point'):
if color == 'r':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(1,0,0))
elif color == 'blue':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,0,1))
elif color == 'green':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,1,0))
  elif color == 'cyan':
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor,color=(0,1,1))
else:
mayalab.points3d(pcs[:,0],pcs[:,1],pcs[:,2],mode=mode,scale_factor=scale_factor)
class Bbox(object):
def __init__(self,extrema=None,corner_points=None,frame_length=None):
"""
z -x
| /
| /
| /
0-----> y
c5
| --/--> c8
| /
| /
c6 c1-----> c4
| /
| /
|/
c2 -----> c3
"""
if extrema is not None:
[xmin, ymin, zmin, xmax, ymax, zmax] = extrema
self.c1 = np.array([xmin, ymin, zmin])
self.c2 = np.array([xmax, ymin, zmin])
self.c3 = np.array([xmax, ymax, zmin])
self.c4 = np.array([xmin, ymax, zmin])
self.c5 = np.array([xmin, ymin, zmax])
self.c6 = np.array([xmax, ymin, zmax])
self.c7 = np.array([xmax, ymax, zmax])
self.c8 = np.array([xmin, ymax, zmax])
self.corner_points = np.vstack([self.c1,self.c2,self.c3,self.c4,self.c5,self.c6,self.c7,self.c8])
self.frame_length = np.array([xmax-xmin,ymax-ymin,zmax-zmin])
else:
self.corner_points = corner_points
self.frame_length = frame_length
self.frame_rot = np.eye(4)
self.transformation(np.eye(4))
def transformation(self,T):
self.corner_points_4d = np.hstack([np.copy(self.corner_points),np.ones((8,1))])
self.corner_points_transformed = self.corner_points_4d.dot(T.transpose())[:,0:3]
def is_point_inside(self, point, transformed=True):
if transformed:
self.basis = np.zeros((3,3))
self.basis[:,0] = self.corner_points_transformed[1]-self.corner_points_transformed[0]
self.basis[:,1] = self.corner_points_transformed[3]-self.corner_points_transformed[0]
self.basis[:,2] = self.corner_points_transformed[4]-self.corner_points_transformed[0]
self.basis[:,0] = self.basis[:,0] / (np.linalg.norm(self.basis[:,0]) + 1e-16)
self.basis[:,1] = self.basis[:,1] / (np.linalg.norm(self.basis[:,1]) + 1e-16)
self.basis[:,2] = self.basis[:,2] / (np.linalg.norm(self.basis[:,2]) + 1e-16)
point = point - self.corner_points_transformed[0]
point = point.dot(self.basis)
if point[0] < self.frame_length[0] and point[1] < self.frame_length[1] and point[2] < self.frame_length[2] and point[0] > 0 and point[1] > 0 and point[2] > 0:
return True
else:
return False
def points_inside(self, points_o, transformed=True):
if transformed:
self.basis = np.zeros((3,3))
self.basis[:,0] = self.corner_points_transformed[1]-self.corner_points_transformed[0]
self.basis[:,1] = self.corner_points_transformed[3]-self.corner_points_transformed[0]
self.basis[:,2] = self.corner_points_transformed[4]-self.corner_points_transformed[0]
self.basis[:,0] = self.basis[:,0] / (np.linalg.norm(self.basis[:,0]) + 1e-16)
self.basis[:,1] = self.basis[:,1] / (np.linalg.norm(self.basis[:,1]) + 1e-16)
self.basis[:,2] = self.basis[:,2] / (np.linalg.norm(self.basis[:,2]) + 1e-16)
transformed_o = np.copy(self.corner_points_transformed[0]).reshape((1,3))
points = points_o - transformed_o
points = points.dot(self.basis)
points_check = np.hstack([points[:,0:1] < self.frame_length[0], points[:,1:2] < self.frame_length[1] , points[:,2:3] < self.frame_length[2] , points[:,0:1] > 0 ,points[:,1:2] > 0, points[:,2:3] > 0])
points_flag = np.all(points_check,axis=1)
if np.any(points_flag):
#pct = np.vstack([transformed_o,transformed_o,transformed_o])
#plot_pc_with_normal(pct,self.basis.transpose() * 0.01)
#plot_pc(self.corner_points_transformed,color='ycan',scale_factor=0.01,mode='sphere')
#plot_pc(points_o[points_flag],color='r',scale_factor=0.01,mode='sphere')
#print(points_check[points_flag])
#print(points[points_flag])
return True
else:
return False
def rect_intersect(self,table=None):
        return np.any(self.corner_points_transformed[:, 2] < 0.0)
def plot(self,transformed=True):
if transformed:
p1 = self.corner_points_transformed[0]
p2 = self.corner_points_transformed[1]
p3 = self.corner_points_transformed[2]
p4 = self.corner_points_transformed[3]
p5 = self.corner_points_transformed[0]
p6 = self.corner_points_transformed[1]
p7 = self.corner_points_transformed[2]
p8 = self.corner_points_transformed[3]
p9 = self.corner_points_transformed[4]
p10 = self.corner_points_transformed[5]
p11 = self.corner_points_transformed[6]
p12 = self.corner_points_transformed[7]
c1 = p1 - p2
c2 = p2 - p3
c3 = p3 - p4
c4 = p4 - p1
c5 = p1 - p9
c6 = p2 - p10
c7 = p3 - p11
c8 = p4 - p12
c9 = p9 - p10
c10 = p10 - p11
c11 = p11 - p12
c12 = p12 - p9
ps = np.vstack([p1,p2,p3,p4,p5,p6,p7,p8,p9,p10,p11,p12])
cs = np.vstack([c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12])
cs = cs * -1.0
mayalab.quiver3d(ps[:,0],ps[:,1],ps[:,2],cs[:,0],cs[:,1],cs[:,2],mode='2ddash',scale_factor=1.0)
if __name__ == '__main__':
tmp = Bbox(extrema=np.array([-1.0,-1.0,-1.0,1.0,1.0,1.0])*0.1)
tmp.transformation(np.eye(4))
print(tmp.is_point_inside(np.array([0.0,0,0])))
#tmp.plot()
#mayalab.show()
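    # Illustrative batch-containment check (a sketch, not from the original source;
    # the sampling range is made up): points_inside takes an (N, 3) array and
    # returns True if any point falls inside the transformed box.
    pts = np.random.uniform(low=-0.2, high=0.2, size=(100, 3))
    print(tmp.points_inside(pts))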
|
[
"mayavi.mlab.quiver3d",
"numpy.copy",
"numpy.zeros",
"numpy.all",
"mayavi.mlab.points3d",
"numpy.hstack",
"numpy.any",
"numpy.ones",
"numpy.array",
"numpy.linalg.norm",
"numpy.eye",
"numpy.vstack"
] |
[((110, 234), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]', 'pcs_n[:, 0]', 'pcs_n[:, 1]', 'pcs_n[:, 2]'], {'mode': '"""arrow"""', 'scale_factor': '(1.0)'}), "(pcs[:, 0], pcs[:, 1], pcs[:, 2], pcs_n[:, 0], pcs_n[:, 1],\n pcs_n[:, 2], mode='arrow', scale_factor=1.0)\n", (126, 234), True, 'from mayavi import mlab as mayalab\n'), ((313, 422), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(1, 0, 0)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(1, 0, 0))\n', (329, 422), True, 'from mayavi import mlab as mayalab\n'), ((1868, 1877), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1874, 1877), True, 'import numpy as np\n'), ((4651, 4701), 'numpy.any', 'np.any', (['(self.corner_points_transformed[:, 2] < 0.0)'], {}), '(self.corner_points_transformed[:, 2] < 0.0)\n', (4657, 4701), True, 'import numpy as np\n'), ((5973, 5982), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (5979, 5982), True, 'import numpy as np\n'), ((436, 545), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 0, 1)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 0, 1))\n', (452, 545), True, 'from mayavi import mlab as mayalab\n'), ((1240, 1268), 'numpy.array', 'np.array', (['[xmin, ymin, zmin]'], {}), '([xmin, ymin, zmin])\n', (1248, 1268), True, 'import numpy as np\n'), ((1285, 1313), 'numpy.array', 'np.array', (['[xmax, ymin, zmin]'], {}), '([xmax, ymin, zmin])\n', (1293, 1313), True, 'import numpy as np\n'), ((1330, 1358), 'numpy.array', 'np.array', (['[xmax, ymax, zmin]'], {}), '([xmax, ymax, zmin])\n', (1338, 1358), True, 'import numpy as np\n'), ((1375, 1403), 'numpy.array', 'np.array', (['[xmin, ymax, zmin]'], {}), '([xmin, ymax, zmin])\n', (1383, 1403), True, 'import numpy as np\n'), ((1420, 1448), 'numpy.array', 'np.array', (['[xmin, ymin, zmax]'], {}), '([xmin, ymin, zmax])\n', (1428, 1448), True, 'import numpy as np\n'), ((1465, 1493), 'numpy.array', 'np.array', (['[xmax, ymin, zmax]'], {}), '([xmax, ymin, zmax])\n', (1473, 1493), True, 'import numpy as np\n'), ((1510, 1538), 'numpy.array', 'np.array', (['[xmax, ymax, zmax]'], {}), '([xmax, ymax, zmax])\n', (1518, 1538), True, 'import numpy as np\n'), ((1555, 1583), 'numpy.array', 'np.array', (['[xmin, ymax, zmax]'], {}), '([xmin, ymax, zmax])\n', (1563, 1583), True, 'import numpy as np\n'), ((1611, 1698), 'numpy.vstack', 'np.vstack', (['[self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7, self.c8]'], {}), '([self.c1, self.c2, self.c3, self.c4, self.c5, self.c6, self.c7,\n self.c8])\n', (1620, 1698), True, 'import numpy as np\n'), ((1714, 1763), 'numpy.array', 'np.array', (['[xmax - xmin, ymax - ymin, zmax - zmin]'], {}), '([xmax - xmin, ymax - ymin, zmax - zmin])\n', (1722, 1763), True, 'import numpy as np\n'), ((1902, 1911), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (1908, 1911), True, 'import numpy as np\n'), ((2212, 2228), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (2220, 2228), True, 'import numpy as np\n'), ((3165, 3181), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (3173, 3181), True, 'import numpy as np\n'), ((3898, 4095), 'numpy.hstack', 'np.hstack', (['[points[:, 0:1] < self.frame_length[0], points[:, 1:2] < self.frame_length[\n 1], points[:, 2:3] < self.frame_length[2], points[:, 0:1] > 0, points[:,\n 
1:2] > 0, points[:, 2:3] > 0]'], {}), '([points[:, 0:1] < self.frame_length[0], points[:, 1:2] < self.\n frame_length[1], points[:, 2:3] < self.frame_length[2], points[:, 0:1] >\n 0, points[:, 1:2] > 0, points[:, 2:3] > 0])\n', (3907, 4095), True, 'import numpy as np\n'), ((4104, 4132), 'numpy.all', 'np.all', (['points_check'], {'axis': '(1)'}), '(points_check, axis=1)\n', (4110, 4132), True, 'import numpy as np\n'), ((4141, 4160), 'numpy.any', 'np.any', (['points_flag'], {}), '(points_flag)\n', (4147, 4160), True, 'import numpy as np\n'), ((5609, 5671), 'numpy.vstack', 'np.vstack', (['[p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12]'], {}), '([p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12])\n', (5618, 5671), True, 'import numpy as np\n'), ((5672, 5734), 'numpy.vstack', 'np.vstack', (['[c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12]'], {}), '([c1, c2, c3, c4, c5, c6, c7, c8, c9, c10, c11, c12])\n', (5681, 5734), True, 'import numpy as np\n'), ((5752, 5865), 'mayavi.mlab.quiver3d', 'mayalab.quiver3d', (['ps[:, 0]', 'ps[:, 1]', 'ps[:, 2]', 'cs[:, 0]', 'cs[:, 1]', 'cs[:, 2]'], {'mode': '"""2ddash"""', 'scale_factor': '(1.0)'}), "(ps[:, 0], ps[:, 1], ps[:, 2], cs[:, 0], cs[:, 1], cs[:, 2],\n mode='2ddash', scale_factor=1.0)\n", (5768, 5865), True, 'from mayavi import mlab as mayalab\n'), ((6012, 6033), 'numpy.array', 'np.array', (['[0.0, 0, 0]'], {}), '([0.0, 0, 0])\n', (6020, 6033), True, 'import numpy as np\n'), ((560, 669), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 1, 0)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 1, 0))\n', (576, 669), True, 'from mayavi import mlab as mayalab\n'), ((1986, 2013), 'numpy.copy', 'np.copy', (['self.corner_points'], {}), '(self.corner_points)\n', (1993, 2013), True, 'import numpy as np\n'), ((2014, 2029), 'numpy.ones', 'np.ones', (['(8, 1)'], {}), '((8, 1))\n', (2021, 2029), True, 'import numpy as np\n'), ((5907, 5950), 'numpy.array', 'np.array', (['[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]'], {}), '([-1.0, -1.0, -1.0, 1.0, 1.0, 1.0])\n', (5915, 5950), True, 'import numpy as np\n'), ((683, 792), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor', 'color': '(0, 1, 1)'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor, color=(0, 1, 1))\n', (699, 792), True, 'from mayavi import mlab as mayalab\n'), ((790, 882), 'mayavi.mlab.points3d', 'mayalab.points3d', (['pcs[:, 0]', 'pcs[:, 1]', 'pcs[:, 2]'], {'mode': 'mode', 'scale_factor': 'scale_factor'}), '(pcs[:, 0], pcs[:, 1], pcs[:, 2], mode=mode, scale_factor=\n scale_factor)\n', (806, 882), True, 'from mayavi import mlab as mayalab\n'), ((2548, 2580), 'numpy.linalg.norm', 'np.linalg.norm', (['self.basis[:, 0]'], {}), '(self.basis[:, 0])\n', (2562, 2580), True, 'import numpy as np\n'), ((2632, 2664), 'numpy.linalg.norm', 'np.linalg.norm', (['self.basis[:, 1]'], {}), '(self.basis[:, 1])\n', (2646, 2664), True, 'import numpy as np\n'), ((2716, 2748), 'numpy.linalg.norm', 'np.linalg.norm', (['self.basis[:, 2]'], {}), '(self.basis[:, 2])\n', (2730, 2748), True, 'import numpy as np\n'), ((3501, 3533), 'numpy.linalg.norm', 'np.linalg.norm', (['self.basis[:, 0]'], {}), '(self.basis[:, 0])\n', (3515, 3533), True, 'import numpy as np\n'), ((3585, 3617), 'numpy.linalg.norm', 'np.linalg.norm', (['self.basis[:, 1]'], {}), '(self.basis[:, 1])\n', (3599, 3617), True, 
'import numpy as np\n'), ((3669, 3701), 'numpy.linalg.norm', 'np.linalg.norm', (['self.basis[:, 2]'], {}), '(self.basis[:, 2])\n', (3683, 3701), True, 'import numpy as np\n'), ((3733, 3775), 'numpy.copy', 'np.copy', (['self.corner_points_transformed[0]'], {}), '(self.corner_points_transformed[0])\n', (3740, 3775), True, 'import numpy as np\n')]
|
import os
import numpy as np
def generate_synth_unit_sphere_dataset(N_data=20000,
rand_seed=38,
sampling_magnitude=50000.0,
noise_level=0.01,
dataset_save_path='unit_sphere_random.npy'):
"""
Generate a dataset on a sphere of radius 1.0 (unit sphere):
"""
np.random.seed(rand_seed)
print("noise_level = %f" % noise_level)
random_3d_dataset = np.random.uniform(low=-sampling_magnitude,
high=sampling_magnitude,
size=(N_data, 3))
noise_3d = np.random.normal(loc=0.0, scale=noise_level, size=(N_data, 3))
random_unit_sphere_dataset = ((random_3d_dataset /
np.expand_dims(
np.linalg.norm(random_3d_dataset, axis=1),
axis=1)) +
noise_3d)
norm_random_unit_sphere_dataset = np.linalg.norm(random_unit_sphere_dataset,
axis=1)
np.save(dataset_save_path, random_unit_sphere_dataset)
return random_unit_sphere_dataset, norm_random_unit_sphere_dataset
if __name__ == '__main__':
[rand_unit_sphere_dataset, norm_rand_unit_sphere_dataset
] = generate_synth_unit_sphere_dataset()
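    # Illustrative sanity check (a sketch, not from the original source): with the
    # default noise_level=0.01 the returned norms should concentrate tightly around 1.0.
    print("norm mean = %f, norm std = %f" % (np.mean(norm_rand_unit_sphere_dataset),
                                             np.std(norm_rand_unit_sphere_dataset)))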
|
[
"numpy.random.uniform",
"numpy.save",
"numpy.random.seed",
"numpy.linalg.norm",
"numpy.random.normal"
] |
[((429, 454), 'numpy.random.seed', 'np.random.seed', (['rand_seed'], {}), '(rand_seed)\n', (443, 454), True, 'import numpy as np\n'), ((524, 614), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-sampling_magnitude)', 'high': 'sampling_magnitude', 'size': '(N_data, 3)'}), '(low=-sampling_magnitude, high=sampling_magnitude, size=(\n N_data, 3))\n', (541, 614), True, 'import numpy as np\n'), ((709, 771), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0)', 'scale': 'noise_level', 'size': '(N_data, 3)'}), '(loc=0.0, scale=noise_level, size=(N_data, 3))\n', (725, 771), True, 'import numpy as np\n'), ((1096, 1146), 'numpy.linalg.norm', 'np.linalg.norm', (['random_unit_sphere_dataset'], {'axis': '(1)'}), '(random_unit_sphere_dataset, axis=1)\n', (1110, 1146), True, 'import numpy as np\n'), ((1204, 1258), 'numpy.save', 'np.save', (['dataset_save_path', 'random_unit_sphere_dataset'], {}), '(dataset_save_path, random_unit_sphere_dataset)\n', (1211, 1258), True, 'import numpy as np\n'), ((919, 960), 'numpy.linalg.norm', 'np.linalg.norm', (['random_3d_dataset'], {'axis': '(1)'}), '(random_3d_dataset, axis=1)\n', (933, 960), True, 'import numpy as np\n')]
|
import os
import os.path as osp
import json
from collections import OrderedDict
import numpy as np
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
import functools
import sklearn
__all__ = [
'compute_result_multilabel',
'compute_result',
]
def calibrated_ap(label, predicted):
target_frame = np.stack([label, predicted], axis = 1)
# target_frame[:,1] : pred
# target_frame[:,0] : gt
num_frame = target_frame.shape[0]
target_frame = target_frame[target_frame[:,1].argsort()][::-1]
sum_prec = 0
total_positive = target_frame[:,0].sum()
num_positive = total_positive
num_negative = num_frame - num_positive
w = num_negative / num_positive
tp = 0.0
fp = 0.0
for k in range(0, num_frame):
        if (target_frame[k,1] > 0.0):  # the confidence threshold is taken to be 0.0
if (target_frame[k, 0] == 0):
fp += 1
if (target_frame[k,0] == 1):
tp += 1
sum_prec += w * tp / (w * tp + fp)
return (sum_prec / total_positive)
def compute_result_multilabel(dataset, class_index, score_metrics, target_metrics, save_dir, result_file,
ignore_class=[0], save=True, verbose=False, smooth=False, switch=False):
result = OrderedDict()
score_metrics = np.array(score_metrics)
pred_metrics = np.argmax(score_metrics, axis=1)
target_metrics = np.array(target_metrics)
###################################################################################################################
# We follow (Shou et al., 2017) and adopt their per-frame evaluation method of THUMOS'14 datset.
# Source: https://bitbucket.org/columbiadvmm/cdc/src/master/THUMOS14/eval/PreFrameLabeling/compute_framelevel_mAP.m
###################################################################################################################
# Simple temporal smoothing via NMS of 5-frames window
if smooth:
prob = np.copy(score_metrics)
prob1 = prob.reshape(1, prob.shape[0], prob.shape[1])
prob2 = np.append(prob[0, :].reshape(1, -1), prob[0:-1, :], axis=0).reshape(1, prob.shape[0], prob.shape[1])
prob3 = np.append(prob[1:, :], prob[-1, :].reshape(1, -1), axis=0).reshape(1, prob.shape[0], prob.shape[1])
prob4 = np.append(prob[0:2, :], prob[0:-2, :], axis=0).reshape(1, prob.shape[0], prob.shape[1])
prob5 = np.append(prob[2:, :], prob[-2:, :], axis=0).reshape(1, prob.shape[0], prob.shape[1])
probsmooth = np.squeeze(np.max(np.concatenate((prob1, prob2, prob3, prob4, prob5), axis=0), axis=0))
score_metrics = np.copy(probsmooth)
# Assign cliff diving (5) as diving (8)
if switch:
switch_index = np.where(score_metrics[:, 5] > score_metrics[:, 8])[0]
score_metrics[switch_index, 8] = score_metrics[switch_index, 5]
if dataset == "THUMOS":
# Remove ambiguous (21)
valid_index = np.where(target_metrics[:, 21]!=1)[0] #THUMOS
# Compute AP
result['AP'] = OrderedDict()
if dataset == "THUMOS":
print('Dataset: ', dataset)
for cls in range(len(class_index)):
if cls not in ignore_class:
result['AP'][class_index[cls]] = average_precision_score(
                    (target_metrics[valid_index, cls] == 1).astype(int),
score_metrics[valid_index, cls])
if verbose:
print('{} AP: {:.5f}'.format(class_index[cls], result['AP'][class_index[cls]]))
elif dataset == "TVSeries":
print('Dataset: ', dataset)
for cls in range(len(class_index)):
if cls not in ignore_class:
result['AP'][class_index[cls]] = calibrated_ap(
                    (target_metrics[:, cls] == 1).astype(int), score_metrics[:, cls])
# result['AP'][class_index[cls]] = average_precision_score(
# (target_metrics[:, cls]==1).astype(np.int),
# score_metrics[:, cls])
if verbose:
print('{} AP: {:.5f}'.format(class_index[cls], result['AP'][class_index[cls]]))
# Compute mAP
result['mAP'] = np.mean(list(result['AP'].values()))
if verbose:
print('mAP: {:.5f}'.format(result['mAP']))
# Save
if save:
if not osp.isdir(save_dir):
os.makedirs(save_dir)
with open(osp.join(save_dir, result_file), 'w') as f:
json.dump(result, f)
if verbose:
print('Saved the result to {}'.format(osp.join(save_dir, result_file)))
return result['mAP']
def compute_result(class_index, score_metrics, target_metrics, save_dir, result_file,
ignore_class=[0], save=True, verbose=False):
result = OrderedDict()
score_metrics = np.array(score_metrics)
pred_metrics = np.argmax(score_metrics, axis=1)
target_metrics = np.array(target_metrics)
# Compute ACC
correct = np.sum((target_metrics!=0) & (target_metrics==pred_metrics))
total = np.sum(target_metrics!=0)
result['ACC'] = correct / total
if verbose:
print('ACC: {:.5f}'.format(result['ACC']))
# Compute confusion matrix
result['confusion_matrix'] = \
confusion_matrix(target_metrics, pred_metrics).tolist()
# Compute AP
result['AP'] = OrderedDict()
for cls in range(len(class_index)):
if cls not in ignore_class:
result['AP'][class_index[cls]] = average_precision_score(
                (target_metrics == cls).astype(int),
score_metrics[:, cls])
if verbose:
print('{} AP: {:.5f}'.format(class_index[cls], result['AP'][class_index[cls]]))
# Compute mAP
result['mAP'] = np.mean(list(result['AP'].values()))
if verbose:
print('mAP: {:.5f}'.format(result['mAP']))
# Save
if save:
if not osp.isdir(save_dir):
os.makedirs(save_dir)
with open(osp.join(save_dir, result_file), 'w') as f:
json.dump(result, f)
if verbose:
print('Saved the result to {}'.format(osp.join(save_dir, result_file)))
return result['mAP']
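# Tiny toy run of calibrated_ap (an illustrative sketch, not from the original
# source; the labels and scores are made up). With 2 positives and 4 negatives,
# w = num_negative / num_positive = 2, and the calibrated precision at each true
# positive is w*tp / (w*tp + fp).
if __name__ == '__main__':
    toy_labels = np.array([1, 0, 1, 0, 0, 0])
    toy_scores = np.array([0.9, 0.8, 0.7, 0.3, 0.2, 0.1])
    # (2*1/(2*1+0) + 2*2/(2*2+1)) / 2 = (1.0 + 0.8) / 2 = 0.9
    print('calibrated AP: %.4f' % calibrated_ap(toy_labels, toy_scores))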
|
[
"numpy.stack",
"json.dump",
"numpy.sum",
"os.makedirs",
"numpy.copy",
"numpy.argmax",
"os.path.isdir",
"numpy.append",
"numpy.where",
"numpy.array",
"collections.OrderedDict",
"sklearn.metrics.confusion_matrix",
"os.path.join",
"numpy.concatenate"
] |
[((359, 395), 'numpy.stack', 'np.stack', (['[label, predicted]'], {'axis': '(1)'}), '([label, predicted], axis=1)\n', (367, 395), True, 'import numpy as np\n'), ((1329, 1342), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1340, 1342), False, 'from collections import OrderedDict\n'), ((1363, 1386), 'numpy.array', 'np.array', (['score_metrics'], {}), '(score_metrics)\n', (1371, 1386), True, 'import numpy as np\n'), ((1406, 1438), 'numpy.argmax', 'np.argmax', (['score_metrics'], {'axis': '(1)'}), '(score_metrics, axis=1)\n', (1415, 1438), True, 'import numpy as np\n'), ((1460, 1484), 'numpy.array', 'np.array', (['target_metrics'], {}), '(target_metrics)\n', (1468, 1484), True, 'import numpy as np\n'), ((3090, 3103), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3101, 3103), False, 'from collections import OrderedDict\n'), ((4834, 4847), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4845, 4847), False, 'from collections import OrderedDict\n'), ((4868, 4891), 'numpy.array', 'np.array', (['score_metrics'], {}), '(score_metrics)\n', (4876, 4891), True, 'import numpy as np\n'), ((4911, 4943), 'numpy.argmax', 'np.argmax', (['score_metrics'], {'axis': '(1)'}), '(score_metrics, axis=1)\n', (4920, 4943), True, 'import numpy as np\n'), ((4965, 4989), 'numpy.array', 'np.array', (['target_metrics'], {}), '(target_metrics)\n', (4973, 4989), True, 'import numpy as np\n'), ((5023, 5087), 'numpy.sum', 'np.sum', (['((target_metrics != 0) & (target_metrics == pred_metrics))'], {}), '((target_metrics != 0) & (target_metrics == pred_metrics))\n', (5029, 5087), True, 'import numpy as np\n'), ((5096, 5123), 'numpy.sum', 'np.sum', (['(target_metrics != 0)'], {}), '(target_metrics != 0)\n', (5102, 5123), True, 'import numpy as np\n'), ((5397, 5410), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5408, 5410), False, 'from collections import OrderedDict\n'), ((2038, 2060), 'numpy.copy', 'np.copy', (['score_metrics'], {}), '(score_metrics)\n', (2045, 2060), True, 'import numpy as np\n'), ((2695, 2714), 'numpy.copy', 'np.copy', (['probsmooth'], {}), '(probsmooth)\n', (2702, 2714), True, 'import numpy as np\n'), ((2798, 2849), 'numpy.where', 'np.where', (['(score_metrics[:, 5] > score_metrics[:, 8])'], {}), '(score_metrics[:, 5] > score_metrics[:, 8])\n', (2806, 2849), True, 'import numpy as np\n'), ((3004, 3040), 'numpy.where', 'np.where', (['(target_metrics[:, 21] != 1)'], {}), '(target_metrics[:, 21] != 1)\n', (3012, 3040), True, 'import numpy as np\n'), ((4388, 4407), 'os.path.isdir', 'osp.isdir', (['save_dir'], {}), '(save_dir)\n', (4397, 4407), True, 'import os.path as osp\n'), ((4421, 4442), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (4432, 4442), False, 'import os\n'), ((4517, 4537), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (4526, 4537), False, 'import json\n'), ((5304, 5350), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['target_metrics', 'pred_metrics'], {}), '(target_metrics, pred_metrics)\n', (5320, 5350), False, 'from sklearn.metrics import confusion_matrix\n'), ((5953, 5972), 'os.path.isdir', 'osp.isdir', (['save_dir'], {}), '(save_dir)\n', (5962, 5972), True, 'import os.path as osp\n'), ((5986, 6007), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (5997, 6007), False, 'import os\n'), ((6082, 6102), 'json.dump', 'json.dump', (['result', 'f'], {}), '(result, f)\n', (6091, 6102), False, 'import json\n'), ((2372, 2418), 'numpy.append', 'np.append', (['prob[0:2, :]', 
'prob[0:-2, :]'], {'axis': '(0)'}), '(prob[0:2, :], prob[0:-2, :], axis=0)\n', (2381, 2418), True, 'import numpy as np\n'), ((2476, 2520), 'numpy.append', 'np.append', (['prob[2:, :]', 'prob[-2:, :]'], {'axis': '(0)'}), '(prob[2:, :], prob[-2:, :], axis=0)\n', (2485, 2520), True, 'import numpy as np\n'), ((2601, 2660), 'numpy.concatenate', 'np.concatenate', (['(prob1, prob2, prob3, prob4, prob5)'], {'axis': '(0)'}), '((prob1, prob2, prob3, prob4, prob5), axis=0)\n', (2615, 2660), True, 'import numpy as np\n'), ((4461, 4492), 'os.path.join', 'osp.join', (['save_dir', 'result_file'], {}), '(save_dir, result_file)\n', (4469, 4492), True, 'import os.path as osp\n'), ((6026, 6057), 'os.path.join', 'osp.join', (['save_dir', 'result_file'], {}), '(save_dir, result_file)\n', (6034, 6057), True, 'import os.path as osp\n'), ((4608, 4639), 'os.path.join', 'osp.join', (['save_dir', 'result_file'], {}), '(save_dir, result_file)\n', (4616, 4639), True, 'import os.path as osp\n'), ((6173, 6204), 'os.path.join', 'osp.join', (['save_dir', 'result_file'], {}), '(save_dir, result_file)\n', (6181, 6204), True, 'import os.path as osp\n')]
|
import sys
import os
import glob
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import math
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
from scipy.special import softmax
from Clf import *
from CBLoss import *
from FBeta_Loss import *
def load_conv_net(model_type, device):
'''
Function to load a CNN based on string input.
Inputs:
model_type [str]: specifies type of pre-trained model to load
device [torch.device]: cpu or gpu
instantiated using torch.device('cuda' if torch.cuda.is_available() else 'cpu')
Outputs:
conv_net [torch.nn]: CNN loaded onto specified device
        input_size [int]: input image size (edge length in pixels) expected by the CNN
'''
if model_type is not None:
if model_type.upper() == "EFFICIENTNET-B0":
from efficientnet_pytorch import EfficientNet
conv_net = EfficientNet.from_pretrained('efficientnet-b0').to(device)
conv_net._dropout = Identity()
conv_net._fc = Identity()
input_size = 224
elif model_type.upper() == "EFFICIENTNET-B1":
from efficientnet_pytorch import EfficientNet
conv_net = EfficientNet.from_pretrained('efficientnet-b1').to(device)
conv_net._dropout = Identity()
conv_net._fc = Identity()
input_size = 240
elif model_type.upper() == "MOBILENET-V2":
conv_net = torch.hub.load('pytorch/vision:v0.6.0', 'mobilenet_v2', pretrained=True).to(device)
conv_net.classifier = Identity()
input_size = 224
        else:
            raise ValueError("Unknown model_type: %s" % model_type)
    else:
        raise ValueError("model_type must be specified.")
return conv_net, input_size
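# Illustrative usage of load_conv_net (a sketch, not from the original source):
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   conv_net, input_size = load_conv_net('mobilenet-v2', device)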
def choose_loss_fn(loss_fn, samples_per_cls, no_of_classes, weights_per_cls):
'''
Function that returns loss function object based on string input.
Inputs:
loss_fn [str]: specifies loss function to use
None: weighted cross entropy loss
fbetaX: FX soft loss (if X=1, then F1 soft loss)
focal: class balanced focal loss
sigmoid: class balanced sigmoid loss
samples_per_cls [np.array(n,)]: array storing count of samples for each of the n classes
no_of_classes [int]: number of classes
weights_per_cls [torch.tensor(n,)]: torch tensor of weights for each of the n classes, loaded onto device
Outputs:
criterion [torch.nn]: loss function object
'''
if loss_fn is None:
criterion = nn.CrossEntropyLoss(weight=weights_per_cls)
elif loss_fn[0:5] == "fbeta":
beta_val = float(loss_fn[5:])
criterion = FBetaLoss(beta=beta_val)
elif loss_fn == "focal" or loss_fn == "sigmoid":
criterion = CBLoss(samples_per_cls, no_of_classes, loss_fn, 0.9999, 0.5)
return criterion
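# Illustrative usage of choose_loss_fn (a sketch; the class counts are made up):
# 'fbeta1' selects an F1 soft loss via FBetaLoss(beta=1.0), and weights_per_cls
# is only consulted by the weighted cross-entropy branch.
#   criterion = choose_loss_fn('fbeta1', np.array([900, 100]), 2, None)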
def save_model(model, optimizer, epoch, accuracy, save_train_loss, save_val_loss, save_train_f1,
save_val_f1, save_train_acc, save_val_acc, clf_out, iteration, fine_tune):
'''
    Function called within training which saves the model to a specified path. The model is saved whenever it achieves the best
    training/validation loss, F1, or accuracy so far, overwriting the previously best-performing model in each category.
Inputs:
model [torch.nn]: model to be saved
optimizer [torch.optim]: optimizer object (also saved)
epoch [int]: epoch number
accuracy [float]: accuracy of classification for the given epoch
save_train_loss [bool]: true if saving model as it achieved best training loss
save_val_loss [bool]: true if saving model as it achieved best validation loss
save_train_f1 [bool]: true if saving model as it achieved best training F1
save_val_f1 [bool]: true if saving model as it achieved best validation F1
save_train_acc [bool]: true if saving model as it achieved best training accuracy
save_val_acc [bool]: true if saving model as it achieved best validation accuracy
clf_out [str]: path to where model is saved
iteration [int]: current ensembling iteration
fine_tune [bool]: true if saving a model that is being fine-tuned
'''
save_cond = [save_train_loss, save_val_loss, save_train_f1, save_val_f1, save_train_acc, save_val_acc]
prefix = ["t-loss", "v-loss", "t-f1", "v-f1", "t-acc", "v-acc"]
for i in range(len(save_cond)):
if clf_out is not None and save_cond[i]:
# delete previous model with same prefix
if fine_tune:
folder_path = clf_out + "ft/"
else:
folder_path = clf_out + str(iteration) + "/"
            file_to_del = glob.glob(folder_path + "-" + prefix[i] + "*")
if len(file_to_del) == 1:
os.remove(file_to_del[0])
# save clf
clf_save = folder_path + "-{}-{:02d}-{:.3f}-clf.tar".format(prefix[i], epoch, accuracy)
torch.save({
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
}, clf_save)
return
def print_scores(running_loss, data_length, phase, gt, pred):
'''
Utility function for printing model statistics - handled within training / testing / fine-tuning functions
'''
    epoch_loss = running_loss / data_length[phase] if running_loss is not None else None
accuracy = accuracy_score(gt[phase].detach().cpu(), pred[phase].detach().cpu())
f1_class_scores = f1_score(gt[phase].detach().cpu(), pred[phase].detach().cpu(), average=None, labels=[0,1]).tolist()
string_out = " "
sys.stdout.write('%s\r' % string_out)
sys.stdout.flush()
if running_loss is not None:
print('{} \tLoss: {:.4f}, \tAccuracy: {:.4f}, \tF1: [{:.4f}, {:.4f}]'
.format(phase, epoch_loss, accuracy, f1_class_scores[0], f1_class_scores[1]))
return epoch_loss, accuracy, f1_class_scores
def clf_trainer(train_set, val_set, data_loader, data_length, end_epoch, batch_size,
conv_net, model, device, optimizer, scheduler, clf_out, loss_fn,
samples_per_cls, no_of_classes, weights_per_cls, iteration):
'''
Function for training a classification layer with fixed feature extractor.
Inputs:
train_set [BinaryDataset]: Dataset object for training set
val_set [BinaryDataset]: Dataset object for validation set
data_loader [dict]: dict of data loader objects
e.g. data_loader = {'Training': train_loader, 'Validation': val_loader}
where train_loader and val_loader are of type torch.utils.data.DataLoader
data_length [dict]: dict of data loader lengths
e.g. data_length = {'Training': len(train_loader), 'Validation': len(val_loader)}
where train_loader and val_loader are of type torch.utils.data.DataLoader
end_epoch [int]: number of epochs desired for training
batch_size [int]: batch size in training
conv_net [torch.nn]: feature extractor, loaded to device
model [torch.nn]: classification layer, loaded to device
device [torch.device]: cpu or gpu
instantiated using torch.device('cuda' if torch.cuda.is_available() else 'cpu')
optimizer [torch.optim]: optimizer object
scheduler [torch.optim.lr_scheduler]: learning rate scheduler object
clf_out [str]: path to where model is saved
loss_fn [str]: specifies loss function to use
samples_per_cls [np.array(n,)]: array storing count of samples for each of the n classes
no_of_classes [int]: number of classes
weights_per_cls [torch.tensor(n,)]: torch tensor of weights for each of the n classes, loaded onto device
iteration [int]: iteration in ensembling (i.e. 3rd iteration trains the 3rd classification layer)
'''
# save csv for training and validation sets
folder_path = clf_out + str(iteration) + "/"
if not os.path.exists(folder_path):
os.mkdir(folder_path)
train_set.df.to_csv(folder_path + "train_set_unique_labels.csv")
val_set.df.to_csv(folder_path + "val_set_unique_labels.csv")
# define variables for calculating statistics
pred_train = torch.zeros(len(train_set)).to(device)
labels_train = torch.zeros(len(train_set)).to(device)
pred_val = torch.zeros(len(val_set)).to(device)
labels_val = torch.zeros(len(val_set)).to(device)
pred = {'Training': pred_train, 'Validation': pred_val}
gt = {'Training': labels_train, 'Validation': labels_val}
    min_train_loss = float('inf')
    min_val_loss = float('inf')
max_train_f1 = 0
max_val_f1 = 0
max_train_acc = 0
max_val_acc = 0
# put feature extractor in evaluation mode (important for layers such as BN)
conv_net.eval()
# choose loss fn
criterion = choose_loss_fn(loss_fn, samples_per_cls, no_of_classes, weights_per_cls)
# training loop
for epoch in range(1, end_epoch + 1):
print('\nEpoch {}/{}'.format(epoch, end_epoch))
print('-' * 20)
# select between training or validation
        for phase in ['Training']:  # NOTE: validation disabled here in the original; use ['Training', 'Validation'] to also track val metrics
if phase == 'Training':
model.train() # set classification layer to training mode
else:
model.eval() # set classification layer to evaluate mode
running_loss = 0.0
save_train_loss = False
save_val_loss = False
save_train_f1 = False
save_val_f1 = False
save_train_acc = False
save_val_acc = False
# iterate over data.
for i, batch in enumerate(data_loader[phase], 0):
# grab data
inputs, labels = batch
# zero the parameter gradients
optimizer.zero_grad()
# forward pass
with torch.no_grad():
features = conv_net(inputs.to(device))
with torch.set_grad_enabled(phase=='Training'):
outputs = model(features)
# rare case where final batch in data has size 1 - add leading dimension
output_size = [int(x) for x in outputs.shape]
if len(output_size) == 1:
outputs = torch.unsqueeze(outputs,0)
# choose loss fn based on function input
loss = criterion(outputs, labels.to(device))
# catch NaNs
if math.isnan(loss):
print("\n\nCont'd: got undefined loss (nan)\n\n")
del loss, outputs, inputs, labels
continue
pred[phase][ i*batch_size : (batch_size*(i+1)) ] = torch.argmax(outputs, 1).float()
gt[phase][ i*batch_size : (batch_size*(i+1)) ] = labels
# backward pass + optimization (only if in training mode)
if phase == 'Training':
loss.backward()
optimizer.step()
running_loss += loss.item()
string_out = "Epoch [{}/{}]\tStep [{}/{}]\tLoss: {:.5}".format(epoch, end_epoch, i+1, data_length[phase], running_loss / (i+1))
sys.stdout.write('%s\r' % string_out)
sys.stdout.flush()
del loss, outputs, inputs, labels
# print loss, accuracy, class f1 scores, and harmonic mean f1 for whole epoch
epoch_loss, accuracy, f1_class_scores = print_scores(running_loss, data_length, phase, gt, pred)
if phase == 'Training':
if epoch_loss < min_train_loss:
min_train_loss = epoch_loss
save_train_loss = True
if f1_class_scores[1] > max_train_f1:
max_train_f1 = f1_class_scores[1]
save_train_f1 = True
if accuracy > max_train_acc:
max_train_acc = accuracy
save_train_acc = True
else:
if epoch_loss < min_val_loss:
min_val_loss = epoch_loss
save_val_loss = True
if f1_class_scores[1] > max_val_f1:
max_val_f1 = f1_class_scores[1]
save_val_f1 = True
if accuracy > max_val_acc:
max_val_acc = accuracy
save_val_acc = True
save_model(model, optimizer, epoch, accuracy, save_train_loss, save_val_loss, save_train_f1,
save_val_f1, save_train_acc, save_val_acc, clf_out, iteration, False)
scheduler.step()
del pred_train, labels_train, pred_val, labels_val, pred, gt
print("Training for iteration {:.0f} finished\n\n".format(iteration))
return
def fine_tune(train_set, val_set, data_loader, data_length, batch_size, cnn, device,
learning_rate, clf_out, loss_fn, samples_per_cls, no_of_classes, weights_per_cls):
'''
Function for fine-tuning CNN.
Inputs:
train_set [BinaryDataset]: Dataset object for training set
val_set [BinaryDataset]: Dataset object for validation set
data_loader [dict]: dict of data loader objects
e.g. data_loader = {'Training': train_loader, 'Validation': val_loader}
where train_loader and val_loader are of type torch.utils.data.DataLoader
data_length [dict]: dict of data loader lengths
e.g. data_length = {'Training': len(train_loader), 'Validation': len(val_loader)}
where train_loader and val_loader are of type torch.utils.data.DataLoader
batch_size [int]: batch size in training
cnn [torch.nn]: CNN, loaded to device
device [torch.device]: cpu or gpu
instantiated using torch.device('cuda' if torch.cuda.is_available() else 'cpu')
learning_rate [float]: learning rate applied to backpropagation
clf_out [str]: path to where model is saved
loss_fn [str]: specifies loss function to use
samples_per_cls [np.array(n,)]: array storing count of samples for each of the n classes
no_of_classes [int]: number of classes
weights_per_cls [torch.tensor(n,)]: torch tensor of weights for each of the n classes, loaded onto device
'''
# save csv for training and validation sets
folder_path = clf_out + "1/"
train_set.df.to_csv(folder_path + "train_set_unique_labels.csv")
val_set.df.to_csv(folder_path + "val_set_unique_labels.csv")
# define variables for calculating statistics
pred_train = torch.zeros(len(train_set)).to(device)
labels_train = torch.zeros(len(train_set)).to(device)
pred_val = torch.zeros(len(val_set)).to(device)
labels_val = torch.zeros(len(val_set)).to(device)
pred = {'Training': pred_train, 'Validation': pred_val}
gt = {'Training': labels_train, 'Validation': labels_val}
    min_train_loss = float('inf')
    min_val_loss = float('inf')
max_train_f1 = 0
max_val_f1 = 0
max_train_acc = 0
max_val_acc = 0
# set all layers to trainable
for param in cnn.parameters():
param.requires_grad = True
# setup optimizer and learning rate scheduler
scheduler_step = 33
optimizer = optim.SGD(cnn.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)
scheduler = lr_scheduler.StepLR(optimizer, step_size=scheduler_step, gamma=0.2)
# choose loss fn
criterion = choose_loss_fn(loss_fn, samples_per_cls, no_of_classes, weights_per_cls)
# training loop
end_epoch = 101
for epoch in range(1, end_epoch):
print('\nEpoch {}/{}'.format(epoch, end_epoch))
print('-' * 20)
# select between training or validation
for phase in ['Training', 'Validation']:
if phase == 'Training':
cnn.train() # set classification layer to training mode
else:
cnn.eval() # set classification layer to evaluate mode
running_loss = 0.0
save_train_loss = False
save_val_loss = False
save_train_f1 = False
save_val_f1 = False
save_train_acc = False
save_val_acc = False
# iterate over data.
for i, batch in enumerate(data_loader[phase], 0):
# grab data
inputs, labels = batch
# zero the parameter gradients
optimizer.zero_grad()
# forward pass
with torch.set_grad_enabled(phase=='Training'):
outputs = cnn(inputs.to(device))
# rare case where final batch in data has size 1 - add leading dimension
output_size = [int(x) for x in outputs.shape]
if len(output_size) == 1:
outputs = torch.unsqueeze(outputs,0)
# choose loss fn based on function input
loss = criterion(outputs, labels.to(device))
# catch NaNs
if math.isnan(loss):
print("\n\nCont'd: got undefined loss (nan)\n\n")
del loss, outputs, inputs, labels
continue
pred[phase][ i*batch_size : (batch_size*(i+1)) ] = torch.argmax(outputs, 1).float()
gt[phase][ i*batch_size : (batch_size*(i+1)) ] = labels
# backward pass + optimization (only if in training mode)
if phase == 'Training':
loss.backward()
optimizer.step()
running_loss += loss.item()
string_out = "Epoch [{}/{}]\tStep [{}/{}]\tLoss: {:.5}".format(epoch, end_epoch, i+1, data_length[phase], running_loss / (i+1))
sys.stdout.write('%s\r' % string_out)
sys.stdout.flush()
del loss, outputs, inputs, labels
# print loss, accuracy, class f1 scores, and harmonic mean f1 for whole epoch
epoch_loss, accuracy, f1_class_scores = print_scores(running_loss, data_length, phase, gt, pred)
if phase == 'Training':
if epoch_loss < min_train_loss:
min_train_loss = epoch_loss
save_train_loss = True
if f1_class_scores[1] > max_train_f1:
max_train_f1 = f1_class_scores[1]
save_train_f1 = True
if accuracy > max_train_acc:
max_train_acc = accuracy
save_train_acc = True
else:
if epoch_loss < min_val_loss:
min_val_loss = epoch_loss
save_val_loss = True
if f1_class_scores[1] > max_val_f1:
max_val_f1 = f1_class_scores[1]
save_val_f1 = True
if accuracy > max_val_acc:
max_val_acc = accuracy
save_val_acc = True
save_model(cnn, optimizer, epoch, accuracy, save_train_loss, save_val_loss, save_train_f1,
save_val_f1, save_train_acc, save_val_acc, clf_out, None, True)
scheduler.step()
del pred_train, labels_train, pred_val, labels_val, pred, gt
print("Fine tuning complete.\n\n")
return
def clf_tester(dataset, data_loader, batch_size, conv_net, model, device):
'''
    Function for classifying data in evaluation mode. Returns a boolean array marking which samples were classified correctly.
Inputs:
dataset [BinaryDataset]: Dataset object for training set
data_loader [torch.utils.data.DataLoader]: data loader object for dataset
batch_size [int]: batch size for evaluation
conv_net [torch.nn]: feature extractor, loaded to device
model [torch.nn]: classification layer, loaded to device
device [torch.device]: cpu or gpu
instantiated using torch.device('cuda' if torch.cuda.is_available() else 'cpu')
Outputs:
positives [np.array(n,)]: array marking the correctly classified samples
e.g. [1,1,0,1,0,0] means that samples 0, 1, 3 were correctly classified
'''
# define variables for calculating statistics
pred = torch.zeros(len(dataset)).to(device)
gt = torch.zeros(len(dataset)).to(device)
# set model to evaluation mode
conv_net.eval()
model.eval()
running_loss = 0.0
# iterate over data.
for i, batch in enumerate(data_loader, 0):
string_out = "Step [{}/{}]".format(i+1, len(data_loader))
sys.stdout.write('%s\r' % string_out)
sys.stdout.flush()
# grab data
inputs, labels = batch
# forward pass
with torch.no_grad():
features = conv_net(inputs.to(device))
outputs = model(features)
# rare case where final batch in data has size 1 - add leading dimension
output_size = [int(x) for x in outputs.shape]
if len(output_size) == 1:
outputs = torch.unsqueeze(outputs,0)
pred[ i*batch_size : (batch_size*(i+1)) ] = torch.argmax(outputs, 1).float()
gt[ i*batch_size : (batch_size*(i+1)) ] = labels
del outputs, inputs, labels
# print loss, accuracy, class f1 scores, and harmonic mean f1 for whole epoch
accuracy = accuracy_score(gt.detach().cpu(), pred.detach().cpu())
f1_class_scores = f1_score(gt.detach().cpu(), pred.detach().cpu(), average=None, labels=[0,1]).tolist()
print('Accuracy: {:.4f}, \tF1: [{:.4f}, {:.4f}]'
.format(accuracy, f1_class_scores[0], f1_class_scores[1]))
positives = (pred == gt).cpu().numpy()
del pred, gt
print("\n\n")
return positives
def create_meta_data(train_set, val_set, test_set, data_loader, data_length,
batch_size, conv_net, device, data_out, clfs):
'''
Function for creating meta-data using logit outputs from each classification layer.
Inputs:
train_set [BinaryDataset]: Dataset object for training set
        val_set [BinaryDataset]: Dataset object for validation set
        test_set [BinaryDataset]: Dataset object for the test set
data_loader [dict]: dict of data loader objects
e.g. data_loader = {'Training': train_loader, 'Validation': val_loader}
where train_loader and val_loader are of type torch.utils.data.DataLoader
data_length [dict]: dict of data loader lengths
e.g. data_length = {'Training': len(train_loader), 'Validation': len(val_loader)}
where train_loader and val_loader are of type torch.utils.data.DataLoader
batch_size [int]: batch size in training
conv_net [torch.nn]: feature extractor, loaded to device
device [torch.device]: cpu or gpu
instantiated using torch.device('cuda' if torch.cuda.is_available() else 'cpu')
data_out [str]: path for saving meta data (saved as type np.array)
clfs [list]: list where each element is a classification layer (already loaded to device)
'''
boost_data_train = torch.zeros(len(train_set), len(clfs) * 2).to(device)
boost_data_val = torch.zeros(len(val_set), len(clfs) * 2).to(device)
boost_data_test = torch.zeros(len(test_set), len(clfs) * 2).to(device)
boost_data = {'Training': boost_data_train, 'Validation': boost_data_val, 'Testing': boost_data_test}
labels_train = torch.zeros(len(train_set)).to(device)
labels_val = torch.zeros(len(val_set)).to(device)
labels_test = torch.zeros(len(test_set)).to(device)
gt = {'Training': labels_train, 'Validation': labels_val, 'Testing': labels_test}
conv_net.eval()
for i in range(len(clfs)):
clfs[i].eval()
# select between training or validation
for phase in ['Training', 'Validation', 'Testing']:
# iterate over data.
for i, batch in enumerate(data_loader[phase], 0):
string_out = "Step [{}/{}]".format(i+1, len(data_loader[phase]))
sys.stdout.write('%s\r' % string_out)
sys.stdout.flush()
# grab data
inputs, labels = batch
# forward pass
with torch.no_grad():
features = conv_net(inputs.to(device))
            ens_in = torch.empty(features.size(0), 0).to(device)  # empty (B, 0) tensor so torch.cat along dim=1 is well-defined
for j in range(len(clfs)):
with torch.no_grad():
ens_in = torch.cat((ens_in, clfs[j](features)), dim=1)
boost_data[phase][ i*batch_size : (batch_size*(i+1)), : ] = ens_in
gt[phase][ i*batch_size : (batch_size*(i+1)) ] = labels
# save for offline
boost_data_train = boost_data["Training"].cpu().detach().numpy()
boost_data_val = boost_data["Validation"].cpu().detach().numpy()
boost_data_test = boost_data["Testing"].cpu().detach().numpy()
np.save(data_out + "boost_data_train.npy", boost_data_train)
np.save(data_out + "boost_data_val.npy", boost_data_val)
np.save(data_out + "boost_data_test.npy", boost_data_test)
boost_labels_train = gt["Training"].cpu().detach().numpy()
boost_labels_val = gt["Validation"].cpu().detach().numpy()
boost_labels_test = gt["Testing"].cpu().detach().numpy()
np.save(data_out + "boost_labels_train.npy", boost_labels_train)
np.save(data_out + "boost_labels_val.npy", boost_labels_val)
np.save(data_out + "boost_labels_test.npy", boost_labels_test)
return
def fn_and_fp(y_test, test_preds):
# count false positives and false negatives
fn = 0
fp = 0
for i in range(y_test.shape[0]):
if test_preds[i] == 0 and y_test[i] == 1:
fn += 1
elif test_preds[i] == 1 and y_test[i] == 0:
fp += 1
print("# false negatives: ", fn)
print("# false positives: ", fp)
return
def softmax_transform(X):
X_softmax = np.empty(X.shape)
num_clf = int(X.shape[1] / 2)
for i in range(num_clf):
        X_softmax[:, 2*i:2*i+2] = softmax(X[:, 2*i:2*i+2], axis=1)  # step by 2 so each classifier's logit pair is normalized
return X_softmax
def model_average(X):
# X needs to be in softmax form
num_clf = int(X.shape[1] / 2)
X_logit_avg = np.zeros((X.shape[0],2))
for i in range(num_clf):
        X_logit_avg[:, 0:2] = X_logit_avg[:, 0:2] + X[:, 2*i:2*i+2]  # step by 2 to accumulate each classifier's softmax pair
return np.argmax(X_logit_avg, axis=1)
def majority_voting(X):
# X needs to be in softmax form
num_clf = int(X.shape[1] / 2)
X_votes = np.zeros((X.shape[0],2))
for i in range(X.shape[0]):
for j in range(num_clf):
if X[i,2*j+1] > X[i,2*j]:
X_votes[i,1] += 1
else:
X_votes[i,0] += 1
return np.argmax(X_votes, axis=1)
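# Illustrative end-to-end check of the three combiners above (a sketch with
# synthetic logits; the shapes and values are assumptions, not from the original
# source).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_logits = rng.randn(8, 6)            # 8 samples, 3 classifiers x 2 logits each
    y = rng.randint(0, 2, size=8)
    X_soft = softmax_transform(X_logits)  # per-classifier softmax over each logit pair
    print('model averaging:', model_average(X_soft))
    print('majority voting:', majority_voting(X_soft))
    fn_and_fp(y, model_average(X_soft))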
|
[
"sys.stdout.write",
"os.mkdir",
"os.remove",
"torch.optim.lr_scheduler.StepLR",
"numpy.argmax",
"numpy.empty",
"torch.argmax",
"torch.randn",
"sys.stdout.flush",
"glob.glob",
"torch.no_grad",
"os.path.exists",
"torch.hub.load",
"math.isnan",
"numpy.save",
"efficientnet_pytorch.EfficientNet.from_pretrained",
"torch.set_grad_enabled",
"torch.unsqueeze",
"scipy.special.softmax",
"numpy.zeros"
] |
[((5759, 5796), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\r' % string_out)"], {}), "('%s\\r' % string_out)\n", (5775, 5796), False, 'import sys\n'), ((5801, 5819), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5817, 5819), False, 'import sys\n'), ((15583, 15650), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': 'scheduler_step', 'gamma': '(0.2)'}), '(optimizer, step_size=scheduler_step, gamma=0.2)\n', (15602, 15650), False, 'from torch.optim import lr_scheduler\n'), ((25136, 25196), 'numpy.save', 'np.save', (["(data_out + 'boost_data_train.npy')", 'boost_data_train'], {}), "(data_out + 'boost_data_train.npy', boost_data_train)\n", (25143, 25196), True, 'import numpy as np\n'), ((25201, 25257), 'numpy.save', 'np.save', (["(data_out + 'boost_data_val.npy')", 'boost_data_val'], {}), "(data_out + 'boost_data_val.npy', boost_data_val)\n", (25208, 25257), True, 'import numpy as np\n'), ((25262, 25320), 'numpy.save', 'np.save', (["(data_out + 'boost_data_test.npy')", 'boost_data_test'], {}), "(data_out + 'boost_data_test.npy', boost_data_test)\n", (25269, 25320), True, 'import numpy as np\n'), ((25513, 25577), 'numpy.save', 'np.save', (["(data_out + 'boost_labels_train.npy')", 'boost_labels_train'], {}), "(data_out + 'boost_labels_train.npy', boost_labels_train)\n", (25520, 25577), True, 'import numpy as np\n'), ((25582, 25642), 'numpy.save', 'np.save', (["(data_out + 'boost_labels_val.npy')", 'boost_labels_val'], {}), "(data_out + 'boost_labels_val.npy', boost_labels_val)\n", (25589, 25642), True, 'import numpy as np\n'), ((25647, 25709), 'numpy.save', 'np.save', (["(data_out + 'boost_labels_test.npy')", 'boost_labels_test'], {}), "(data_out + 'boost_labels_test.npy', boost_labels_test)\n", (25654, 25709), True, 'import numpy as np\n'), ((26137, 26154), 'numpy.empty', 'np.empty', (['X.shape'], {}), '(X.shape)\n', (26145, 26154), True, 'import numpy as np\n'), ((26407, 26432), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 2)'], {}), '((X.shape[0], 2))\n', (26415, 26432), True, 'import numpy as np\n'), ((26533, 26563), 'numpy.argmax', 'np.argmax', (['X_logit_avg'], {'axis': '(1)'}), '(X_logit_avg, axis=1)\n', (26542, 26563), True, 'import numpy as np\n'), ((26673, 26698), 'numpy.zeros', 'np.zeros', (['(X.shape[0], 2)'], {}), '((X.shape[0], 2))\n', (26681, 26698), True, 'import numpy as np\n'), ((26898, 26924), 'numpy.argmax', 'np.argmax', (['X_votes'], {'axis': '(1)'}), '(X_votes, axis=1)\n', (26907, 26924), True, 'import numpy as np\n'), ((8135, 8162), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (8149, 8162), False, 'import os\n'), ((8172, 8193), 'os.mkdir', 'os.mkdir', (['folder_path'], {}), '(folder_path)\n', (8180, 8193), False, 'import os\n'), ((20825, 20862), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\r' % string_out)"], {}), "('%s\\r' % string_out)\n", (20841, 20862), False, 'import sys\n'), ((20871, 20889), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (20887, 20889), False, 'import sys\n'), ((26247, 26277), 'scipy.special.softmax', 'softmax', (['X[:, i:i + 2]'], {'axis': '(1)'}), '(X[:, i:i + 2], axis=1)\n', (26254, 26277), False, 'from scipy.special import softmax\n'), ((4739, 4785), 'glob.glob', 'glob.glob', (["(folder_path + '-' + prefix[i] + '*')"], {}), "(folder_path + '-' + prefix[i] + '*')\n", (4748, 4785), False, 'import glob\n'), ((20979, 20994), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (20992, 20994), False, 'import torch\n'), ((21289, 21316), 'torch.unsqueeze', 
'torch.unsqueeze', (['outputs', '(0)'], {}), '(outputs, 0)\n', (21304, 21316), False, 'import torch\n'), ((24284, 24321), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\r' % string_out)"], {}), "('%s\\r' % string_out)\n", (24300, 24321), False, 'import sys\n'), ((24334, 24352), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (24350, 24352), False, 'import sys\n'), ((4842, 4867), 'os.remove', 'os.remove', (['file_to_del[0]'], {}), '(file_to_del[0])\n', (4851, 4867), False, 'import os\n'), ((10704, 10720), 'math.isnan', 'math.isnan', (['loss'], {}), '(loss)\n', (10714, 10720), False, 'import math\n'), ((11442, 11479), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\r' % string_out)"], {}), "('%s\\r' % string_out)\n", (11458, 11479), False, 'import sys\n'), ((11496, 11514), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (11512, 11514), False, 'import sys\n'), ((17307, 17323), 'math.isnan', 'math.isnan', (['loss'], {}), '(loss)\n', (17317, 17323), False, 'import math\n'), ((18045, 18082), 'sys.stdout.write', 'sys.stdout.write', (["('%s\\r' % string_out)"], {}), "('%s\\r' % string_out)\n", (18061, 18082), False, 'import sys\n'), ((18099, 18117), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (18115, 18117), False, 'import sys\n'), ((21369, 21393), 'torch.argmax', 'torch.argmax', (['outputs', '(1)'], {}), '(outputs, 1)\n', (21381, 21393), False, 'import torch\n'), ((24458, 24473), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24471, 24473), False, 'import torch\n'), ((936, 983), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b0"""'], {}), "('efficientnet-b0')\n", (964, 983), False, 'from efficientnet_pytorch import EfficientNet\n'), ((10082, 10097), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (10095, 10097), False, 'import torch\n'), ((10180, 10223), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'Training')"], {}), "(phase == 'Training')\n", (10202, 10223), False, 'import torch\n'), ((10493, 10520), 'torch.unsqueeze', 'torch.unsqueeze', (['outputs', '(0)'], {}), '(outputs, 0)\n', (10508, 10520), False, 'import torch\n'), ((16776, 16819), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (["(phase == 'Training')"], {}), "(phase == 'Training')\n", (16798, 16819), False, 'import torch\n'), ((17096, 17123), 'torch.unsqueeze', 'torch.unsqueeze', (['outputs', '(0)'], {}), '(outputs, 0)\n', (17111, 17123), False, 'import torch\n'), ((24565, 24579), 'torch.randn', 'torch.randn', (['(0)'], {}), '(0)\n', (24576, 24579), False, 'import torch\n'), ((24651, 24666), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (24664, 24666), False, 'import torch\n'), ((1249, 1296), 'efficientnet_pytorch.EfficientNet.from_pretrained', 'EfficientNet.from_pretrained', (['"""efficientnet-b1"""'], {}), "('efficientnet-b1')\n", (1277, 1296), False, 'from efficientnet_pytorch import EfficientNet\n'), ((10943, 10967), 'torch.argmax', 'torch.argmax', (['outputs', '(1)'], {}), '(outputs, 1)\n', (10955, 10967), False, 'import torch\n'), ((17546, 17570), 'torch.argmax', 'torch.argmax', (['outputs', '(1)'], {}), '(outputs, 1)\n', (17558, 17570), False, 'import torch\n'), ((1501, 1573), 'torch.hub.load', 'torch.hub.load', (['"""pytorch/vision:v0.6.0"""', '"""mobilenet_v2"""'], {'pretrained': '(True)'}), "('pytorch/vision:v0.6.0', 'mobilenet_v2', pretrained=True)\n", (1515, 1573), False, 'import torch\n')]
|
import numpy as np
import scipy.cluster.vq as vq
import argparse
import matplotlib as mpl
mpl.use("qt4Agg")
import matplotlib.pyplot as plt
import thimbles as tmb
import json
import latbin
parser = argparse.ArgumentParser()
parser.add_argument("linelist")
parser.add_argument("--k-max", default=300, type=int)
parser.add_argument("--delta-log-wv", default=0.025, type=float)
parser.add_argument("--resolution", default=50000, type=float)
parser.add_argument("--delta-ep", default=0.4, type=float)
parser.add_argument("--strength-offset", type=float, default=0.0)
parser.add_argument("--delta-rel-strength", default=0.5, type=float)
parser.add_argument("--max-rel-strength", default=100.0, type=float)
parser.add_argument("--teff", default=5500.0, type=float)
parser.add_argument("--match-isotopes", action="store_true")
parser.add_argument("--output", required=True)
parser.add_argument("--output-mapping")
if __name__ == "__main__":
args = parser.parse_args()
full_ll = tmb.io.linelist_io.read_linelist(args.linelist)
lbs = tmb.transitions.lines_by_species(full_ll, match_isotopes=args.match_isotopes)
line_summaries = {}
ll_indexer = {full_ll[i]:i for i in range(len(full_ll))}
mapping_dict = {}
for species_id in lbs:
species_ll = lbs[species_id]
wvs = np.array([l.wv for l in species_ll])
eps = np.array([l.ep for l in species_ll])
psts = np.array([l.pseudo_strength(teff=args.teff) for l in species_ll])
rel_strengths = np.power(10.0, psts-args.strength_offset)
max_rl = args.max_rel_strength
rel_strengths = np.where(rel_strengths <= max_rl, rel_strengths, np.sqrt(rel_strengths - max_rl) + max_rl)
        if False:  # debugging plot; flip to True to inspect the strength transform
plt.scatter(psts, rel_strengths)
plt.show()
scaled_ll = np.array([
np.log10(wvs)/args.delta_log_wv,
eps/args.delta_ep,
rel_strengths/args.delta_rel_strength,
]).transpose()
alat = latbin.ALattice(3, scale=0.77)
binned_centers = alat.bin(scaled_ll).mean()
#choose the number of lines to keep
k_keep = min(args.k_max, len(scaled_ll))
#don't keep degenerate features
        unique_wvs_eff = np.unique(np.around(np.log(wvs) * args.resolution))
        k_keep = min(len(unique_wvs_eff), k_keep)
#don't keep more features than we have the ability to detect
strength_sum = int(np.sum(np.clip(rel_strengths, 0.0, 1.0)))
strength_sum = max(1, strength_sum)
k_keep = min(strength_sum, k_keep)
#carry out k-means
centroids, dist = vq.kmeans(binned_centers.values, k_keep)
#quantize onto the centroids
binned_ids, dist = vq.vq(scaled_ll, centroids)
cur_summary = []
#iterate through the transitions assigned to each centroid
for i in np.unique(binned_ids):
group_idxs = np.where(binned_ids == i)[0]
exemplar_idx = np.argmax(rel_strengths[group_idxs])
exemplar = species_ll[group_idxs[exemplar_idx]]
cur_summary.append(exemplar)
grouped_ll = [species_ll[gi] for gi in group_idxs]
mapping_dict[ll_indexer[exemplar]] = [ll_indexer[trans] for trans in grouped_ll]
line_summaries[species_id] = sorted(cur_summary, key=lambda x:x.wv)
output_transitions = []
for sp_key in sorted(line_summaries):
output_transitions.extend(line_summaries[sp_key])
    if args.output_mapping is not None:
map_out_fname = args.output_mapping
map_file = open(map_out_fname+".json", "w")
json.dump(mapping_dict, map_file)
map_file.close()
tmb.io.linelist_io.write_linelist(args.output, output_transitions, file_type="moog")
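# Illustrative invocation (a sketch -- the script and line-list file names are
# hypothetical; the flags are exactly those registered with argparse above):
#   python summarize_linelist.py my_lines.ln --k-max 200 --teff 5000.0 \
#       --output summary.ln --output-mapping summary_map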
|
[
"argparse.ArgumentParser",
"numpy.argmax",
"numpy.clip",
"numpy.unique",
"numpy.power",
"numpy.log10",
"thimbles.io.linelist_io.write_linelist",
"json.dump",
"matplotlib.pyplot.show",
"thimbles.io.linelist_io.read_linelist",
"matplotlib.use",
"latbin.ALattice",
"thimbles.transitions.lines_by_species",
"numpy.log",
"scipy.cluster.vq.kmeans",
"matplotlib.pyplot.scatter",
"scipy.cluster.vq.vq",
"numpy.where",
"numpy.array",
"numpy.sqrt"
] |
[((92, 109), 'matplotlib.use', 'mpl.use', (['"""qt4Agg"""'], {}), "('qt4Agg')\n", (99, 109), True, 'import matplotlib as mpl\n'), ((202, 227), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (225, 227), False, 'import argparse\n'), ((985, 1032), 'thimbles.io.linelist_io.read_linelist', 'tmb.io.linelist_io.read_linelist', (['args.linelist'], {}), '(args.linelist)\n', (1017, 1032), True, 'import thimbles as tmb\n'), ((1048, 1125), 'thimbles.transitions.lines_by_species', 'tmb.transitions.lines_by_species', (['full_ll'], {'match_isotopes': 'args.match_isotopes'}), '(full_ll, match_isotopes=args.match_isotopes)\n', (1080, 1125), True, 'import thimbles as tmb\n'), ((3785, 3873), 'thimbles.io.linelist_io.write_linelist', 'tmb.io.linelist_io.write_linelist', (['args.output', 'output_transitions'], {'file_type': '"""moog"""'}), "(args.output, output_transitions,\n file_type='moog')\n", (3818, 3873), True, 'import thimbles as tmb\n'), ((1321, 1357), 'numpy.array', 'np.array', (['[l.wv for l in species_ll]'], {}), '([l.wv for l in species_ll])\n', (1329, 1357), True, 'import numpy as np\n'), ((1372, 1408), 'numpy.array', 'np.array', (['[l.ep for l in species_ll]'], {}), '([l.ep for l in species_ll])\n', (1380, 1408), True, 'import numpy as np\n'), ((1524, 1567), 'numpy.power', 'np.power', (['(10.0)', '(psts - args.strength_offset)'], {}), '(10.0, psts - args.strength_offset)\n', (1532, 1567), True, 'import numpy as np\n'), ((2013, 2043), 'latbin.ALattice', 'latbin.ALattice', (['(3)'], {'scale': '(0.77)'}), '(3, scale=0.77)\n', (2028, 2043), False, 'import latbin\n'), ((2692, 2732), 'scipy.cluster.vq.kmeans', 'vq.kmeans', (['binned_centers.values', 'k_keep'], {}), '(binned_centers.values, k_keep)\n', (2701, 2732), True, 'import scipy.cluster.vq as vq\n'), ((2797, 2824), 'scipy.cluster.vq.vq', 'vq.vq', (['scaled_ll', 'centroids'], {}), '(scaled_ll, centroids)\n', (2802, 2824), True, 'import scipy.cluster.vq as vq\n'), ((2943, 2964), 'numpy.unique', 'np.unique', (['binned_ids'], {}), '(binned_ids)\n', (2952, 2964), True, 'import numpy as np\n'), ((3717, 3750), 'json.dump', 'json.dump', (['mapping_dict', 'map_file'], {}), '(mapping_dict, map_file)\n', (3726, 3750), False, 'import json\n'), ((1751, 1783), 'matplotlib.pyplot.scatter', 'plt.scatter', (['psts', 'rel_strengths'], {}), '(psts, rel_strengths)\n', (1762, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1796, 1806), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1804, 1806), True, 'import matplotlib.pyplot as plt\n'), ((3047, 3083), 'numpy.argmax', 'np.argmax', (['rel_strengths[group_idxs]'], {}), '(rel_strengths[group_idxs])\n', (3056, 3083), True, 'import numpy as np\n'), ((1678, 1709), 'numpy.sqrt', 'np.sqrt', (['(rel_strengths - max_rl)'], {}), '(rel_strengths - max_rl)\n', (1685, 1709), True, 'import numpy as np\n'), ((2506, 2538), 'numpy.clip', 'np.clip', (['rel_strengths', '(0.0)', '(1.0)'], {}), '(rel_strengths, 0.0, 1.0)\n', (2513, 2538), True, 'import numpy as np\n'), ((2991, 3016), 'numpy.where', 'np.where', (['(binned_ids == i)'], {}), '(binned_ids == i)\n', (2999, 3016), True, 'import numpy as np\n'), ((2303, 2314), 'numpy.log', 'np.log', (['wvs'], {}), '(wvs)\n', (2309, 2314), True, 'import numpy as np\n'), ((1859, 1872), 'numpy.log10', 'np.log10', (['wvs'], {}), '(wvs)\n', (1867, 1872), True, 'import numpy as np\n')]
|
import unittest
import warnings
import numpy as np
import numpy.testing as npt
from squidward import utils
from squidward.utils import deprecated
# useful for debugging
np.set_printoptions(suppress=True)
class UtilitiesTestCase(unittest.TestCase):
"""Class for utilities tests."""
# ---------------------------------------------------------------------------------------------------------------------
# Array Checks
# ---------------------------------------------------------------------------------------------------------------------
def test_1(self):
"""
Exactly 1D
        Test that exactly_1d always returns a 1D array.
"""
        x = true = np.ones(10)
output = utils.exactly_1d(x)
npt.assert_almost_equal(output, x, decimal=10)
x = np.ones((10,1))
output = utils.exactly_1d(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.ones((1,10))
output = utils.exactly_1d(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.ones((2,10))
with self.assertRaises(Exception) as context:
utils.exactly_1d(x)
self.assertTrue('Not appropriate input shape.' in str(context.exception))
x = np.ones((2,10,1))
with self.assertRaises(Exception) as context:
utils.exactly_1d(x)
self.assertTrue('Not appropriate input shape.' in str(context.exception))
def test_2(self):
"""
Exactly 2D
        Test that exactly_2d always returns a 2D array.
"""
true = np.ones(10).reshape(-1,1)
x = np.ones(10).reshape(-1,1)
output = utils.exactly_2d(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.ones(10).reshape(1,-1)
output = utils.exactly_2d(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.ones(10)
output = utils.exactly_2d(x)
npt.assert_almost_equal(output, true, decimal=10)
true = np.ones((10,10))
x = np.ones((10,10,1))
output = utils.exactly_2d(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.ones((1,10,10))
output = utils.exactly_2d(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.ones((10,1,10))
with self.assertRaises(Exception) as context:
utils.exactly_2d(x)
self.assertTrue('Not appropriate input shape.' in str(context.exception))
x = np.ones((10,1,10,1))
with self.assertRaises(Exception) as context:
utils.exactly_2d(x)
self.assertTrue('Not appropriate input shape.' in str(context.exception))
# ---------------------------------------------------------------------------------------------------------------------
# Inversions
# ---------------------------------------------------------------------------------------------------------------------
def test_3(self):
"""
        Is Invertible True
        Test that non-singular matrices return True.
"""
arr = np.random.rand(10, 10)
arr = arr.dot(arr.T)
output = utils.is_invertible(arr, 'condition')
assert output
output = utils.is_invertible(arr, 'rank')
assert output
# cramer's rule method fails here due to
# floating point errors in np.linalg.det
# LU decomposition approximation of determinant
output = utils.is_invertible(arr, 'cramer')
        assert not output
def test_4(self):
"""
Is Invertible False
        Test that singular matrices return False.
"""
arr = np.random.rand(10, 10)
arr[-1] = arr[0] + arr[1]
output = utils.is_invertible(arr, 'condition')
        assert not output
        output = utils.is_invertible(arr, 'rank')
        assert not output
        output = utils.is_invertible(arr, 'cramer')
        assert not output
def test_5(self):
"""
Check Valid Covariance
        Test that the function that validates covariance matrices works.
"""
x = np.array([[1, 1, 1],[1, 0, 1],[1, 1, 0]])
        output = utils.check_valid_cov(x)
assert output is None
x = np.array([[-1, 1, 1],[1, 0, 1],[1, 1, 0]])
with self.assertRaises(Exception) as context:
utils.check_valid_cov(x)
self.assertTrue('Negative values in diagonal of covariance matrix.\nLikely cause is kernel '
'inversion instability.\nCheck kernel variance.' in str(context.exception))
# pass a singular matrix
arr = np.random.rand(10, 10)
arr[-1] = arr[0] + arr[1]
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
utils.check_valid_cov(arr)
assert "Cov has high condition. Inverting matrix may result in errors." in str(w[-1].message)
# TODO: check show_warnings argument actually silences warnings
def test_6(self):
"""
Invert
Test that inversion methods work on a typical matrix input with a
reasonable condition.
"""
# create a test input that is not singular
arr = np.array([[0.08647087, 0.44631909, 0.20543369, 0.80556576, 0.484415 ],
[0.83409753, 0.7406405 , 0.72326909, 0.59616491, 0.86701306],
[0.83761527, 0.49645837, 0.64037925, 0.95100387, 0.13899134],
[0.97684547, 0.30623548, 0.95194714, 0.28353989, 0.831871 ],
[0.45327912, 0.74906165, 0.94224464, 0.30019356, 0.56802402]])
arr = arr.dot(arr.T)
true = np.array([[ 6.39675434, -7.5605537 , -1.01890231, 4.9418642 , 0.76873378],
[-7.5605537 , 15.51247025, -0.63188021, -8.45294531, -3.96604294],
[-1.01890231, -0.63188021, 1.95649949, -0.52470477, 0.14555444],
[ 4.9418642 , -8.45294531, -0.52470477, 6.88240616, 0.11092939],
[ 0.76873378, -3.96604294, 0.14555444, 0.11092939, 4.23098611]])
inv = utils.Invert("inv")
output = inv(arr)
npt.assert_almost_equal(output, true, decimal=7)
inv = utils.Invert("pinv")
output = inv(arr)
npt.assert_almost_equal(output, true, decimal=7)
inv = utils.Invert("solve")
output = inv(arr)
npt.assert_almost_equal(output, true, decimal=7)
inv = utils.Invert("cholesky")
output = inv(arr)
npt.assert_almost_equal(output, true, decimal=7)
inv = utils.Invert("svd")
output = inv(arr)
npt.assert_almost_equal(output, true, decimal=7)
inv = utils.Invert("lu")
output = inv(arr)
npt.assert_almost_equal(output, true, decimal=7)
        # pass an invalid inversion method
with self.assertRaises(Exception) as context:
utils.Invert("fake")
self.assertTrue('Invalid inversion method argument.' in str(context.exception))
# pass a singular matrix and catch warning
arr = np.random.rand(10, 10)
arr[-1] = arr[0] + arr[1]
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
inv = utils.Invert()
inv(arr)
# Verify some things
assert len(w) == 1
assert "Matrix has high condition." in str(w[-1].message)
# ---------------------------------------------------------------------------------------------------------------------
# Pre-processing
# ---------------------------------------------------------------------------------------------------------------------
def test_7(self):
"""
Onehot
Test that one hot returns the appropriate one hot array.
"""
y = np.array([0,1,0,2,1,0,1,2,0,1])
true = np.array([[1., 0., 0.],
[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.],
[0., 1., 0.]])
output = utils.onehot(y,3)
npt.assert_almost_equal(output, true, decimal=10)
output = utils.onehot(y)
npt.assert_almost_equal(output, true, decimal=10)
with self.assertRaises(Exception) as context:
utils.onehot(y,4)
self.assertTrue('Number of unique values does not match num_classes argument.' in str(context.exception))
with self.assertRaises(Exception) as context:
utils.onehot(y,2)
self.assertTrue('Number of unique values does not match num_classes argument.' in str(context.exception))
def test_8(self):
"""
Reversehot
        Test that reverse hot appropriately reverses one hot arrays. Should do
        the exact opposite of the one hot function.
"""
y = np.array([[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[1., 0., 0.],
[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.],
[0., 0., 1.],
[0., 0., 1.],
[1., 0., 0.]])
true = np.array([0.0,0.0,0.0,0.0,1.0,0.0,2.0,2.0,2.0,0.0])
output = utils.reversehot(y)
npt.assert_almost_equal(output, true, decimal=10)
output = utils.reversehot(true)
npt.assert_almost_equal(output, true, decimal=10)
output = utils.reversehot(true.T)
npt.assert_almost_equal(output, true, decimal=10)
y = true.reshape(-1,1)
output = utils.reversehot(y)
npt.assert_almost_equal(output, true, decimal=10)
y = true.reshape(1,-1)
output = utils.reversehot(y)
npt.assert_almost_equal(output, true, decimal=10)
# ---------------------------------------------------------------------------------------------------------------------
# Classification Specific
# ---------------------------------------------------------------------------------------------------------------------
def test_9(self):
"""
Sigmoid
Test sigmoid functions works.
"""
x = 0.458
true = 0.61253961344091512
output = utils.sigmoid(x)
npt.assert_almost_equal(output, true, decimal=10)
x = np.array([[-8, 0, 6],[8, 3, 1],[10, -300, 11]])
true = np.array([[0.0003353501, 0.5 , 0.9975273768],
[0.9996646499, 0.9525741268, 0.7310585786],
[0.9999546021, 0. , 0.9999832986]])
output = utils.sigmoid(x)
npt.assert_almost_equal(output, true, decimal=10)
output = utils.sigmoid(-12345)
true = 0.0
npt.assert_almost_equal(output, true, decimal=10)
output = utils.sigmoid(12345)
true = 1.0
npt.assert_almost_equal(output, true, decimal=10)
def test_10(self):
"""
Softmax
Test softmax function works.
"""
x = np.array([[-8, 0, 6],[8, 3, 1],[10, -300, 11]])
true = np.array([[2.2388575697e-004, 3.3380896059e-001, 6.6596715365e-001],
[3.7255082739e-001, 3.5500132884e-001, 2.7244784376e-001],
[4.9999282567e-001, 2.5741800386e-131, 5.0000717433e-001]])
output = utils.softmax(x)
npt.assert_almost_equal(output, true, decimal=10)
# ---------------------------------------------------------------------------------------------------------------------
# Miscellaneous
# ---------------------------------------------------------------------------------------------------------------------
def test_11(self):
"""
Deprecated
Ensure that the deprecated warning actually returns the right warning
to the user.
"""
@deprecated
def f():
pass
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
f()
# Verify some things
assert len(w) == 1
assert issubclass(w[-1].category, DeprecationWarning)
assert "deprecated" in str(w[-1].message)
if __name__ == '__main__':
unittest.main()
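# The suite runs directly (python <this file>) via the unittest.main() hook
# above, or can be discovered by a runner such as pytest.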
|
[
"unittest.main",
"squidward.utils.exactly_2d",
"numpy.set_printoptions",
"squidward.utils.is_invertible",
"squidward.utils.onehot",
"squidward.utils.softmax",
"warnings.simplefilter",
"squidward.utils.Invert",
"numpy.testing.assert_almost_equal",
"numpy.ones",
"squidward.utils.sigmoid",
"numpy.array",
"squidward.utils.check_valid_cov",
"squidward.utils.reversehot",
"warnings.catch_warnings",
"numpy.random.rand",
"squidward.utils.exactly_1d"
] |
[((171, 205), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (190, 205), True, 'import numpy as np\n'), ((12808, 12823), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12821, 12823), False, 'import unittest\n'), ((693, 704), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (700, 704), True, 'import numpy as np\n'), ((724, 743), 'squidward.utils.exactly_1d', 'utils.exactly_1d', (['x'], {}), '(x)\n', (740, 743), False, 'from squidward import utils\n'), ((752, 798), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'x'], {'decimal': '(10)'}), '(output, x, decimal=10)\n', (775, 798), True, 'import numpy.testing as npt\n'), ((812, 828), 'numpy.ones', 'np.ones', (['(10, 1)'], {}), '((10, 1))\n', (819, 828), True, 'import numpy as np\n'), ((845, 864), 'squidward.utils.exactly_1d', 'utils.exactly_1d', (['x'], {}), '(x)\n', (861, 864), False, 'from squidward import utils\n'), ((873, 922), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (896, 922), True, 'import numpy.testing as npt\n'), ((936, 952), 'numpy.ones', 'np.ones', (['(1, 10)'], {}), '((1, 10))\n', (943, 952), True, 'import numpy as np\n'), ((969, 988), 'squidward.utils.exactly_1d', 'utils.exactly_1d', (['x'], {}), '(x)\n', (985, 988), False, 'from squidward import utils\n'), ((997, 1046), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (1020, 1046), True, 'import numpy.testing as npt\n'), ((1060, 1076), 'numpy.ones', 'np.ones', (['(2, 10)'], {}), '((2, 10))\n', (1067, 1076), True, 'import numpy as np\n'), ((1257, 1276), 'numpy.ones', 'np.ones', (['(2, 10, 1)'], {}), '((2, 10, 1))\n', (1264, 1276), True, 'import numpy as np\n'), ((1666, 1685), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (1682, 1685), False, 'from squidward import utils\n'), ((1694, 1743), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (1717, 1743), True, 'import numpy.testing as npt\n'), ((1800, 1819), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (1816, 1819), False, 'from squidward import utils\n'), ((1828, 1877), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (1851, 1877), True, 'import numpy.testing as npt\n'), ((1891, 1902), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1898, 1902), True, 'import numpy as np\n'), ((1920, 1939), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (1936, 1939), False, 'from squidward import utils\n'), ((1948, 1997), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (1971, 1997), True, 'import numpy.testing as npt\n'), ((2014, 2031), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (2021, 2031), True, 'import numpy as np\n'), ((2044, 2064), 'numpy.ones', 'np.ones', (['(10, 10, 1)'], {}), '((10, 10, 1))\n', (2051, 2064), True, 'import numpy as np\n'), ((2080, 2099), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (2096, 2099), False, 'from squidward import utils\n'), ((2108, 2157), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), 
'(output, true, decimal=10)\n', (2131, 2157), True, 'import numpy.testing as npt\n'), ((2171, 2191), 'numpy.ones', 'np.ones', (['(1, 10, 10)'], {}), '((1, 10, 10))\n', (2178, 2191), True, 'import numpy as np\n'), ((2207, 2226), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (2223, 2226), False, 'from squidward import utils\n'), ((2235, 2284), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (2258, 2284), True, 'import numpy.testing as npt\n'), ((2298, 2318), 'numpy.ones', 'np.ones', (['(10, 1, 10)'], {}), '((10, 1, 10))\n', (2305, 2318), True, 'import numpy as np\n'), ((2498, 2521), 'numpy.ones', 'np.ones', (['(10, 1, 10, 1)'], {}), '((10, 1, 10, 1))\n', (2505, 2521), True, 'import numpy as np\n'), ((3095, 3117), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (3109, 3117), True, 'import numpy as np\n'), ((3165, 3202), 'squidward.utils.is_invertible', 'utils.is_invertible', (['arr', '"""condition"""'], {}), "(arr, 'condition')\n", (3184, 3202), False, 'from squidward import utils\n'), ((3243, 3275), 'squidward.utils.is_invertible', 'utils.is_invertible', (['arr', '"""rank"""'], {}), "(arr, 'rank')\n", (3262, 3275), False, 'from squidward import utils\n'), ((3470, 3504), 'squidward.utils.is_invertible', 'utils.is_invertible', (['arr', '"""cramer"""'], {}), "(arr, 'cramer')\n", (3489, 3504), False, 'from squidward import utils\n'), ((3668, 3690), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (3682, 3690), True, 'import numpy as np\n'), ((3743, 3780), 'squidward.utils.is_invertible', 'utils.is_invertible', (['arr', '"""condition"""'], {}), "(arr, 'condition')\n", (3762, 3780), False, 'from squidward import utils\n'), ((3822, 3854), 'squidward.utils.is_invertible', 'utils.is_invertible', (['arr', '"""rank"""'], {}), "(arr, 'rank')\n", (3841, 3854), False, 'from squidward import utils\n'), ((3896, 3930), 'squidward.utils.is_invertible', 'utils.is_invertible', (['arr', '"""cramer"""'], {}), "(arr, 'cramer')\n", (3915, 3930), False, 'from squidward import utils\n'), ((4118, 4161), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[1, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (4126, 4161), True, 'import numpy as np\n'), ((4186, 4210), 'squidward.utils.check_valid_cov', 'utils.check_valid_cov', (['x'], {}), '(x)\n', (4207, 4210), False, 'from squidward import utils\n'), ((4254, 4298), 'numpy.array', 'np.array', (['[[-1, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[-1, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (4262, 4298), True, 'import numpy as np\n'), ((4637, 4659), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (4651, 4659), True, 'import numpy as np\n'), ((5334, 5667), 'numpy.array', 'np.array', (['[[0.08647087, 0.44631909, 0.20543369, 0.80556576, 0.484415], [0.83409753, \n 0.7406405, 0.72326909, 0.59616491, 0.86701306], [0.83761527, 0.49645837,\n 0.64037925, 0.95100387, 0.13899134], [0.97684547, 0.30623548, \n 0.95194714, 0.28353989, 0.831871], [0.45327912, 0.74906165, 0.94224464,\n 0.30019356, 0.56802402]]'], {}), '([[0.08647087, 0.44631909, 0.20543369, 0.80556576, 0.484415], [\n 0.83409753, 0.7406405, 0.72326909, 0.59616491, 0.86701306], [0.83761527,\n 0.49645837, 0.64037925, 0.95100387, 0.13899134], [0.97684547, \n 0.30623548, 0.95194714, 0.28353989, 0.831871], [0.45327912, 0.74906165,\n 0.94224464, 0.30019356, 0.56802402]])\n', (5342, 5667), True, 'import numpy as np\n'), ((5796, 
6145), 'numpy.array', 'np.array', (['[[6.39675434, -7.5605537, -1.01890231, 4.9418642, 0.76873378], [-7.5605537,\n 15.51247025, -0.63188021, -8.45294531, -3.96604294], [-1.01890231, -\n 0.63188021, 1.95649949, -0.52470477, 0.14555444], [4.9418642, -\n 8.45294531, -0.52470477, 6.88240616, 0.11092939], [0.76873378, -\n 3.96604294, 0.14555444, 0.11092939, 4.23098611]]'], {}), '([[6.39675434, -7.5605537, -1.01890231, 4.9418642, 0.76873378], [-\n 7.5605537, 15.51247025, -0.63188021, -8.45294531, -3.96604294], [-\n 1.01890231, -0.63188021, 1.95649949, -0.52470477, 0.14555444], [\n 4.9418642, -8.45294531, -0.52470477, 6.88240616, 0.11092939], [\n 0.76873378, -3.96604294, 0.14555444, 0.11092939, 4.23098611]])\n', (5804, 6145), True, 'import numpy as np\n'), ((6257, 6276), 'squidward.utils.Invert', 'utils.Invert', (['"""inv"""'], {}), "('inv')\n", (6269, 6276), False, 'from squidward import utils\n'), ((6311, 6359), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(7)'}), '(output, true, decimal=7)\n', (6334, 6359), True, 'import numpy.testing as npt\n'), ((6375, 6395), 'squidward.utils.Invert', 'utils.Invert', (['"""pinv"""'], {}), "('pinv')\n", (6387, 6395), False, 'from squidward import utils\n'), ((6430, 6478), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(7)'}), '(output, true, decimal=7)\n', (6453, 6478), True, 'import numpy.testing as npt\n'), ((6494, 6515), 'squidward.utils.Invert', 'utils.Invert', (['"""solve"""'], {}), "('solve')\n", (6506, 6515), False, 'from squidward import utils\n'), ((6550, 6598), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(7)'}), '(output, true, decimal=7)\n', (6573, 6598), True, 'import numpy.testing as npt\n'), ((6614, 6638), 'squidward.utils.Invert', 'utils.Invert', (['"""cholesky"""'], {}), "('cholesky')\n", (6626, 6638), False, 'from squidward import utils\n'), ((6673, 6721), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(7)'}), '(output, true, decimal=7)\n', (6696, 6721), True, 'import numpy.testing as npt\n'), ((6737, 6756), 'squidward.utils.Invert', 'utils.Invert', (['"""svd"""'], {}), "('svd')\n", (6749, 6756), False, 'from squidward import utils\n'), ((6791, 6839), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(7)'}), '(output, true, decimal=7)\n', (6814, 6839), True, 'import numpy.testing as npt\n'), ((6855, 6873), 'squidward.utils.Invert', 'utils.Invert', (['"""lu"""'], {}), "('lu')\n", (6867, 6873), False, 'from squidward import utils\n'), ((6908, 6956), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(7)'}), '(output, true, decimal=7)\n', (6931, 6956), True, 'import numpy.testing as npt\n'), ((7243, 7265), 'numpy.random.rand', 'np.random.rand', (['(10)', '(10)'], {}), '(10, 10)\n', (7257, 7265), True, 'import numpy as np\n'), ((8088, 8128), 'numpy.array', 'np.array', (['[0, 1, 0, 2, 1, 0, 1, 2, 0, 1]'], {}), '([0, 1, 0, 2, 1, 0, 1, 2, 0, 1])\n', (8096, 8128), True, 'import numpy as np\n'), ((8135, 8324), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, \n 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0,\n 0.0], [0.0, 1.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0\n ], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 
1.0],\n [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])\n', (8143, 8324), True, 'import numpy as np\n'), ((8529, 8547), 'squidward.utils.onehot', 'utils.onehot', (['y', '(3)'], {}), '(y, 3)\n', (8541, 8547), False, 'from squidward import utils\n'), ((8555, 8604), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (8578, 8604), True, 'import numpy.testing as npt\n'), ((8623, 8638), 'squidward.utils.onehot', 'utils.onehot', (['y'], {}), '(y)\n', (8635, 8638), False, 'from squidward import utils\n'), ((8647, 8696), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (8670, 8696), True, 'import numpy.testing as npt\n'), ((9300, 9489), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, \n 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 0.0,\n 1.0], [1.0, 0.0, 0.0]]'], {}), '([[1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 0.0\n ], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])\n', (9308, 9489), True, 'import numpy as np\n'), ((9665, 9725), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 2.0, 2.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 2.0, 2.0, 2.0, 0.0])\n', (9673, 9725), True, 'import numpy as np\n'), ((9735, 9754), 'squidward.utils.reversehot', 'utils.reversehot', (['y'], {}), '(y)\n', (9751, 9754), False, 'from squidward import utils\n'), ((9763, 9812), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (9786, 9812), True, 'import numpy.testing as npt\n'), ((9831, 9853), 'squidward.utils.reversehot', 'utils.reversehot', (['true'], {}), '(true)\n', (9847, 9853), False, 'from squidward import utils\n'), ((9862, 9911), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (9885, 9911), True, 'import numpy.testing as npt\n'), ((9930, 9954), 'squidward.utils.reversehot', 'utils.reversehot', (['true.T'], {}), '(true.T)\n', (9946, 9954), False, 'from squidward import utils\n'), ((9963, 10012), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (9986, 10012), True, 'import numpy.testing as npt\n'), ((10062, 10081), 'squidward.utils.reversehot', 'utils.reversehot', (['y'], {}), '(y)\n', (10078, 10081), False, 'from squidward import utils\n'), ((10090, 10139), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (10113, 10139), True, 'import numpy.testing as npt\n'), ((10189, 10208), 'squidward.utils.reversehot', 'utils.reversehot', (['y'], {}), '(y)\n', (10205, 10208), False, 'from squidward import utils\n'), ((10217, 10266), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (10240, 10266), True, 'import numpy.testing as npt\n'), ((10718, 10734), 'squidward.utils.sigmoid', 'utils.sigmoid', (['x'], {}), '(x)\n', (10731, 10734), False, 'from squidward import utils\n'), ((10743, 10792), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (10766, 10792), True, 
'import numpy.testing as npt\n'), ((10806, 10855), 'numpy.array', 'np.array', (['[[-8, 0, 6], [8, 3, 1], [10, -300, 11]]'], {}), '([[-8, 0, 6], [8, 3, 1], [10, -300, 11]])\n', (10814, 10855), True, 'import numpy as np\n'), ((10869, 10998), 'numpy.array', 'np.array', (['[[0.0003353501, 0.5, 0.9975273768], [0.9996646499, 0.9525741268, \n 0.7310585786], [0.9999546021, 0.0, 0.9999832986]]'], {}), '([[0.0003353501, 0.5, 0.9975273768], [0.9996646499, 0.9525741268, \n 0.7310585786], [0.9999546021, 0.0, 0.9999832986]])\n', (10877, 10998), True, 'import numpy as np\n'), ((11080, 11096), 'squidward.utils.sigmoid', 'utils.sigmoid', (['x'], {}), '(x)\n', (11093, 11096), False, 'from squidward import utils\n'), ((11105, 11154), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (11128, 11154), True, 'import numpy.testing as npt\n'), ((11173, 11194), 'squidward.utils.sigmoid', 'utils.sigmoid', (['(-12345)'], {}), '(-12345)\n', (11186, 11194), False, 'from squidward import utils\n'), ((11222, 11271), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (11245, 11271), True, 'import numpy.testing as npt\n'), ((11290, 11310), 'squidward.utils.sigmoid', 'utils.sigmoid', (['(12345)'], {}), '(12345)\n', (11303, 11310), False, 'from squidward import utils\n'), ((11338, 11387), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (11361, 11387), True, 'import numpy.testing as npt\n'), ((11501, 11550), 'numpy.array', 'np.array', (['[[-8, 0, 6], [8, 3, 1], [10, -300, 11]]'], {}), '([[-8, 0, 6], [8, 3, 1], [10, -300, 11]])\n', (11509, 11550), True, 'import numpy as np\n'), ((11564, 11731), 'numpy.array', 'np.array', (['[[0.00022388575697, 0.33380896059, 0.66596715365], [0.37255082739, \n 0.35500132884, 0.27244784376], [0.49999282567, 2.5741800386e-131, \n 0.50000717433]]'], {}), '([[0.00022388575697, 0.33380896059, 0.66596715365], [0.37255082739,\n 0.35500132884, 0.27244784376], [0.49999282567, 2.5741800386e-131, \n 0.50000717433]])\n', (11572, 11731), True, 'import numpy as np\n'), ((11820, 11836), 'squidward.utils.softmax', 'utils.softmax', (['x'], {}), '(x)\n', (11833, 11836), False, 'from squidward import utils\n'), ((11845, 11894), 'numpy.testing.assert_almost_equal', 'npt.assert_almost_equal', (['output', 'true'], {'decimal': '(10)'}), '(output, true, decimal=10)\n', (11868, 11894), True, 'import numpy.testing as npt\n'), ((1142, 1161), 'squidward.utils.exactly_1d', 'utils.exactly_1d', (['x'], {}), '(x)\n', (1158, 1161), False, 'from squidward import utils\n'), ((1341, 1360), 'squidward.utils.exactly_1d', 'utils.exactly_1d', (['x'], {}), '(x)\n', (1357, 1360), False, 'from squidward import utils\n'), ((2383, 2402), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (2399, 2402), False, 'from squidward import utils\n'), ((2585, 2604), 'squidward.utils.exactly_2d', 'utils.exactly_2d', (['x'], {}), '(x)\n', (2601, 2604), False, 'from squidward import utils\n'), ((4363, 4387), 'squidward.utils.check_valid_cov', 'utils.check_valid_cov', (['x'], {}), '(x)\n', (4384, 4387), False, 'from squidward import utils\n'), ((4707, 4743), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (4730, 4743), False, 'import warnings\n'), ((4819, 4850), 'warnings.simplefilter', 'warnings.simplefilter', 
(['"""always"""'], {}), "('always')\n", (4840, 4850), False, 'import warnings\n'), ((4896, 4922), 'squidward.utils.check_valid_cov', 'utils.check_valid_cov', (['arr'], {}), '(arr)\n', (4917, 4922), False, 'from squidward import utils\n'), ((7067, 7087), 'squidward.utils.Invert', 'utils.Invert', (['"""fake"""'], {}), "('fake')\n", (7079, 7087), False, 'from squidward import utils\n'), ((7314, 7350), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (7337, 7350), False, 'import warnings\n'), ((7426, 7457), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (7447, 7457), False, 'import warnings\n'), ((7509, 7523), 'squidward.utils.Invert', 'utils.Invert', ([], {}), '()\n', (7521, 7523), False, 'from squidward import utils\n'), ((8764, 8782), 'squidward.utils.onehot', 'utils.onehot', (['y', '(4)'], {}), '(y, 4)\n', (8776, 8782), False, 'from squidward import utils\n'), ((8963, 8981), 'squidward.utils.onehot', 'utils.onehot', (['y', '(2)'], {}), '(y, 2)\n', (8975, 8981), False, 'from squidward import utils\n'), ((12398, 12434), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (12421, 12434), False, 'import warnings\n'), ((12510, 12541), 'warnings.simplefilter', 'warnings.simplefilter', (['"""always"""'], {}), "('always')\n", (12531, 12541), False, 'import warnings\n'), ((1584, 1595), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1591, 1595), True, 'import numpy as np\n'), ((1623, 1634), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1630, 1634), True, 'import numpy as np\n'), ((1757, 1768), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1764, 1768), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Compares two bottom detections.
Copyright (c) 2021, Contributors to the CRIMAC project.
Licensed under the MIT license.
"""
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import xarray as xr
def compare(zarr_file: str,
a_bottom_parquet_file: str,
b_bottom_parquet_file: str,
out_parquet_file: str) -> None:
"""
Compares two bottom detections for the same data file and writes the result to an output file.
:param zarr_file: the sv data file
:param a_bottom_parquet_file: the first bottom detection file
:param b_bottom_parquet_file: the second bottom detection file
:param out_parquet_file: the output file for the comparison
"""
dataset = xr.open_zarr(zarr_file, chunks={'frequency': 'auto', 'ping_time': 'auto', 'range': -1})
print()
print(f'zarr_file: {zarr_file}')
print(dataset)
channel_index = 0
channel_id = dataset['channel_id'][channel_index].values
ping_time = dataset['ping_time'].values
r = dataset['range']
heave = dataset['heave']
sv = dataset['sv'][channel_index]
transducer_draft = dataset['transducer_draft']
min_range = r[0]
sample_distance = r[1] - r[0]
heave_corrected_transducer_depth = heave + transducer_draft[channel_index]
a_bottom_depth = read_bottom_depth(a_bottom_parquet_file, channel_id, ping_time)
a_bottom_range = a_bottom_depth - heave_corrected_transducer_depth
a_bottom_indexes = (a_bottom_range - min_range) / sample_distance
b_bottom_depth = read_bottom_depth(b_bottom_parquet_file, channel_id, ping_time)
b_bottom_range = b_bottom_depth - heave_corrected_transducer_depth
b_bottom_indexes = (b_bottom_range - min_range) / sample_distance
depth_diff = (a_bottom_depth - b_bottom_depth) \
.rename('depth_diff')
sv_sum_diff = xr.apply_ufunc(per_ping_sv_sum_diff,
sv,
a_bottom_indexes,
b_bottom_indexes,
input_core_dims=[['range'], [], []],
kwargs={},
vectorize=True,
dask='parallelized',
output_dtypes=[np.float64])
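    # vectorize=True broadcasts the scalar per-ping function over the ping
    # dimension; dask='parallelized' keeps the computation lazy and
    # chunk-parallel instead of materializing the whole sv array.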
imr_constant = 4.0 * np.pi * 1852.0 * 1852.0
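    # 4*pi*1852**2 converts the vertically summed sv to nautical area
    # scattering (sA, m^2 per square nautical mile); 1852 m is one
    # nautical mile.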
sa_diff = (imr_constant * sample_distance * sv_sum_diff) \
.rename('sa_diff')
a_has_bottom = a_bottom_depth.where(np.isnan(a_bottom_depth), 1.0).fillna(0.0)
b_has_bottom = b_bottom_depth.where(np.isnan(b_bottom_depth), 1.0).fillna(0.0)
has_bottom_diff = (a_has_bottom - b_has_bottom) \
.rename('has_bottom_diff')
df = depth_diff.to_dataframe()
df.reset_index(level=0, inplace=True)
df = df.assign(
sa_diff=sa_diff,
has_bottom_diff=has_bottom_diff,
)
table = pa.Table.from_pandas(df)
with pq.ParquetWriter(out_parquet_file, table.schema) as writer:
writer.write_table(table=table)
print()
print(f'out_parquet_file: {out_parquet_file}')
print(table)
def read_bottom_depth(parquet_file: str, channel_id, ping_time) -> xr.DataArray:
df = pd.read_parquet(parquet_file)
    df = df[(df['channel_id'] == channel_id) & (df['object_id'] == 'bottom')]
df = df[['ping_time', 'mask_depth_upper']]
df = df.set_index('ping_time')
df = df.reindex(ping_time)
return df['mask_depth_upper'][ping_time].to_xarray()
def per_ping_sv_sum_diff(sv: xr.DataArray, a_index: float, b_index: float) -> float:
i_a = len(sv) if np.isnan(a_index) else int(a_index)
i_b = len(sv) if np.isnan(b_index) else int(b_index)
if i_a >= i_b:
return sv[i_b:i_a].sum()
else:
return -sv[i_a:i_b].sum()
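# Sign convention: a positive per-ping sum means detection A places the bottom
# deeper (integrates more samples) than detection B; negative means shallower.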
def print_comparison(comparison_parquet_file: str) -> None:
df = pd.read_parquet(comparison_parquet_file)
ping_time = df['ping_time']
depth_diff = df['depth_diff']
has_bottom_diff = df['has_bottom_diff']
sa_diff = df['sa_diff']
print()
print(f'comparison_parquet_file: {comparison_parquet_file}')
print(f'ping_time:')
print(f' min: {ping_time.min()}')
print(f' max: {ping_time.max()}')
print(f'depth_diff:')
print(f' min: {depth_diff.min()}')
print(f' max: {depth_diff.max()}')
print(f' sum: {depth_diff.abs().sum()}')
print(f'sa_diff:')
print(f' min: {sa_diff.min()}')
print(f' max: {sa_diff.max()}')
print(f' sum: {sa_diff.abs().sum()}')
print(f'has_bottom_diff:')
print(f' count +1: {sum(map(lambda x: x > 0, has_bottom_diff))}')
print(f' count -1: {sum(map(lambda x: x < 0, has_bottom_diff))}')
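# A minimal driver, with hypothetical file paths:
#   compare('sv.zarr', 'a_bottom.parquet', 'b_bottom.parquet', 'diff.parquet')
#   print_comparison('diff.parquet')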
|
[
"numpy.isnan",
"pyarrow.Table.from_pandas",
"pandas.read_parquet",
"xarray.open_zarr",
"xarray.apply_ufunc",
"pyarrow.parquet.ParquetWriter"
] |
[((783, 874), 'xarray.open_zarr', 'xr.open_zarr', (['zarr_file'], {'chunks': "{'frequency': 'auto', 'ping_time': 'auto', 'range': -1}"}), "(zarr_file, chunks={'frequency': 'auto', 'ping_time': 'auto',\n 'range': -1})\n", (795, 874), True, 'import xarray as xr\n'), ((1904, 2102), 'xarray.apply_ufunc', 'xr.apply_ufunc', (['per_ping_sv_sum_diff', 'sv', 'a_bottom_indexes', 'b_bottom_indexes'], {'input_core_dims': "[['range'], [], []]", 'kwargs': '{}', 'vectorize': '(True)', 'dask': '"""parallelized"""', 'output_dtypes': '[np.float64]'}), "(per_ping_sv_sum_diff, sv, a_bottom_indexes, b_bottom_indexes,\n input_core_dims=[['range'], [], []], kwargs={}, vectorize=True, dask=\n 'parallelized', output_dtypes=[np.float64])\n", (1918, 2102), True, 'import xarray as xr\n'), ((2936, 2960), 'pyarrow.Table.from_pandas', 'pa.Table.from_pandas', (['df'], {}), '(df)\n', (2956, 2960), True, 'import pyarrow as pa\n'), ((3243, 3272), 'pandas.read_parquet', 'pd.read_parquet', (['parquet_file'], {}), '(parquet_file)\n', (3258, 3272), True, 'import pandas as pd\n'), ((3884, 3924), 'pandas.read_parquet', 'pd.read_parquet', (['comparison_parquet_file'], {}), '(comparison_parquet_file)\n', (3899, 3924), True, 'import pandas as pd\n'), ((2970, 3018), 'pyarrow.parquet.ParquetWriter', 'pq.ParquetWriter', (['out_parquet_file', 'table.schema'], {}), '(out_parquet_file, table.schema)\n', (2986, 3018), True, 'import pyarrow.parquet as pq\n'), ((3624, 3641), 'numpy.isnan', 'np.isnan', (['a_index'], {}), '(a_index)\n', (3632, 3641), True, 'import numpy as np\n'), ((3681, 3698), 'numpy.isnan', 'np.isnan', (['b_index'], {}), '(b_index)\n', (3689, 3698), True, 'import numpy as np\n'), ((2538, 2562), 'numpy.isnan', 'np.isnan', (['a_bottom_depth'], {}), '(a_bottom_depth)\n', (2546, 2562), True, 'import numpy as np\n'), ((2621, 2645), 'numpy.isnan', 'np.isnan', (['b_bottom_depth'], {}), '(b_bottom_depth)\n', (2629, 2645), True, 'import numpy as np\n')]
|
# Copyright 2022 Quantapix Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import os
import pickle
import time
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import (
cached_path,
is_datasets_available,
is_faiss_available,
is_remote_url,
logging,
requires_backends,
)
from .configuration_rag import RagConfig
from .tokenization_rag import RagTokenizer
if is_datasets_available():
from datasets import Dataset, load_dataset, load_from_disk
if is_faiss_available():
import faiss
logger = logging.get_logger(__name__)
LEGACY_INDEX_PATH = "https://storage.googleapis.com/huggingface-nlp/datasets/wiki_dpr/"
class Index:
def get_doc_dicts(self, doc_ids: np.ndarray):
raise NotImplementedError
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5):
raise NotImplementedError
def is_initialized(self):
raise NotImplementedError
def init_index(self):
raise NotImplementedError
class LegacyIndex(Index):
INDEX_FILENAME = "hf_bert_base.hnswSQ8_correct_phi_128.c_index"
PASSAGE_FILENAME = "psgs_w100.tsv.pkl"
def __init__(self, vector_size, index_path):
self.index_id_to_db_id = []
self.index_path = index_path
self.passages = self._load_passages()
self.vector_size = vector_size
self.index = None
self._index_initialized = False
def _resolve_path(self, index_path, filename):
assert os.path.isdir(index_path) or is_remote_url(
index_path
), "Please specify a valid `index_path`."
archive_file = os.path.join(index_path, filename)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(archive_file)
except EnvironmentError:
msg = (
f"Can't load '{archive_file}'. Make sure that:\n\n"
f"- '{index_path}' is a correct remote path to a directory containing a file named {filename}\n\n"
f"- or '{index_path}' is the correct path to a directory containing a file named {filename}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading file {archive_file}")
else:
logger.info(f"loading file {archive_file} from cache at {resolved_archive_file}")
return resolved_archive_file
def _load_passages(self):
logger.info(f"Loading passages from {self.index_path}")
passages_path = self._resolve_path(self.index_path, self.PASSAGE_FILENAME)
with open(passages_path, "rb") as passages_file:
passages = pickle.load(passages_file)
return passages
def _deserialize_index(self):
logger.info(f"Loading index from {self.index_path}")
resolved_index_path = self._resolve_path(
self.index_path, self.INDEX_FILENAME + ".index.dpr"
)
self.index = faiss.read_index(resolved_index_path)
resolved_meta_path = self._resolve_path(
self.index_path, self.INDEX_FILENAME + ".index_meta.dpr"
)
with open(resolved_meta_path, "rb") as metadata_file:
self.index_id_to_db_id = pickle.load(metadata_file)
assert (
len(self.index_id_to_db_id) == self.index.ntotal
), "Deserialized index_id_to_db_id should match faiss index size"
def is_initialized(self):
return self._index_initialized
def init_index(self):
index = faiss.IndexHNSWFlat(self.vector_size + 1, 512)
index.hnsw.efSearch = 128
index.hnsw.efConstruction = 200
self.index = index
self._deserialize_index()
self._index_initialized = True
    def get_doc_dicts(self, doc_ids: np.ndarray):
doc_list = []
for doc_ids_i in doc_ids:
ids = [str(int(doc_id)) for doc_id in doc_ids_i]
docs = [self.passages[doc_id] for doc_id in ids]
doc_list.append(docs)
doc_dicts = []
for docs in doc_list:
doc_dict = {}
doc_dict["title"] = [doc[1] for doc in docs]
doc_dict["text"] = [doc[0] for doc in docs]
doc_dicts.append(doc_dict)
return doc_dicts
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5):
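        # The legacy DPR index stores passage vectors with one extra auxiliary
        # component (the "phi" trick), so padding each query with a zero lets
        # an L2 HNSW index answer maximum-inner-product searches.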
aux_dim = np.zeros(len(question_hidden_states), dtype="float32").reshape(-1, 1)
        query_hnsw_vectors = np.hstack((question_hidden_states, aux_dim))
        _, docs_ids = self.index.search(query_hnsw_vectors, n_docs)
vectors = [
[self.index.reconstruct(int(doc_id))[:-1] for doc_id in doc_ids] for doc_ids in docs_ids
]
ids = [[int(self.index_id_to_db_id[doc_id]) for doc_id in doc_ids] for doc_ids in docs_ids]
return np.array(ids), np.array(vectors)
class HFIndexBase(Index):
def __init__(self, vector_size, dataset, index_initialized=False):
self.vector_size = vector_size
self.dataset = dataset
self._index_initialized = index_initialized
self._check_dataset_format(with_index=index_initialized)
dataset.set_format(
"numpy", columns=["embeddings"], output_all_columns=True, dtype="float32"
)
def _check_dataset_format(self, with_index):
if not isinstance(self.dataset, Dataset):
raise ValueError(
f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}"
)
if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0:
raise ValueError(
"Dataset should be a dataset with the following columns: "
"title (str), text (str) and embeddings (arrays of dimension vector_size), "
f"but got columns {self.dataset.column_names}"
)
if with_index and "embeddings" not in self.dataset.list_indexes():
raise ValueError(
"Missing faiss index in the dataset. Make sure you called `dataset.add_faiss_index` to compute it "
"or `dataset.load_faiss_index` to load one from the disk."
)
def init_index(self):
raise NotImplementedError()
def is_initialized(self):
return self._index_initialized
def get_doc_dicts(self, doc_ids: np.ndarray):
return [self.dataset[doc_ids[i].tolist()] for i in range(doc_ids.shape[0])]
def get_top_docs(self, question_hidden_states: np.ndarray, n_docs=5):
_, ids = self.dataset.search_batch("embeddings", question_hidden_states, n_docs)
docs = [self.dataset[[i for i in indices if i >= 0]] for indices in ids]
vectors = [doc["embeddings"] for doc in docs]
for i in range(len(vectors)):
if len(vectors[i]) < n_docs:
vectors[i] = np.vstack(
[vectors[i], np.zeros((n_docs - len(vectors[i]), self.vector_size))]
)
        return np.array(ids), np.array(vectors)  # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
class CanonicalHFIndex(HFIndexBase):
def __init__(
self,
vector_size,
dataset_name="wiki_dpr",
dataset_split="train",
index_name=None,
index_path=None,
use_dummy_dataset=False,
):
if int(index_path is None) + int(index_name is None) != 1:
raise ValueError("Please provide `index_name` or `index_path`.")
self.dataset_name = dataset_name
self.dataset_split = dataset_split
self.index_name = index_name
self.index_path = index_path
self.use_dummy_dataset = use_dummy_dataset
logger.info(f"Loading passages from {self.dataset_name}")
dataset = load_dataset(
self.dataset_name,
with_index=False,
split=self.dataset_split,
dummy=self.use_dummy_dataset,
)
super().__init__(vector_size, dataset, index_initialized=False)
def init_index(self):
if self.index_path is not None:
logger.info(f"Loading index from {self.index_path}")
self.dataset.load_faiss_index("embeddings", file=self.index_path)
else:
logger.info(f"Loading index from {self.dataset_name} with index name {self.index_name}")
self.dataset = load_dataset(
self.dataset_name,
with_embeddings=True,
with_index=True,
split=self.dataset_split,
index_name=self.index_name,
dummy=self.use_dummy_dataset,
)
self.dataset.set_format("numpy", columns=["embeddings"], output_all_columns=True)
self._index_initialized = True
class CustomHFIndex(HFIndexBase):
def __init__(self, vector_size, dataset, index_path=None):
super().__init__(vector_size, dataset, index_initialized=index_path is None)
self.index_path = index_path
@classmethod
def load_from_disk(cls, vector_size, dataset_path, index_path):
logger.info(f"Loading passages from {dataset_path}")
if dataset_path is None or index_path is None:
raise ValueError(
"Please provide `dataset_path` and `index_path` after calling `dataset.save_to_disk(dataset_path)` "
"and `dataset.get_index('embeddings').save(index_path)`."
)
dataset = load_from_disk(dataset_path)
return cls(vector_size=vector_size, dataset=dataset, index_path=index_path)
def init_index(self):
if not self.is_initialized():
logger.info(f"Loading index from {self.index_path}")
self.dataset.load_faiss_index("embeddings", file=self.index_path)
self._index_initialized = True
class RagRetriever:
def __init__(
self,
config,
question_encoder_tokenizer,
generator_tokenizer,
index=None,
init_retrieval=True,
):
self._init_retrieval = init_retrieval
requires_backends(self, ["datasets", "faiss"])
super().__init__()
self.index = index or self._build_index(config)
self.generator_tokenizer = generator_tokenizer
self.question_encoder_tokenizer = question_encoder_tokenizer
self.n_docs = config.n_docs
self.batch_size = config.retrieval_batch_size
self.config = config
if self._init_retrieval:
self.init_retrieval()
self.ctx_encoder_tokenizer = None
self.return_tokenized_docs = False
@staticmethod
def _build_index(config):
if config.index_name == "legacy":
return LegacyIndex(
config.retrieval_vector_size,
config.index_path or LEGACY_INDEX_PATH,
)
elif config.index_name == "custom":
return CustomHFIndex.load_from_disk(
vector_size=config.retrieval_vector_size,
dataset_path=config.passages_path,
index_path=config.index_path,
)
else:
return CanonicalHFIndex(
vector_size=config.retrieval_vector_size,
dataset_name=config.dataset,
dataset_split=config.dataset_split,
index_name=config.index_name,
index_path=config.index_path,
use_dummy_dataset=config.use_dummy_dataset,
)
@classmethod
def from_pretrained(cls, retriever_name_or_path, indexed_dataset=None, **kw):
requires_backends(cls, ["datasets", "faiss"])
config = kw.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kw)
rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
question_encoder_tokenizer = rag_tokenizer.question_encoder
generator_tokenizer = rag_tokenizer.generator
if indexed_dataset is not None:
config.index_name = "custom"
index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
else:
index = cls._build_index(config)
return cls(
config,
question_encoder_tokenizer=question_encoder_tokenizer,
generator_tokenizer=generator_tokenizer,
index=index,
)
def save_pretrained(self, save_directory):
if isinstance(self.index, CustomHFIndex):
if self.config.index_path is None:
index_path = os.path.join(save_directory, "hf_dataset_index.faiss")
self.index.dataset.get_index("embeddings").save(index_path)
self.config.index_path = index_path
if self.config.passages_path is None:
passages_path = os.path.join(save_directory, "hf_dataset")
# datasets don't support save_to_disk with indexes right now
faiss_index = self.index.dataset._indexes.pop("embeddings")
self.index.dataset.save_to_disk(passages_path)
self.index.dataset._indexes["embeddings"] = faiss_index
self.config.passages_path = passages_path
self.config.save_pretrained(save_directory)
rag_tokenizer = RagTokenizer(
question_encoder=self.question_encoder_tokenizer,
generator=self.generator_tokenizer,
)
rag_tokenizer.save_pretrained(save_directory)
def init_retrieval(self):
logger.info("initializing retrieval")
self.index.init_index()
def postprocess_docs(self, docs, input_strings, prefix, n_docs, return_tensors=None):
def cat_input_and_doc(doc_title, doc_text, input_string, prefix):
if doc_title.startswith('"'):
doc_title = doc_title[1:]
if doc_title.endswith('"'):
doc_title = doc_title[:-1]
if prefix is None:
prefix = ""
out = (
prefix
+ doc_title
+ self.config.title_sep
+ doc_text
+ self.config.doc_sep
+ input_string
            ).replace("  ", " ")
return out
rag_input_strings = [
cat_input_and_doc(
docs[i]["title"][j],
docs[i]["text"][j],
input_strings[i],
prefix,
)
for i in range(len(docs))
for j in range(n_docs)
]
contextualized_inputs = self.generator_tokenizer.batch_encode_plus(
rag_input_strings,
max_length=self.config.max_combined_length,
return_tensors=return_tensors,
padding="max_length",
truncation=True,
)
return contextualized_inputs["input_ids"], contextualized_inputs["attention_mask"]
def _chunk_tensor(self, t, chunk_size):
return [t[i : i + chunk_size] for i in range(0, len(t), chunk_size)]
def _main_retrieve(self, question_hidden_states: np.ndarray, n_docs):
question_hidden_states_batched = self._chunk_tensor(question_hidden_states, self.batch_size)
ids_batched = []
vectors_batched = []
for question_hidden_states in question_hidden_states_batched:
start_time = time.time()
ids, vectors = self.index.get_top_docs(question_hidden_states, n_docs)
logger.debug(
f"index search time: {time.time() - start_time} sec, batch size {question_hidden_states.shape}"
)
ids_batched.extend(ids)
vectors_batched.extend(vectors)
return (
np.array(ids_batched),
np.array(vectors_batched),
) # shapes (batch_size, n_docs) and (batch_size, n_docs, d)
def retrieve(self, question_hidden_states: np.ndarray, n_docs):
doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
def set_ctx_encoder_tokenizer(self, ctx_encoder_tokenizer: PreTrainedTokenizer):
# used in end2end retriever training
self.ctx_encoder_tokenizer = ctx_encoder_tokenizer
self.return_tokenized_docs = True
def __call__(
self,
question_input_ids,
question_hidden_states,
prefix=None,
n_docs=None,
return_tensors=None,
):
n_docs = n_docs if n_docs is not None else self.n_docs
prefix = prefix if prefix is not None else self.config.generator.prefix
retrieved_doc_embeds, doc_ids, docs = self.retrieve(question_hidden_states, n_docs)
input_strings = self.question_encoder_tokenizer.batch_decode(
question_input_ids, skip_special_tokens=True
)
context_input_ids, context_attention_mask = self.postprocess_docs(
docs, input_strings, prefix, n_docs, return_tensors=return_tensors
)
if self.return_tokenized_docs:
            retrieved_doc_text = []
            retrieved_doc_title = []
            for b_idx in range(len(docs)):
                for doc_idx in range(n_docs):
                    retrieved_doc_text.append(docs[b_idx]["text"][doc_idx])
                    retrieved_doc_title.append(docs[b_idx]["title"][doc_idx])
            tokenized_docs = self.ctx_encoder_tokenizer(
                retrieved_doc_title,
                retrieved_doc_text,
                truncation=True,
                padding="longest",
                return_tensors=return_tensors,
            )
return BatchEncoding(
{
"context_input_ids": context_input_ids,
"context_attention_mask": context_attention_mask,
"retrieved_doc_embeds": retrieved_doc_embeds,
"doc_ids": doc_ids,
"tokenized_doc_ids": tokenized_docs["input_ids"],
"tokenized_doc_attention_mask": tokenized_docs["attention_mask"],
},
tensor_type=return_tensors,
)
else:
return BatchEncoding(
{
"context_input_ids": context_input_ids,
"context_attention_mask": context_attention_mask,
"retrieved_doc_embeds": retrieved_doc_embeds,
"doc_ids": doc_ids,
},
tensor_type=return_tensors,
)
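# Typical construction (mirrors the transformers RAG examples; the model id
# and dummy-dataset flag are illustrative):
#   retriever = RagRetriever.from_pretrained(
#       "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
#   )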
|
[
"datasets.load_dataset",
"faiss.IndexHNSWFlat",
"faiss.read_index",
"os.path.isdir",
"numpy.hstack",
"time.time",
"pickle.load",
"datasets.load_from_disk",
"numpy.array",
"os.path.join"
] |
[((2300, 2334), 'os.path.join', 'os.path.join', (['index_path', 'filename'], {}), '(index_path, filename)\n', (2312, 2334), False, 'import os\n'), ((3670, 3707), 'faiss.read_index', 'faiss.read_index', (['resolved_index_path'], {}), '(resolved_index_path)\n', (3686, 3707), False, 'import faiss\n'), ((4227, 4273), 'faiss.IndexHNSWFlat', 'faiss.IndexHNSWFlat', (['(self.vector_size + 1)', '(512)'], {}), '(self.vector_size + 1, 512)\n', (4246, 4273), False, 'import faiss\n'), ((5157, 5201), 'numpy.hstack', 'np.hstack', (['(question_hidden_states, aux_dim)'], {}), '((question_hidden_states, aux_dim))\n', (5166, 5201), True, 'import numpy as np\n'), ((8472, 8581), 'datasets.load_dataset', 'load_dataset', (['self.dataset_name'], {'with_index': '(False)', 'split': 'self.dataset_split', 'dummy': 'self.use_dummy_dataset'}), '(self.dataset_name, with_index=False, split=self.dataset_split,\n dummy=self.use_dummy_dataset)\n', (8484, 8581), False, 'from datasets import Dataset, load_dataset, load_from_disk\n'), ((10136, 10164), 'datasets.load_from_disk', 'load_from_disk', (['dataset_path'], {}), '(dataset_path)\n', (10150, 10164), False, 'from datasets import Dataset, load_dataset, load_from_disk\n'), ((2160, 2185), 'os.path.isdir', 'os.path.isdir', (['index_path'], {}), '(index_path)\n', (2173, 2185), False, 'import os\n'), ((3378, 3404), 'pickle.load', 'pickle.load', (['passages_file'], {}), '(passages_file)\n', (3389, 3404), False, 'import pickle\n'), ((3935, 3961), 'pickle.load', 'pickle.load', (['metadata_file'], {}), '(metadata_file)\n', (3946, 3961), False, 'import pickle\n'), ((5516, 5529), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (5524, 5529), True, 'import numpy as np\n'), ((5531, 5548), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (5539, 5548), True, 'import numpy as np\n'), ((7675, 7688), 'numpy.array', 'np.array', (['ids'], {}), '(ids)\n', (7683, 7688), True, 'import numpy as np\n'), ((7690, 7707), 'numpy.array', 'np.array', (['vectors'], {}), '(vectors)\n', (7698, 7707), True, 'import numpy as np\n'), ((9061, 9224), 'datasets.load_dataset', 'load_dataset', (['self.dataset_name'], {'with_embeddings': '(True)', 'with_index': '(True)', 'split': 'self.dataset_split', 'index_name': 'self.index_name', 'dummy': 'self.use_dummy_dataset'}), '(self.dataset_name, with_embeddings=True, with_index=True,\n split=self.dataset_split, index_name=self.index_name, dummy=self.\n use_dummy_dataset)\n', (9073, 9224), False, 'from datasets import Dataset, load_dataset, load_from_disk\n'), ((15993, 16004), 'time.time', 'time.time', ([], {}), '()\n', (16002, 16004), False, 'import time\n'), ((16349, 16370), 'numpy.array', 'np.array', (['ids_batched'], {}), '(ids_batched)\n', (16357, 16370), True, 'import numpy as np\n'), ((16384, 16409), 'numpy.array', 'np.array', (['vectors_batched'], {}), '(vectors_batched)\n', (16392, 16409), True, 'import numpy as np\n'), ((13203, 13257), 'os.path.join', 'os.path.join', (['save_directory', '"""hf_dataset_index.faiss"""'], {}), "(save_directory, 'hf_dataset_index.faiss')\n", (13215, 13257), False, 'import os\n'), ((13468, 13510), 'os.path.join', 'os.path.join', (['save_directory', '"""hf_dataset"""'], {}), "(save_directory, 'hf_dataset')\n", (13480, 13510), False, 'import os\n'), ((16152, 16163), 'time.time', 'time.time', ([], {}), '()\n', (16161, 16163), False, 'import time\n')]
|
import sys
import os, os.path
import subprocess
import numpy
if __name__ == "__main__":
formula = sys.argv[1]
kind = sys.argv[2]
program = "bsub"
params = ["-n", "4", "-W", "04:00", "-R", "\"rusage[mem=4096]\""]
run_string = "\"mpirun -n 4 gpaw-python run_doping.py {0} {1} {2:.2f}\""
for fermi_shift in numpy.linspace(-1.0, 1.0, 41):
call_cmd = [program] + params + [run_string.format(formula, kind, fermi_shift)]
print(" ".join(call_cmd))
choice = input("Should we proceed? [y/n]")
if choice in ("y", "Y"):
for fermi_shift in numpy.linspace(-1.0, 1.0, 41):
call_cmd = [program] + params + [run_string.format(formula, kind, fermi_shift)]
proc = subprocess.run(" ".join(call_cmd),
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
print(proc.stdout.decode("utf8"))
|
[
"numpy.linspace"
] |
[((329, 358), 'numpy.linspace', 'numpy.linspace', (['(-1.0)', '(1.0)', '(41)'], {}), '(-1.0, 1.0, 41)\n', (343, 358), False, 'import numpy\n'), ((585, 614), 'numpy.linspace', 'numpy.linspace', (['(-1.0)', '(1.0)', '(41)'], {}), '(-1.0, 1.0, 41)\n', (599, 614), False, 'import numpy\n')]
|
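For reference, the submission command assembled by the script above can be reproduced standalone; the formula and kind arguments below ("MoS2", "bulk") are made-up placeholders, not values from the source.

# Standalone sketch of one assembled job command (hypothetical arguments)
run_string = "\"mpirun -n 4 gpaw-python run_doping.py {0} {1} {2:.2f}\""
params = ["-n", "4", "-W", "04:00", "-R", "\"rusage[mem=4096]\""]
call_cmd = ["bsub"] + params + [run_string.format("MoS2", "bulk", -0.05)]
print(" ".join(call_cmd))
# bsub -n 4 -W 04:00 -R "rusage[mem=4096]" "mpirun -n 4 gpaw-python run_doping.py MoS2 bulk -0.05"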
import numpy as np
import utils
from torch import nn
import torch
import torch.nn.functional as F
from metrics import AllInOneMeter
import time
import torchvision.transforms as transforms
def validation_binary(model: nn.Module, criterion, valid_loader, device, device_id, num_classes=None):
with torch.no_grad():
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
meter = AllInOneMeter(device_id)
start_time = time.time()
w1 = 1.0
w2 = 0.5
w3 = 0.5
for valid_image, valid_mask, valid_mask_ind, _ in valid_loader:
valid_image = valid_image.to(device) # [N, 1, H, W]
valid_mask = valid_mask.to(device).type(
torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor)
valid_image = valid_image.permute(0, 3, 1, 2)
valid_mask = valid_mask.permute(0, 3, 1, 2)
valid_mask_ind = valid_mask_ind.to(device).type(
torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor)
outputs, outputs_mask_ind1, outputs_mask_ind2 = model(valid_image)
valid_prob = F.sigmoid(outputs)
valid_mask_ind_prob1 = F.sigmoid(outputs_mask_ind1)
            valid_mask_ind_prob2 = F.sigmoid(outputs_mask_ind2)
loss1 = criterion(outputs, valid_mask)
loss2 = F.binary_cross_entropy_with_logits(outputs_mask_ind1, valid_mask_ind)
loss3 = F.binary_cross_entropy_with_logits(outputs_mask_ind2, valid_mask_ind)
loss = loss1 * w1 + loss2 * w2 + loss3 * w3
meter.add(valid_prob, valid_mask, valid_mask_ind_prob1, valid_mask_ind_prob2, valid_mask_ind,
loss1.item(), loss2.item(), loss3.item(), loss.item())
valid_metrics = meter.value()
epoch_time = time.time() - start_time
valid_metrics['epoch_time'] = epoch_time
valid_metrics['image'] = valid_image.data
valid_metrics['mask'] = valid_mask.data
valid_metrics['prob'] = valid_prob.data
return valid_metrics
def get_jaccard(y_true, y_pred):
epsilon = 1e-15
intersection = (y_pred * y_true).sum(dim=-2).sum(dim=-1)
union = y_true.sum(dim=-2).sum(dim=-1) + y_pred.sum(dim=-2).sum(dim=-1)
return (intersection / (union - intersection + epsilon)).mean()
def calculate_confusion_matrix_from_arrays(prediction, ground_truth, nr_labels):
replace_indices = np.vstack((
ground_truth.flatten(),
prediction.flatten())
).T
confusion_matrix, _ = np.histogramdd(
replace_indices,
bins=(nr_labels, nr_labels),
range=[(0, nr_labels), (0, nr_labels)]
)
confusion_matrix = confusion_matrix.astype(np.uint32)
return confusion_matrix
def calculate_iou(confusion_matrix):
ious = []
for index in range(confusion_matrix.shape[0]):
true_positives = confusion_matrix[index, index]
false_positives = confusion_matrix[:, index].sum() - true_positives
false_negatives = confusion_matrix[index, :].sum() - true_positives
denom = true_positives + false_positives + false_negatives
if denom == 0:
iou = 0
else:
iou = float(true_positives) / denom
ious.append(iou)
return ious
def calculate_dice(confusion_matrix):
dices = []
for index in range(confusion_matrix.shape[0]):
true_positives = confusion_matrix[index, index]
false_positives = confusion_matrix[:, index].sum() - true_positives
false_negatives = confusion_matrix[index, :].sum() - true_positives
denom = 2 * true_positives + false_positives + false_negatives
if denom == 0:
dice = 0
else:
dice = 2 * float(true_positives) / denom
dices.append(dice)
return dices
|
[
"numpy.histogramdd",
"torch.nn.functional.binary_cross_entropy_with_logits",
"time.time",
"torch.cuda.is_available",
"torch.nn.functional.sigmoid",
"metrics.AllInOneMeter",
"torchvision.transforms.Normalize",
"torch.no_grad"
] |
[((2637, 2741), 'numpy.histogramdd', 'np.histogramdd', (['replace_indices'], {'bins': '(nr_labels, nr_labels)', 'range': '[(0, nr_labels), (0, nr_labels)]'}), '(replace_indices, bins=(nr_labels, nr_labels), range=[(0,\n nr_labels), (0, nr_labels)])\n', (2651, 2741), True, 'import numpy as np\n'), ((303, 318), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (316, 318), False, 'import torch\n'), ((341, 416), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (361, 416), True, 'import torchvision.transforms as transforms\n'), ((474, 498), 'metrics.AllInOneMeter', 'AllInOneMeter', (['device_id'], {}), '(device_id)\n', (487, 498), False, 'from metrics import AllInOneMeter\n'), ((520, 531), 'time.time', 'time.time', ([], {}), '()\n', (529, 531), False, 'import time\n'), ((1237, 1255), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['outputs'], {}), '(outputs)\n', (1246, 1255), True, 'import torch.nn.functional as F\n'), ((1291, 1319), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['outputs_mask_ind1'], {}), '(outputs_mask_ind1)\n', (1300, 1319), True, 'import torch.nn.functional as F\n'), ((1355, 1383), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['outputs_mask_ind1'], {}), '(outputs_mask_ind1)\n', (1364, 1383), True, 'import torch.nn.functional as F\n'), ((1457, 1526), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['outputs_mask_ind1', 'valid_mask_ind'], {}), '(outputs_mask_ind1, valid_mask_ind)\n', (1491, 1526), True, 'import torch.nn.functional as F\n'), ((1547, 1616), 'torch.nn.functional.binary_cross_entropy_with_logits', 'F.binary_cross_entropy_with_logits', (['outputs_mask_ind2', 'valid_mask_ind'], {}), '(outputs_mask_ind2, valid_mask_ind)\n', (1581, 1616), True, 'import torch.nn.functional as F\n'), ((1917, 1928), 'time.time', 'time.time', ([], {}), '()\n', (1926, 1928), False, 'import time\n'), ((815, 840), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (838, 840), False, 'import torch\n'), ((1082, 1107), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1105, 1107), False, 'import torch\n')]
|
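To make the confusion-matrix metrics above concrete, here is a minimal worked check; the 2x2 matrix is invented, and calculate_iou/calculate_dice are the functions defined in the snippet above.

import numpy as np

# Hypothetical 2-class confusion matrix: rows = ground truth, columns = prediction.
cm = np.array([[8, 2],
               [1, 9]], dtype=np.uint32)
# Class 0: TP=8, FP=1 (column sum minus TP), FN=2 (row sum minus TP).
print(calculate_iou(cm))   # [8/11, 9/12] ~ [0.727, 0.750]
print(calculate_dice(cm))  # [16/19, 18/21] ~ [0.842, 0.857]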
import numpy
from .base import Algorithm, Model
from collections import OrderedDict
class LinearRegression(Algorithm):
def __init__(self, features=[], label='label', prediction='prediction', fit_intercept=True):
super().__init__(features=features, label=label, prediction=prediction, fit_intercept=fit_intercept)
def _fit(self, X, y):
if self.params.fit_intercept:
X = numpy.insert(X, 0, 1, axis=1)
X_t = numpy.transpose(X)
XX_inv = numpy.linalg.inv(numpy.dot(X_t, X))
_coef = numpy.dot(numpy.dot(XX_inv, X_t), y)
if self.params.fit_intercept:
coef = OrderedDict(zip(['intercept'] + self.params._featuresCol, _coef))
else:
coef = OrderedDict(zip(self.params._featuresCol, _coef))
model = LinearRegressionModel(coef=coef, _coef=_coef, params=self.params.freeze())
return model
class LinearRegressionModel(Model):
def _predict(self, X):
if self.params.fit_intercept:
X = numpy.insert(X, 0, 1, axis=1)
return numpy.dot(X, self._coef)
|
[
"numpy.dot",
"numpy.transpose",
"numpy.insert"
] |
[((453, 471), 'numpy.transpose', 'numpy.transpose', (['X'], {}), '(X)\n', (468, 471), False, 'import numpy\n'), ((1061, 1085), 'numpy.dot', 'numpy.dot', (['X', 'self._coef'], {}), '(X, self._coef)\n', (1070, 1085), False, 'import numpy\n'), ((409, 438), 'numpy.insert', 'numpy.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (421, 438), False, 'import numpy\n'), ((506, 523), 'numpy.dot', 'numpy.dot', (['X_t', 'X'], {}), '(X_t, X)\n', (515, 523), False, 'import numpy\n'), ((551, 573), 'numpy.dot', 'numpy.dot', (['XX_inv', 'X_t'], {}), '(XX_inv, X_t)\n', (560, 573), False, 'import numpy\n'), ((1016, 1045), 'numpy.insert', 'numpy.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (1028, 1045), False, 'import numpy\n')]
|
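The _fit above is the closed-form normal-equation solve. A self-contained sanity check of that formula against numpy's least-squares routine (synthetic data; no dependency on the .base module):

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 2))
y = 1.5 + 2.0 * X[:, 0] - 0.5 * X[:, 1]     # exact linear relation

Xb = np.insert(X, 0, 1, axis=1)              # prepend intercept column
coef = np.dot(np.dot(np.linalg.inv(np.dot(Xb.T, Xb)), Xb.T), y)
print(coef)                                 # ~ [1.5, 2.0, -0.5]
print(np.allclose(coef, np.linalg.lstsq(Xb, y, rcond=None)[0]))  # True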
"""
module documentation
"""
import numpy as np
import imageio
import tensorflow as tf
import requests # for HTTP
from . import checkpoint
from . import layer
from . import graph
from .ops import * # should be fine, since the operations have very specific names and nothing clashes
from . import model
import math
import matplotlib.pyplot as plt
import matplotlib.image as mpimg  # used at module level by img() below
import skimage
from . import constants
# path for all data, defaults to Colab
constants.datapath = "/drive/My Drive/colab/data"
def setDataPath(path):
constants.datapath = path
# generic object inspection
def inspect(obj):
for key in dir(obj):
if(not key.startswith("_")):
value = getattr(obj,key)
print(f"{key}: {type(value)}")
if(value.__doc__ != None):
print("\t"+value.__doc__.split("\n")[0])
# todo: could also be done via import *, i.e. __all__
def imports(g):
    """ import all standard names. must be called with globals():
    stupid.imports(globals())
    """
    import tensorflow as tf
    import IPython.display as display
    import matplotlib.pyplot as plt # displays images: plt.imshow(image)
import matplotlib as mpl
import numpy as np
import functools
import tensorflow_datasets as tfds
import matplotlib.image as mpimg
import keras
g["tf"] = tf
g["display"] = display
g["plt"] = plt
g["mpl"] = mpl
g["numpy"] = np
g["np"] = np
g["functools"] = functools
g["tfds"] = tfds
g["mpimg"] = mpimg
g["keras"] = keras
print("verfügbare Variablen: tf, display, plt, np/numpy, functools, tfds")
def img(filename='steine.jpg'):
    """ load image from drive/colab/images
    google drive must be mounted"""
    return mpimg.imread('/content/drive/My Drive/colab/images/'+filename)
def load(path_to_img):
    """ goal: generic load method. images only for now
    img path -> tensor with max dim 512 and values 0..1
    """
max_dim = 512
img = tf.io.read_file(path_to_img)
img = tf.image.decode_image(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
shape = tf.cast(tf.shape(img)[:-1], tf.float32)
long_dim = max(shape)
scale = max_dim / long_dim
new_shape = tf.cast(shape * scale, tf.int32)
img = tf.image.resize(img, new_shape)
#img = img[tf.newaxis, :]
return img
def gilbert():
""" gibt die gilbert katze als tensor """
url = "https://raw.githubusercontent.com/bomelino/stupid/master/images/gilbert.jpg"
img = tf.image.decode_image(requests.get(url).content, channels=3) #, name="jpeg_reader")
img = tf.image.convert_image_dtype(img, tf.float32)
return img
def img(name):
    """ returns an image from the github folder /images
    (note: this definition shadows the Drive-based img() above)"""
url = "https://raw.githubusercontent.com/bomelino/stupid/master/images/"+name
img = tf.image.decode_image(requests.get(url).content, channels=3) #, name="jpeg_reader")
img = tf.image.convert_image_dtype(img, tf.float32)
return img
def get_img(src):
img = imageio.imread(src)
if not (len(img.shape) == 3 and img.shape[2] == 3):
img = np.dstack((img,img,img))
return img
def norm(t):
""" normalisiere einen numpy zu werten zwischen 0 und 1 """
return (t - t.min()) / (t.max()-t.min())
def batch(t):
""" füge batch dimension hinzu """
return tf.expand_dims(t,axis=0)
def to_image(obj):
""" allgemeine funktion zum anschauen von allen objekttypen (work in progress)
gibt image (numpy arry),description zurück
description sagt, was alles gemacht wurde um bild darzustellen
"""
import logging
descr = ""
if (tf.is_tensor(obj)):
obj = obj.numpy()
logger = logging.getLogger()
old_level = logger.level
logger.setLevel(100)
if obj.shape:
#print(f"Max {max(obj)}")
if len(obj.shape) == 2: # grayscale image
obj = norm(obj)
descr += f"Grayscale Image, mean:{obj.mean()}, var:{obj.var()} \n"
if (obj.var() < 0.01):
descr += f"Mean abgzogen {obj.mean()} \n"
obj = obj - obj.mean()
if (obj.mean() < 0.01):
i = 0
while (obj.mean() < 0.1 and obj.shape[0] > 10):
i += 1
obj = skimage.measure.block_reduce(obj, (2,2), np.max)
descr += f"Sehr dunkles Bild, maxpooling ({i} mal)"
# in "rgb" umwandeln
obj = np.stack((obj,)*3, axis=-1)
return obj,descr
            elif len(obj.shape) == 3: # could be an image
if obj.shape[0] == 3:
obj = np.transpose(obj,(1,2,0))
descr += "channel first \n"
                if obj.shape[2] == 3: # regular image
obj = norm(obj)
descr += f"Mean {obj.mean()}, Variance {obj.var()}\n"
if (obj.var() < 0.1):
obj = obj - obj.mean()
descr += f"Mean abgezogen \n"
if (obj.mean() < 0.1):
i= 0
while (obj.mean() < 0.1 and obj.shape[0] > 10):
i += 1
obj = skimage.measure.block_reduce(obj, (2,2,1), np.max)
descr += f"Bild zu dunkel, maxpooling ({i} mal)"
return obj,descr
                else: ## feature map
                    ## show a few of them
n = math.floor(math.sqrt(obj.shape[2]/3))
n = min(n,8)
f, axs = plt.subplots(n,n,figsize=(15,15))
descr += f"{obj.shape[2]} Feature Maps mit Shape {obj.shape[0:2]}"
                    print(f'Showing {n*n*3} feature maps via RGB:')
for i in range(n*n):
r = norm(obj[:,:,i*3])
g = norm(obj[:,:,i*3+1])
b = norm(obj[:,:,i*3+2])
axs.flat[i].set_title(f'{i*3} - {i*3+3}')
axs.flat[i].imshow(np.moveaxis(np.array([r,g,b]), 0, 2)) # channels first -> channels last
#axs.flat[i].imshow(r,cmap='gray')
axs.flat[i].axis('off')
            elif len(obj.shape) == 4 and obj.shape[0] == 3 and obj.shape[1] == 3: # convolution kernel
descr += f"Convolution Kernel {obj.shape}"
obj = np.transpose(obj,(2,3,0,1))
obj = np.reshape(obj,(obj.shape[0],-1,3))
#obj = obj[:,:,:3]
return to_image(obj)
else:
print("Tensor ",obj.shape)
print(obj)
logger.setLevel(old_level)
else:
return None, "Object of type "+str(type(obj))
def view(obj,verbose=False):
result,descr = to_image(obj)
#if result != None:
if verbose: print(descr)
plt.imshow(result)
plt.show()
def tnorm(t):
return (t - tf.math.reduce_min(t)) / (tf.math.reduce_max(t)-tf.math.reduce_min(t))
|
[
"skimage.measure.block_reduce",
"matplotlib.pyplot.imshow",
"numpy.transpose",
"tensorflow.cast",
"numpy.reshape",
"requests.get",
"tensorflow.io.read_file",
"matplotlib.pyplot.subplots",
"numpy.dstack",
"numpy.stack",
"matplotlib.image.imread",
"matplotlib.pyplot.show",
"math.sqrt",
"tensorflow.math.reduce_max",
"imageio.imread",
"tensorflow.is_tensor",
"tensorflow.expand_dims",
"tensorflow.math.reduce_min",
"tensorflow.shape",
"tensorflow.image.decode_image",
"numpy.array",
"tensorflow.image.resize",
"logging.getLogger",
"tensorflow.image.convert_image_dtype"
] |
[((1641, 1705), 'matplotlib.image.imread', 'mpimg.imread', (["('/content/drive/My Drive/colab/images/' + filename)"], {}), "('/content/drive/My Drive/colab/images/' + filename)\n", (1653, 1705), True, 'import matplotlib.image as mpimg\n'), ((1864, 1892), 'tensorflow.io.read_file', 'tf.io.read_file', (['path_to_img'], {}), '(path_to_img)\n', (1879, 1892), True, 'import tensorflow as tf\n'), ((1900, 1938), 'tensorflow.image.decode_image', 'tf.image.decode_image', (['img'], {'channels': '(3)'}), '(img, channels=3)\n', (1921, 1938), True, 'import tensorflow as tf\n'), ((1946, 1991), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (1974, 1991), True, 'import tensorflow as tf\n'), ((2107, 2139), 'tensorflow.cast', 'tf.cast', (['(shape * scale)', 'tf.int32'], {}), '(shape * scale, tf.int32)\n', (2114, 2139), True, 'import tensorflow as tf\n'), ((2147, 2178), 'tensorflow.image.resize', 'tf.image.resize', (['img', 'new_shape'], {}), '(img, new_shape)\n', (2162, 2178), True, 'import tensorflow as tf\n'), ((2460, 2505), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (2488, 2505), True, 'import tensorflow as tf\n'), ((2759, 2804), 'tensorflow.image.convert_image_dtype', 'tf.image.convert_image_dtype', (['img', 'tf.float32'], {}), '(img, tf.float32)\n', (2787, 2804), True, 'import tensorflow as tf\n'), ((2846, 2865), 'imageio.imread', 'imageio.imread', (['src'], {}), '(src)\n', (2860, 2865), False, 'import imageio\n'), ((3152, 3177), 'tensorflow.expand_dims', 'tf.expand_dims', (['t'], {'axis': '(0)'}), '(t, axis=0)\n', (3166, 3177), True, 'import tensorflow as tf\n'), ((3431, 3448), 'tensorflow.is_tensor', 'tf.is_tensor', (['obj'], {}), '(obj)\n', (3443, 3448), True, 'import tensorflow as tf\n'), ((3490, 3509), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3507, 3509), False, 'import logging\n'), ((6172, 6190), 'matplotlib.pyplot.imshow', 'plt.imshow', (['result'], {}), '(result)\n', (6182, 6190), True, 'import matplotlib.pyplot as plt\n'), ((6195, 6205), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6203, 6205), True, 'import matplotlib.pyplot as plt\n'), ((2934, 2960), 'numpy.dstack', 'np.dstack', (['(img, img, img)'], {}), '((img, img, img))\n', (2943, 2960), True, 'import numpy as np\n'), ((2010, 2023), 'tensorflow.shape', 'tf.shape', (['img'], {}), '(img)\n', (2018, 2023), True, 'import tensorflow as tf\n'), ((2391, 2408), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2403, 2408), False, 'import requests\n'), ((2690, 2707), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (2702, 2707), False, 'import requests\n'), ((4149, 4178), 'numpy.stack', 'np.stack', (['((obj,) * 3)'], {'axis': '(-1)'}), '((obj,) * 3, axis=-1)\n', (4157, 4178), True, 'import numpy as np\n'), ((6235, 6256), 'tensorflow.math.reduce_min', 'tf.math.reduce_min', (['t'], {}), '(t)\n', (6253, 6256), True, 'import tensorflow as tf\n'), ((6261, 6282), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['t'], {}), '(t)\n', (6279, 6282), True, 'import tensorflow as tf\n'), ((6283, 6304), 'tensorflow.math.reduce_min', 'tf.math.reduce_min', (['t'], {}), '(t)\n', (6301, 6304), True, 'import tensorflow as tf\n'), ((4001, 4050), 'skimage.measure.block_reduce', 'skimage.measure.block_reduce', (['obj', '(2, 2)', 'np.max'], {}), '(obj, (2, 2), np.max)\n', (4029, 4050), False, 'import skimage\n'), ((4297, 4325), 'numpy.transpose', 'np.transpose', (['obj', '(1, 2, 0)'], {}), '(obj, (1, 2, 0))\n', (4309, 4325), True, 'import numpy as np\n'), ((5048, 5084), 'matplotlib.pyplot.subplots', 'plt.subplots', (['n', 'n'], {'figsize': '(15, 15)'}), '(n, n, figsize=(15, 15))\n', (5060, 5084), True, 'import matplotlib.pyplot as plt\n'), ((5762, 5793), 'numpy.transpose', 'np.transpose', (['obj', '(2, 3, 0, 1)'], {}), '(obj, (2, 3, 0, 1))\n', (5774, 5793), True, 'import numpy as np\n'), ((5802, 5840), 'numpy.reshape', 'np.reshape', (['obj', '(obj.shape[0], -1, 3)'], {}), '(obj, (obj.shape[0], -1, 3))\n', (5812, 5840), True, 'import numpy as np\n'), ((4976, 5003), 'math.sqrt', 'math.sqrt', (['(obj.shape[2] / 3)'], {}), '(obj.shape[2] / 3)\n', (4985, 5003), False, 'import math\n'), ((4736, 4788), 'skimage.measure.block_reduce', 'skimage.measure.block_reduce', (['obj', '(2, 2, 1)', 'np.max'], {}), '(obj, (2, 2, 1), np.max)\n', (4764, 4788), False, 'import skimage\n'), ((5463, 5482), 'numpy.array', 'np.array', (['[r, g, b]'], {}), '([r, g, b])\n', (5471, 5482), True, 'import numpy as np\n')]
|
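A minimal usage sketch of the helpers above, assuming TensorFlow and matplotlib are available; the random image stands in for real data.

import numpy as np
import tensorflow as tf

t = tf.constant(np.random.rand(64, 64, 3), dtype=tf.float32)
t = tnorm(t)              # rescale values to [0, 1]
batched = batch(t)       # add leading batch dimension -> shape (1, 64, 64, 3)
view(t, verbose=True)    # render via to_image() and plt.imshow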
import numpy as np
import pandas as pd
import mechbayes.util as util
import mechbayes.jhu as jhu
from pathlib import Path
import warnings
'''Submission'''
def create_submission_file(prefix, forecast_date, model, data, places, submit_args):
print(f"Creating submission file in {prefix}")
samples_directory = f"{prefix}/samples"
model_name = submit_args["model_name"]
team_name = submit_args["team_name"]
num_weeks = submit_args["num_weeks"]
quantiles = submit_args["quantiles"]
targets_to_run = submit_args["targets"]
pad_strategy = submit_args.get("pad_strategy") or "shift"
forecast_df = pd.DataFrame()
forecast_date = pd.to_datetime(forecast_date)
if (forecast_date.dayofweek != 6):
raise ValueError(f"Submission files only supported for forecasts made on Sunday")
has_any_missing = False
for target in targets_to_run:
target_df, has_missing_place = generate_forecast_df(forecast_date,
model,
data,
target,
places,
quantiles,
num_weeks,
samples_directory,
pad_strategy)
has_any_missing = has_any_missing or has_missing_place
forecast_df = forecast_df.append(target_df)
forecast_date_str = forecast_date.strftime('%Y-%m-%d')
if has_any_missing:
fname = f"{prefix}/{forecast_date_str}-{team_name}-{model_name}-error.csv"
warnings.warn(f"Submission file incomplete. Writing partial file to {fname}")
else:
fname = f"{prefix}/{forecast_date_str}-{team_name}-{model_name}.csv"
file_exists = Path(fname).exists()
forecast_df.to_csv(fname, float_format="%.0f", index=False)
if not file_exists:
Path(fname).chmod(0o664)
def get_location_codes():
resource_dir = (Path(__file__).parent / "resources").resolve()
'''Get US codes'''
df = pd.read_csv(f"{resource_dir}/locations.csv")
# for states and US: map from abbreviation in locations.csv to location
has_abbrev = ~ df['abbreviation'].isnull()
state_and_us_codes = {abbrev : code for abbrev, code in zip(df.loc[has_abbrev, 'abbreviation'],
df.loc[has_abbrev, 'location'])}
# for counties, do a merge on FIPS to subset to counties that are recognized by the hub
# use the index from jhu.county_info() as keys
# use FIPS column from JHU as location code (it is identical to location column from forecast hub)
county_info = jhu.get_county_info()
county_info['index'] = county_info.index
county_info = county_info.merge(df, left_on="FIPS", right_on="location", how="inner")
assert(county_info['FIPS'].equals(county_info['location']))
us_county_codes = {key: fips for key, fips in zip(county_info['index'], county_info['FIPS'])}
'''Get EU codes'''
df = pd.read_csv(f"{resource_dir}/locations_eu.csv")
eu_country_codes = {name: location for name, location in zip(df['location_name'], df['location'])}
return dict(state_and_us_codes, **us_county_codes, **eu_country_codes)
# mapping from hub target names to mechbayes variable names
target2var = {'inc case' : 'dy',
'cum case' : 'y',
'inc death' : 'dz',
'cum death' : 'z'};
# JHU truth variable corresponding to target
target2jhu = {'inc case' : 'confirmed',
'cum case' : 'confirmed',
'inc death' : 'death',
'cum death' : 'death'};
def generate_forecast_df(forecast_date,
model,
data,
target,
places,
quantiles,
num_weeks,
samples_directory,
pad_strategy="shift"):
forecast_start = forecast_date #+ pd.Timedelta("1d")
variable_name = target2var[target];
# empty forecast data structure
forecast = {'quantile': [],
'value': [],
'type': [],
'location': [],
'target': [],
'forecast_date': [],
'target_end_date': []}
forecast["forecast_date"] = forecast_date
next_saturday = pd.Timedelta('6 days')
has_missing_place = False
for place in places:
try:
prior_samples, mcmc_samples, post_pred_samples, forecast_samples = \
util.load_samples(f"{samples_directory}/{place}.npz")
except Exception as e:
warnings.warn(f"Failed to load data: {samples_directory}/{place}.npz")
has_missing_place = True
continue
jhu_variable = target2jhu[target]
truth_data = data[place]['data'][jhu_variable]
forecast_samples = model.get(forecast_samples, variable_name, forecast=True)
daily_df = util.construct_daily_df(forecast_start, forecast_samples, target, truth_data=truth_data, pad_strategy=pad_strategy)
weekly_df = util.resample_to_weekly(daily_df, target)
for week_ahead in range(1, num_weeks+1):
target_week_start = forecast_date + pd.Timedelta(weeks=week_ahead-1)
samples = weekly_df.loc[target_week_start]
target_end_date_datetime = pd.to_datetime(target_week_start) + next_saturday
target_end_date = target_end_date_datetime.strftime("%Y-%m-%d")
week_ahead_target = f"{week_ahead:d} wk ahead {target}"
for q in quantiles:
prediction = np.percentile(samples, q*100)
forecast["quantile"].append("{:.3f}".format(q))
forecast["value"].append(prediction)
forecast["type"].append("quantile")
forecast["location"].append(place)
forecast["target"].append(week_ahead_target)
forecast["target_end_date"].append(target_end_date)
if q==0.50:
forecast["quantile"].append("NA")
forecast["value"].append(prediction)
forecast["type"].append("point")
forecast["location"].append(place)
forecast["target"].append(week_ahead_target)
forecast["target_end_date"].append(target_end_date)
forecast_df = pd.DataFrame(forecast)
location_codes = get_location_codes()
forecast_df['location'] = forecast_df['location'].replace(location_codes)
return forecast_df, has_missing_place
|
[
"pandas.DataFrame",
"mechbayes.util.resample_to_weekly",
"pandas.read_csv",
"numpy.percentile",
"pathlib.Path",
"pandas.to_datetime",
"mechbayes.util.load_samples",
"pandas.Timedelta",
"warnings.warn",
"mechbayes.util.construct_daily_df",
"mechbayes.jhu.get_county_info"
] |
[((638, 652), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (650, 652), True, 'import pandas as pd\n'), ((674, 703), 'pandas.to_datetime', 'pd.to_datetime', (['forecast_date'], {}), '(forecast_date)\n', (688, 703), True, 'import pandas as pd\n'), ((2285, 2329), 'pandas.read_csv', 'pd.read_csv', (['f"""{resource_dir}/locations.csv"""'], {}), "(f'{resource_dir}/locations.csv')\n", (2296, 2329), True, 'import pandas as pd\n'), ((2925, 2946), 'mechbayes.jhu.get_county_info', 'jhu.get_county_info', ([], {}), '()\n', (2944, 2946), True, 'import mechbayes.jhu as jhu\n'), ((3280, 3327), 'pandas.read_csv', 'pd.read_csv', (['f"""{resource_dir}/locations_eu.csv"""'], {}), "(f'{resource_dir}/locations_eu.csv')\n", (3291, 3327), True, 'import pandas as pd\n'), ((4680, 4702), 'pandas.Timedelta', 'pd.Timedelta', (['"""6 days"""'], {}), "('6 days')\n", (4692, 4702), True, 'import pandas as pd\n'), ((6777, 6799), 'pandas.DataFrame', 'pd.DataFrame', (['forecast'], {}), '(forecast)\n', (6789, 6799), True, 'import pandas as pd\n'), ((1830, 1907), 'warnings.warn', 'warnings.warn', (['f"""Submission file incomplete. Writing partial file to {fname}"""'], {}), "(f'Submission file incomplete. Writing partial file to {fname}')\n", (1843, 1907), False, 'import warnings\n'), ((5307, 5426), 'mechbayes.util.construct_daily_df', 'util.construct_daily_df', (['forecast_start', 'forecast_samples', 'target'], {'truth_data': 'truth_data', 'pad_strategy': 'pad_strategy'}), '(forecast_start, forecast_samples, target,\n truth_data=truth_data, pad_strategy=pad_strategy)\n', (5330, 5426), True, 'import mechbayes.util as util\n'), ((5443, 5484), 'mechbayes.util.resample_to_weekly', 'util.resample_to_weekly', (['daily_df', 'target'], {}), '(daily_df, target)\n', (5466, 5484), True, 'import mechbayes.util as util\n'), ((2014, 2025), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (2018, 2025), False, 'from pathlib import Path\n'), ((4870, 4923), 'mechbayes.util.load_samples', 'util.load_samples', (['f"""{samples_directory}/{place}.npz"""'], {}), "(f'{samples_directory}/{place}.npz')\n", (4887, 4923), True, 'import mechbayes.util as util\n'), ((2131, 2142), 'pathlib.Path', 'Path', (['fname'], {}), '(fname)\n', (2135, 2142), False, 'from pathlib import Path\n'), ((4967, 5037), 'warnings.warn', 'warnings.warn', (['f"""Failed to load data: {samples_directory}/{place}.npz"""'], {}), "(f'Failed to load data: {samples_directory}/{place}.npz')\n", (4980, 5037), False, 'import warnings\n'), ((5583, 5617), 'pandas.Timedelta', 'pd.Timedelta', ([], {'weeks': '(week_ahead - 1)'}), '(weeks=week_ahead - 1)\n', (5595, 5617), True, 'import pandas as pd\n'), ((5710, 5743), 'pandas.to_datetime', 'pd.to_datetime', (['target_week_start'], {}), '(target_week_start)\n', (5724, 5743), True, 'import pandas as pd\n'), ((5978, 6009), 'numpy.percentile', 'np.percentile', (['samples', '(q * 100)'], {}), '(samples, q * 100)\n', (5991, 6009), True, 'import numpy as np\n'), ((2205, 2219), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (2209, 2219), False, 'from pathlib import Path\n')]
|
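The core of generate_forecast_df above is turning forecast samples into one "quantile" row per requested quantile (with q == 0.50 duplicated as the "point" row). A minimal illustration with made-up samples:

import numpy as np

samples = np.random.gamma(shape=2.0, scale=50.0, size=1000)  # invented forecast samples
for q in (0.025, 0.5, 0.975):
    prediction = np.percentile(samples, q * 100)
    print("quantile={:.3f} value={:.0f}".format(q, prediction))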
import os
import numpy as np
from keras.layers import Dense
from keras.models import Sequential
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class Model:
def name(self):
return "Keras MLP"
def train(self, x_train, y_train):
num_classes = y_train.shape[1]
self.model = Sequential()
self.model.add(Dense(20, input_dim=x_train.shape[1], activation="relu"))
self.model.add(Dense(num_classes, activation="softmax"))
self.model.compile(loss="mse", optimizer="adam", metrics=["accuracy"])
self.model.fit(x_train, y_train, epochs=10, batch_size=10, verbose=0)
def sample(self, x):
y_pred = self.model.predict(x)
bool_preds = [[1 if i == max(row) else 0 for i in row] for row in y_pred]
return np.asarray(bool_preds)
|
[
"keras.models.Sequential",
"numpy.asarray",
"keras.layers.Dense"
] |
[((302, 314), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (312, 314), False, 'from keras.models import Sequential\n'), ((780, 802), 'numpy.asarray', 'np.asarray', (['bool_preds'], {}), '(bool_preds)\n', (790, 802), True, 'import numpy as np\n'), ((338, 394), 'keras.layers.Dense', 'Dense', (['(20)'], {'input_dim': 'x_train.shape[1]', 'activation': '"""relu"""'}), "(20, input_dim=x_train.shape[1], activation='relu')\n", (343, 394), False, 'from keras.layers import Dense\n'), ((419, 459), 'keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), "(num_classes, activation='softmax')\n", (424, 459), False, 'from keras.layers import Dense\n')]
|
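The list comprehension in sample() above one-hot encodes the per-row maximum (ties would produce several 1s). An equivalent vectorized form, which breaks ties by taking the first maximum, is sketched here with made-up predictions:

import numpy as np

y_pred = np.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.2]])
bool_preds = np.eye(y_pred.shape[1], dtype=int)[y_pred.argmax(axis=1)]
print(bool_preds)  # [[0 1 0]
                   #  [1 0 0]]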
import pandas as pd
import numpy as np
s = pd.Series(np.tile([3,5],2))
print(s)
|
[
"numpy.tile"
] |
[((55, 73), 'numpy.tile', 'np.tile', (['[3, 5]', '(2)'], {}), '([3, 5], 2)\n', (62, 73), True, 'import numpy as np\n')]
|
"""
Main script to fine-tuning the Wav2Vec model.
author: <NAME>. Adapted from the tutorial: https://colab.research.google.com/github/m3hrdadfi/soxan/blob/main/notebooks/Emotion_recognition_in_Greek_speech_using_Wav2Vec2.ipynb
date: 03/2022
Usage:
e.g.
python3 MMEmotionRecognition/src/Audio/FineTuningWav2Vec/main_FineTuneWav2Vec_CV.py
--audios_dir <RAVDESS_dir>/audios_16kHz
--cache_dir MMEmotionRecognition/data/Audio/cache_dir
--out_dir <RAVDESS_dir>/FineTuningWav2Vec2_out
--model_id jonatasgrosman/wav2vec2-large-xlsr-53-english
Options:
--audios_dir Path to the directory with the audios to train the model
        --cache_dir         Path to the folder to save auxiliary data such as transformer models
--out_dir: Path to save the trained models, datasets and logs
--model_id: Name of the model from the Hugging Face repository to use as baseline. In our case: 'jonatasgrosman/wav2vec2-large-xlsr-53-english'
"""
import os
import sys
import argparse
sys.path.append('.')
sys.path.append('..')
sys.path.append('../../')
sys.path.append('../../../')
os.environ['LC_ALL'] ='C.UTF-8'
os.environ['LANG'] = 'C.UTF-8'
os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
import random
import numpy as np
import pandas as pd
import time
from pathlib import Path
from tqdm import tqdm
import torchaudio
#PREPARE DATA
from datasets import load_dataset, load_metric
#LOAD PRE-TRAINED MODEL ON ASR
from transformers import AutoConfig
from src.Audio.FineTuningWav2Vec.DataCollatorCTCWithPadding import *
from src.Audio.FineTuningWav2Vec.Wav2VecAuxClasses import *
from src.Audio.FineTuningWav2Vec.CTCTrainer import *
#TRAINING
from transformers import EvalPrediction
from transformers import TrainingArguments
from datetime import datetime
from datasets import set_caching_enabled
set_caching_enabled(False)
def seed_libs(seed=2020):
"""
Fix the seeds for the random generators of torch and other libraries
:param seed: Seed to pass to the random seed generators
"""
random.seed(seed)
np.random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
def seed_torch(seed=2020):
"""
Fix the seeds for the random generators of torch and other libraries
:param seed: Seed to pass to the random seed generators
"""
    seed_libs(seed)
os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8"
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def prepare_RAVDESS_DS(path_audios):
"""
Generation of the dataframe with the information of the dataset. The dataframe has the following structure:
______________________________________________________________________________________________________________________________
| name | path | emotion | actor |
______________________________________________________________________________________________________________________________
| 01-01-01-01-01-01-01.wav | <RAVDESS_dir>/audios_16kHz/01-01-01-01-01-01-01.wav | Neutral | 1 |
______________________________________________________________________________________________________________________________
...
:param path_audios: Path to the folder that contains all the audios in .wav format, 16kHz and single-channel(mono)
"""
dict_emotions_ravdess = {
0: 'Neutral',
1: 'Calm',
2: 'Happy',
3: 'Sad',
4: 'Angry',
5: 'Fear',
6: 'Disgust',
7: 'Surprise'
}
data = []
for path in tqdm(Path(path_audios).glob("**/*.wav")):
name = str(path).split('/')[-1].split('.')[0]
label = dict_emotions_ravdess[int(name.split("-")[2]) - 1] # Start emotions in 0
actor = int(name.split("-")[-1])
try:
data.append({
"name": name,
"path": path,
"emotion": label,
"actor": actor
})
except Exception as e:
# print(str(path), e)
pass
df = pd.DataFrame(data)
return df
def generate_train_test(fold, df, save_path=""):
"""
    Divide the data into train and test sets in a subject-wise 5-CV way. The division is generated before running the training
of each fold.
:param fold:[int] Fold to create the train and test sets [ranging from 0 - 4]
:param df:[DataFrame] Dataframe with the complete list of files generated by prepare_RAVDESS_DS(..) function
:param save_path:[str] Path to save the train.csv and test.csv per fold
"""
actors_per_fold = {
0: [2,5,14,15,16],
1: [3, 6, 7, 13, 18],
2: [10, 11, 12, 19, 20],
3: [8, 17, 21, 23, 24],
4: [1, 4, 9, 22],
}
test_df = df.loc[df['actor'].isin(actors_per_fold[fold])]
train_df = df.loc[~df['actor'].isin(actors_per_fold[fold])]
train_df = train_df.reset_index(drop=True)
test_df = test_df.reset_index(drop=True)
if(save_path!=""):
train_df.to_csv(f"{save_path}/train.csv", sep="\t", encoding="utf-8", index=False)
test_df.to_csv(f"{save_path}/test.csv", sep="\t", encoding="utf-8", index=False)
return train_df, test_df
def speech_file_to_array_fn(path):
"""
    Loader of audio recordings. It loads the recordings, converts them to a specific sampling rate if required, and returns
an array with the samples of the audio.
:param path:[str] Path to the wav file.
:param target_sampling_rate:[int] Global variable with the expected sampling rate of the model
"""
speech_array, sampling_rate = torchaudio.load(path)
resampler = torchaudio.transforms.Resample(sampling_rate, target_sampling_rate)
speech = resampler(speech_array).squeeze().numpy()
return speech
def label_to_id(label, label_list):
if len(label_list) > 0:
return label_list.index(label) if label in label_list else -1
return label
def preprocess_function(examples, input_column = "path", output_column = "emotion"):
"""
Load the recordings with their labels.
:param examples:[DataFrame] with the samples of the training or test sets.
:param input_column:[str] Column that contain the paths to the recordings
:param output_column:[str] Column that contain the emotion associated to each recording
:param target_sampling_rate:[int] Global variable with the expected sampling rate of the model
"""
speech_list = [speech_file_to_array_fn(path) for path in examples[input_column]]
target_list = [label_to_id(label, label_list) for label in examples[output_column]]
result = processor(speech_list, sampling_rate=target_sampling_rate)
result["labels"] = list(target_list)
return result
def compute_metrics(p: EvalPrediction):
"""
Extract the metrics of the model from the predictions.
-MSE for regression tasks
-Accuracy for classification tasks
:param p: EvalPrediction: Predictions of the model.
"""
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
if __name__ == '__main__':
#Read input parameters
parser = argparse.ArgumentParser(description="Configuration of setup and training process")
parser.add_argument('-audios', '--audios_dir', type=str, required=True,
help='Path with the audios to train/test the model')
parser.add_argument('-cache', '--cache_dir', type=str, required=True,
help='Path to save aux. data in cache')
parser.add_argument('-out', '--out_dir', type=str, help='Path to save the embeddings extracted from the model',
default='./')
parser.add_argument('-model', '--model_id', type=str,
                        help='Model identifier in Hugging Face library [default: jonatasgrosman/wav2vec2-large-xlsr-53-english]',
default='jonatasgrosman/wav2vec2-large-xlsr-53-english')
args = parser.parse_args()
#PARAMETERS #######################
out_dir_models = os.path.join(args.out_dir, "trained_models/wav2vec2-xlsr-ravdess-speech-emotion-recognition") #out path to save trained models
data_path = os.path.join(args.out_dir,"data") #Path to save csvs generated containing the recording information (path, name, emotion...)
# We need to specify the input and output column
input_column = "path" # Name of the column that will contain the path of the recordings
output_column = "emotion" # Name of the column that will contain the labels of the recordings
pooling_mode = "mean" #Type of pooling to apply to the embeddings generated ath the output of the transformer module to collapse all the timesteps of the recordingsinto a single vector
now = datetime.now()
current_time = now.strftime("%Y%m%d_%H%M%S")
seed = 2020
epochs = 10 #Epochs to train the model
#PARAMETERS #######################
seed_torch(seed=seed) #Set random seeds
for fold in range(5): # 5-CV strategy
#Define paths, create aux. folders and callbacks to save data
out_dir_models_path = os.path.join(out_dir_models, current_time, "fold"+str(fold))
save_path = os.path.join(data_path, current_time, "fold"+str(fold))
os.environ['TRANSFORMERS_CACHE'] = os.path.join(args.cache_dir, current_time, "fold"+str(fold))
os.environ['HF_DATASETS_CACHE'] = os.path.join(args.cache_dir, current_time, "fold"+str(fold))
os.makedirs(save_path, exist_ok=True)
print("SAVING DATA IN: ", save_path)
callbackTB = transformers.integrations.TensorBoardCallback()
#######################
#PREPARE DATASET
#Generate complete dataframe with RAVDESS samples
df = prepare_RAVDESS_DS(args.audios_dir)
_, _ = generate_train_test(fold, df, save_path)
time.sleep(10) #wait some time to get the dataset ready
data_files = {
"train": os.path.join(save_path, "train.csv"),
"validation": os.path.join(save_path, "test.csv"),
}
#Load data
dataset = load_dataset("csv", data_files=data_files, delimiter="\t", )
train_dataset = dataset["train"]
eval_dataset = dataset["validation"]
print("Processing fold: ", str(fold), " - actors in Train fold: ",set(train_dataset["actor"]))
print("Processing fold: ", str(fold), " - actors in Eval fold: ", set(eval_dataset["actor"]))
label_list = train_dataset.unique(output_column)
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
print(f"A classification problem with {num_labels} classes: {label_list}")
# LOAD PRE-TRAINED MODEL ON ASR
# config
config = AutoConfig.from_pretrained(
args.model_id, #path to the model of HuggingFace lib. that we will use as baseline to fine-tune.
num_labels=num_labels, # num classes
label2id={label: i for i, label in enumerate(label_list)}, # dict that maps emotions -> numbers
id2label={i: label for i, label in enumerate(label_list)}, # dict that maps numbers -> emotions
finetuning_task="wav2vec2_clf",
)
#Add in the config variable the 'pooling_mode'
setattr(config, 'pooling_mode', pooling_mode)
#Load the processor for the type of model (Wav2Vec2.0 in our case) and get the expected sampling rate (16kHZ in our case)
processor = Wav2Vec2Processor.from_pretrained(args.model_id, )
target_sampling_rate = processor.feature_extractor.sampling_rate
print(f"The target sampling rate: {target_sampling_rate}")
print("Generating training...")
train_dataset = train_dataset.map(
preprocess_function,
batch_size=100,
batched=True,
num_proc=4
)
print("Generating test...")
eval_dataset = eval_dataset.map(
preprocess_function,
batch_size=100,
batched=True,
num_proc=4
)
#MODEL
print("Training model...")
data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
is_regression = False
#Create the architecture: Wav2Vec2.0 model + mean pooling + MLP (1024, 8)
model = Wav2Vec2ForSpeechClassification.from_pretrained(
args.model_id,
config=config,
)
#Freeze feature encoder layers (CNNs) of wav2vec2.0 & train the transformer module and the MLP that we have added (randomly initialized)
model.freeze_feature_extractor()
#Set trainig arguments/parameters
training_args = TrainingArguments(
output_dir=out_dir_models_path,
per_device_train_batch_size=4,
per_device_eval_batch_size=4,
gradient_accumulation_steps=2,
evaluation_strategy="steps",
prediction_loss_only=False,
num_train_epochs=epochs,
fp16=True,
save_steps=10,
eval_steps=10,
logging_steps=10,
learning_rate=1e-4,
save_total_limit=5,
load_best_model_at_end=True,
metric_for_best_model="eval_accuracy",
seed=seed, )
#Set data collator to pad the small recordings
trainer = CTCTrainer(
model=model,
data_collator=data_collator,
args=training_args,
compute_metrics=compute_metrics,
train_dataset=train_dataset,
eval_dataset=eval_dataset,
tokenizer=processor.feature_extractor,
callbacks = [callbackTB])
#Start training the network using the train_dataset & evaluating it on the eval_dataset passed as parameters
# to the CTCTrainer
trainer.train()
|
[
"sys.path.append",
"pandas.DataFrame",
"datasets.load_dataset",
"transformers.TrainingArguments",
"numpy.random.seed",
"argparse.ArgumentParser",
"os.makedirs",
"torchaudio.transforms.Resample",
"numpy.argmax",
"datetime.datetime.now",
"time.sleep",
"pathlib.Path",
"random.seed",
"torchaudio.load",
"datasets.set_caching_enabled",
"numpy.squeeze",
"os.path.join"
] |
[((996, 1016), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (1011, 1016), False, 'import sys\n'), ((1017, 1038), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (1032, 1038), False, 'import sys\n'), ((1039, 1064), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (1054, 1064), False, 'import sys\n'), ((1065, 1093), 'sys.path.append', 'sys.path.append', (['"""../../../"""'], {}), "('../../../')\n", (1080, 1093), False, 'import sys\n'), ((1809, 1835), 'datasets.set_caching_enabled', 'set_caching_enabled', (['(False)'], {}), '(False)\n', (1828, 1835), False, 'from datasets import set_caching_enabled\n'), ((2025, 2042), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2036, 2042), False, 'import random\n'), ((2047, 2067), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2061, 2067), True, 'import numpy as np\n'), ((2117, 2137), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2131, 2137), True, 'import numpy as np\n'), ((4131, 4149), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (4143, 4149), True, 'import pandas as pd\n'), ((5673, 5694), 'torchaudio.load', 'torchaudio.load', (['path'], {}), '(path)\n', (5688, 5694), False, 'import torchaudio\n'), ((5711, 5778), 'torchaudio.transforms.Resample', 'torchaudio.transforms.Resample', (['sampling_rate', 'target_sampling_rate'], {}), '(sampling_rate, target_sampling_rate)\n', (5741, 5778), False, 'import torchaudio\n'), ((7472, 7559), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Configuration of setup and training process"""'}), "(description=\n 'Configuration of setup and training process')\n", (7495, 7559), False, 'import argparse\n'), ((8365, 8462), 'os.path.join', 'os.path.join', (['args.out_dir', '"""trained_models/wav2vec2-xlsr-ravdess-speech-emotion-recognition"""'], {}), "(args.out_dir,\n 'trained_models/wav2vec2-xlsr-ravdess-speech-emotion-recognition')\n", (8377, 8462), False, 'import os\n'), ((8508, 8542), 'os.path.join', 'os.path.join', (['args.out_dir', '"""data"""'], {}), "(args.out_dir, 'data')\n", (8520, 8542), False, 'import os\n'), ((9075, 9089), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9087, 9089), False, 'from datetime import datetime\n'), ((7153, 7170), 'numpy.squeeze', 'np.squeeze', (['preds'], {}), '(preds)\n', (7163, 7170), True, 'import numpy as np\n'), ((7193, 7217), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (7202, 7217), True, 'import numpy as np\n'), ((9779, 9816), 'os.makedirs', 'os.makedirs', (['save_path'], {'exist_ok': '(True)'}), '(save_path, exist_ok=True)\n', (9790, 9816), False, 'import os\n'), ((10160, 10174), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (10170, 10174), False, 'import time\n'), ((10410, 10468), 'datasets.load_dataset', 'load_dataset', (['"""csv"""'], {'data_files': 'data_files', 'delimiter': '"""\t"""'}), "('csv', data_files=data_files, delimiter='\\t')\n", (10422, 10468), False, 'from datasets import load_dataset, load_metric\n'), ((13011, 13439), 'transformers.TrainingArguments', 'TrainingArguments', ([], {'output_dir': 'out_dir_models_path', 'per_device_train_batch_size': '(4)', 'per_device_eval_batch_size': '(4)', 'gradient_accumulation_steps': '(2)', 'evaluation_strategy': '"""steps"""', 'prediction_loss_only': '(False)', 'num_train_epochs': 'epochs', 'fp16': '(True)', 'save_steps': '(10)', 'eval_steps': '(10)', 'logging_steps': '(10)', 'learning_rate': '(0.0001)', 'save_total_limit': '(5)', 'load_best_model_at_end': '(True)', 'metric_for_best_model': '"""eval_accuracy"""', 'seed': 'seed'}), "(output_dir=out_dir_models_path,\n per_device_train_batch_size=4, per_device_eval_batch_size=4,\n gradient_accumulation_steps=2, evaluation_strategy='steps',\n prediction_loss_only=False, num_train_epochs=epochs, fp16=True,\n save_steps=10, eval_steps=10, logging_steps=10, learning_rate=0.0001,\n save_total_limit=5, load_best_model_at_end=True, metric_for_best_model=\n 'eval_accuracy', seed=seed)\n", (13028, 13439), False, 'from transformers import TrainingArguments\n'), ((10262, 10298), 'os.path.join', 'os.path.join', (['save_path', '"""train.csv"""'], {}), "(save_path, 'train.csv')\n", (10274, 10298), False, 'import os\n'), ((10326, 10361), 'os.path.join', 'os.path.join', (['save_path', '"""test.csv"""'], {}), "(save_path, 'test.csv')\n", (10338, 10361), False, 'import os\n'), ((3638, 3655), 'pathlib.Path', 'Path', (['path_audios'], {}), '(path_audios)\n', (3642, 3655), False, 'from pathlib import Path\n')]
|
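How prepare_RAVDESS_DS above decodes a file name: RAVDESS names are hyphen-separated fields, where the third field is the emotion code and the last one the actor id. The example name below is illustrative:

name = "03-01-06-01-02-01-12"
emotion_idx = int(name.split("-")[2]) - 1  # 6 -> 5, i.e. 'Fear' in dict_emotions_ravdess
actor = int(name.split("-")[-1])           # 12
print(emotion_idx, actor)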
import model3 as M
import numpy as np
import tensorflow as tf
import data_reader
class VariationalDrop(M.Model):
# arxiv 1512.05287
def initialize(self, drop_rate):
self.drop_rate = drop_rate
	def _get_mask(self, shape):
		# (time, batch, dim): one mask shared across all timesteps
		# entries are 0 (dropped) with probability drop_rate, 1 (kept) otherwise
		mask = np.random.choice(2, size=(1, shape[1], shape[2]), p=[self.drop_rate, 1-self.drop_rate])
		return tf.convert_to_tensor(mask, dtype=tf.float32)
def forward(self, x):
		shape = x.shape
mask = self._get_mask(shape)
x = x * mask
return x
class predNet(M.Model):
def initialize(self):
self.enc = M.Dense(128)
self.LSTM = M.LSTM(128)
self.dec = M.Dense(17*3)
def forward(self, x):
x = [self.enc(_) for _ in x]
y = self.LSTM(x[:-1])
y = [self.dec(_) for _ in y]
return y
def loss_grad(x, model):
label = x[1:]
with tf.GradientTape() as tape:
out = model(x)
# print(len(out))
sub = [tf.square(o-l) for o,l in zip(out, label)]
loss = tf.reduce_mean(sub)
grad = tape.gradient(loss, model.trainable_variables)
return grad, [loss]
reader = data_reader.data_reader()
model = predNet()
optim = tf.optimizers.Adam(0.001)
saver = M.Saver(model)
ITER = 10000
for i in range(ITER + 1):
data = reader.get_next(32, 16)
grad, ls = loss_grad(data, model)
optim.apply_gradients(zip(grad, model.trainable_variables))
if i%10==0:
print('Iter:%d\tLoss:%.4f'%(i, ls[0]))
if i%2000==0:
saver.save('./model/model.ckpt')
|
[
"model3.Saver",
"model3.LSTM",
"tensorflow.square",
"data_reader.data_reader",
"tensorflow.convert_to_tensor",
"tensorflow.reduce_mean",
"tensorflow.optimizers.Adam",
"model3.Dense",
"numpy.random.choice",
"tensorflow.GradientTape"
] |
[((1014, 1039), 'data_reader.data_reader', 'data_reader.data_reader', ([], {}), '()\n', (1037, 1039), False, 'import data_reader\n'), ((1067, 1092), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['(0.001)'], {}), '(0.001)\n', (1085, 1092), True, 'import tensorflow as tf\n'), ((1102, 1116), 'model3.Saver', 'M.Saver', (['model'], {}), '(model)\n', (1109, 1116), True, 'import model3 as M\n'), ((261, 354), 'numpy.random.choice', 'np.random.choice', (['(2)'], {'size': '(1, shape[1], shape[2])', 'p': '[1 - self.drop_rate, self.drop_rate]'}), '(2, size=(1, shape[1], shape[2]), p=[1 - self.drop_rate,\n self.drop_rate])\n', (277, 354), True, 'import numpy as np\n'), ((358, 384), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask'], {}), '(mask)\n', (378, 384), True, 'import tensorflow as tf\n'), ((548, 560), 'model3.Dense', 'M.Dense', (['(128)'], {}), '(128)\n', (555, 560), True, 'import model3 as M\n'), ((575, 586), 'model3.LSTM', 'M.LSTM', (['(128)'], {}), '(128)\n', (581, 586), True, 'import model3 as M\n'), ((600, 615), 'model3.Dense', 'M.Dense', (['(17 * 3)'], {}), '(17 * 3)\n', (607, 615), True, 'import model3 as M\n'), ((782, 799), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (797, 799), True, 'import tensorflow as tf\n'), ((907, 926), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['sub'], {}), '(sub)\n', (921, 926), True, 'import tensorflow as tf\n'), ((855, 871), 'tensorflow.square', 'tf.square', (['(o - l)'], {}), '(o - l)\n', (864, 871), True, 'import tensorflow as tf\n')]
|
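The point of the variational mask in VariationalDrop above (arXiv 1512.05287) is that one Bernoulli mask of shape (1, batch, dim) is broadcast over the time axis, so every timestep of a sequence drops the same units. A small numpy-only sketch of that property:

import numpy as np

time_steps, batch, dim = 4, 2, 3
mask = np.random.choice(2, size=(1, batch, dim), p=[0.5, 0.5]).astype(np.float32)
x = np.ones((time_steps, batch, dim), dtype=np.float32)
dropped = x * mask           # mask broadcasts over the time axis
print(np.all(dropped == mask))  # True: identical drop pattern at every timestep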
import pytest
import numpy as np
from pytsmp import pytsmp
from tests import helpers
class TestMatrixProfile:
def test_MatrixProfile_init(self):
with pytest.raises(TypeError):
t = np.random.rand(1000)
mp = pytsmp.MatrixProfile(t, window_size=100, verbose=False)
class TestSTAMP:
def test_STAMP_init_incorrect_window_size1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=0, verbose=False)
assert str(excinfo.value) == "Incorrect window size specified."
def test_STAMP_init_incorrect_window_size2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=2.3, verbose=False)
assert str(excinfo.value) == "Incorrect window size specified."
def test_STAMP_init_incorrect_window_size3(self):
with pytest.raises(ValueError) as excinfo:
t1 = np.random.rand(1000)
t2 = np.random.rand(500)
mp = pytsmp.STAMP(t1, t2, window_size=501, verbose=False)
assert str(excinfo.value) == "Incorrect window size specified."
def test_STAMP_init_incorrect_exclusion_zone(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, exclusion_zone=-1, verbose=False)
assert str(excinfo.value) == "Exclusion zone must be non-negative."
def test_STAMP_init_incorrect_s_size1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, s_size=0, verbose=False)
assert str(excinfo.value) == "s_size must be between 0 and 1."
def test_STAMP_init_incorrect_s_size2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, s_size=1.2, verbose=False)
assert str(excinfo.value) == "s_size must be between 0 and 1."
def test_STAMP_is_anytime(self):
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, s_size=1, verbose=True) # for coverage purpose
is_anytime = mp.is_anytime
assert is_anytime == True, "STAMP_is_anytime: STAMP should be an anytime algorithm."
def test_STAMP_init_check_mutation(self):
t1 = np.random.rand(100)
t2 = np.random.rand(100)
w = 10
mp = pytsmp.STAMP(t1, t2, window_size=w, exclusion_zone=0, verbose=False)
t1[0] = -10
t2[0] = -10
assert t1[0] != mp.ts1[0], "STAMP_init_check_mutation: Matrix profile init should leave original array intact."
assert t2[0] != mp.ts2[0], "STAMP_init_check_mutation: Matrix profile init should leave original array intact."
def test_STAMP_get_profiles_check_length(self):
n = np.random.randint(100, 1000)
m = np.random.randint(100, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m))
mp = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert len(mpro) == n - w + 1, "STAMP_get_profile_check_length: Matrix profile should have correct length"
assert len(ipro) == n - w + 1, "STAMP_get_profile_check_length: Index profile should have correct length"
def test_STAMP_get_profiles_check_mutation(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mpro[0] = -1
ipro[0] = -1
mpro2, ipro2 = mp.get_profiles()
assert mpro[0] != mpro2[0], "STAMP_get_profile_check_mutation: " \
"Get profile should return a copy of the matrix profile, not the internal one."
assert ipro[0] != ipro2[0], "STAMP_get_profile_check_mutation: " \
"Get profile should return a copy of the index profile, not the internal one."
def test_STAMP_compute_matrix_profile_sanity(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.STAMP(t, t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "STAMP_compute_matrix_profile_sanity: " \
"Should compute the matrix profile correctly in the trivial case."
assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "STAMP_compute_matrix_profile_sanity: " \
"Should compute the index profile correctly in the trivial case."
def test_STAMP_compute_matrix_profile_same_random_data(self):
n = np.random.randint(100, 200) # anything larger will be too time-consuming
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
assert np.allclose(mpro, mp_naive), "STAMP_compute_matrix_profile_same_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "STAMP_compute_matrix_profile_same_random_data: " \
"Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_random_data(self):
n = np.random.randint(100, 200)
m = np.random.randint(100, 200)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
assert np.allclose(mpro, mp_naive), "STAMP_compute_matrix_profile_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "STAMP_compute_matrix_profile_random_data: " \
"Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_data1(self):
t = np.loadtxt("./data/random_walk_data.csv")
mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
w = 50
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STAMP_compute_matrix_profile_data1: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
# assert np.allclose(ipro, ipro_ans), "STAMP_compute_matrix_profile_data1: " \
# "Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_data2(self):
t = np.loadtxt("./data/candy_production.csv")
mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
w = 80
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STAMP_compute_matrix_profile_data2: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "STAMP_compute_matrix_profile_data1: " \
"Should compute the index profile correctly."
def test_STAMP_compute_matrix_profile_data3(self):
t = np.loadtxt("./data/bitcoin_price.csv")
mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
w = 100
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STAMP_compute_matrix_profile_data3: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "STAMP_compute_matrix_profile_data3: " \
"Should compute the index profile correctly."
class TestConvFunctions:
"""
The class for tests of helper functions independent of matrix profile classes.
"""
def test_update_ts1_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STAMP(t1[:-1], t2, window_size=w, verbose=False)
mp.update_ts1(t1[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_random_data: " \
"update_ts1 should update the matrix profile properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_random_data: " \
"update_ts1 should update the index profile properly on random data."
def test_update_ts1_multiple_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t1[:-times], t2, window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts1(t1[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_multiple_random_data: " \
"update_ts1 should update the matrix profile multiple times properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
        assert np.allclose(ipro, ipro2), "update_ts1_multiple_random_data: " \
                                         "update_ts1 should update the index profile multiple times properly on random data."
def test_update_ts2_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STAMP(t1, t2[:-1], window_size=w, verbose=False)
mp.update_ts2(t2[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_random_data: " \
"update_ts2 should update the matrix profile properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_random_data: " \
"update_ts2 should update the index profile properly on random data."
def test_update_ts2_multiple_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t1, t2[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts2(t2[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_multiple_random_data: " \
"update_ts2 should update the matrix profile multiple times properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
        assert np.allclose(ipro, ipro2), "update_ts2_multiple_random_data: " \
                                         "update_ts2 should update the index profile multiple times properly on random data."
def test_update_interleave_random_data(self):
n = np.random.randint(200, 1000)
m = np.random.randint(200, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
times = np.random.randint(5, 25)
mp = pytsmp.STAMP(t1[:-times], t2[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts1(t1[i])
mp.update_ts2(t2[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t1, t2, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_interleave_random_data: " \
"update_ts1 and update_ts2 should update the matrix profile multiple times " \
"properly on random data. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_interleave_random_data: " \
"update_ts1 and update_ts2 should update the index profile multiple times " \
"properly on random data."
def test_update_ts1_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t[:-1], window_size=w, verbose=False)
mp.update_ts1(t[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_same_data: " \
"update_ts1 should update the matrix profile properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_same_data: " \
"update_ts1 should update the index profile properly when ts1 == ts2."
def test_update_ts1_multiple_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts1(t[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts1_multiple_same_data: " \
"update_ts1 should update the matrix profile multiple times properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts1_multiple_same_data: " \
"update_ts1 should update the index profile multiple times properly when ts1 == ts2."
def test_update_ts2_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t[:-1], window_size=w, verbose=False)
mp.update_ts2(t[-1])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_same_data: " \
"update_ts2 should update the matrix profile properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_same_data: " \
"update_ts2 should update the index profile properly when ts1 == ts2."
def test_update_ts2_multiple_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
times = np.random.randint(5, 50)
mp = pytsmp.STAMP(t[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
mp.update_ts2(t[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_ts2_multiple_same_data: " \
"update_ts2 should update the matrix profile multiple times properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_ts2_multiple_same_data: " \
"update_ts2 should update the index profile multiple times properly when ts1 == ts2."
def test_update_interleave_same_data(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
times = np.random.randint(5, 25)
mp = pytsmp.STAMP(t[:-times], window_size=w, verbose=False)
for i in range(-times, 0, 1):
if i % 2 == 0:
mp.update_ts1(t[i])
else:
mp.update_ts2(t[i])
mpro, ipro = mp.get_profiles()
mp2 = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro2, ipro2 = mp2.get_profiles()
assert np.allclose(mpro, mpro2), "update_interleave_same_data: " \
"update_ts1 and update_ts2 should update the matrix profile multiple times " \
"properly when ts1 == ts2. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro2)))
assert np.allclose(ipro, ipro2), "update_interleave_same_data: " \
"update_ts1 and update_ts2 should update the index profile multiple times " \
"properly when ts1 == ts2."
def test_find_discords_incorrect_num_discords1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(-1)
assert str(excinfo.value) == "Incorrect num_discords entered."
def test_find_discords_incorrect_num_discords2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(4.2)
assert str(excinfo.value) == "Incorrect num_discords entered."
def test_find_discords_incorrect_num_discords3(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(0)
assert str(excinfo.value) == "Incorrect num_discords entered."
def test_find_discords_incorrect_exclusion_zone(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
discords = mp.find_discords(3, exclusion_zone=-1)
assert str(excinfo.value) == "Exclusion zone must be non-negative."
def test_find_discords_sanity1(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
discords = mp.find_discords(n - w + 1, exclusion_zone=0)
mp_discords = mpro[discords]
        assert len(discords) == n - w + 1, "find_discords_sanity1: find_discords should return the correct number of discords."
assert (mp_discords[1:] <= mp_discords[:-1]).all(), "find_discords_sanity1: find_discords should return " \
"discords in descending order of profile values."
def test_find_discords_sanity2(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
discords = mp.find_discords(n - w + 1) # exclusion_zone=None
mp_discords = mpro[discords]
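        # With the default exclusion zone (assumed to be about half the window on each
        # side), each reported discord masks on the order of w candidate indices,
        # which gives the bounds below.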
assert (n - w + 1) // w <= len(discords) <= (n - w + 1) // w * 2 + 1, \
"find_discords_snaity2: find_discords should not return more than the max possible number of discords."
assert (mp_discords[1:] <= mp_discords[:-1]).all(), "find_discords_sanity2: find_discords should return " \
"discords in descending order of profile values."
def test_find_discords_sanity3(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 5)
num_discords = 5
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
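        # exclusion_zone=1/2 presumably masks half a window on each side of every
        # reported discord (the parameter looks like a fraction of the window size).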
discords = mp.find_discords(num_discords, exclusion_zone=1/2)
mp_discords = mpro[discords]
        assert len(discords) == num_discords, "find_discords_sanity3: find_discords should return the desired number of discords."
assert (mp_discords[1:] <= mp_discords[:-1]).all(), "find_discords_sanity3: find_discords should return " \
"discords in descending order of profile values."
def test_find_discords_anomaly(self):
"""
find_discords should be able to locate obvious anomaly.
"""
n = np.random.randint(200, 500)
t = np.random.rand(n)
t = np.tile(t, 4)
w = np.random.randint(10, n // 4)
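        # Inject an obvious point anomaly: the series repeats exactly four times, so a
        # single spiked sample creates the one subsequence without a close match.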
ab = np.random.randint(len(t))
t[ab] += 5
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
discords = np.sort(mp.find_discords(1, exclusion_zone=1/2))
assert len(discords) == 1, "find_discords_anomaly: find_discords should return the desired number of discords."
assert np.abs(ab - discords[0]) < w, \
"find_discords_anomaly: find_discords should be able to locate obvious anomaly."
    def test_find_motifs_incorrect_num_motifs1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(-1)
assert str(excinfo.value) == "Incorrect num_motifs entered."
def test_find_motifs_incorrect_num_motifs2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(4.2)
assert str(excinfo.value) == "Incorrect num_motifs entered."
def test_find_motifs_incorrect_num_motifs3(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(0)
assert str(excinfo.value) == "Incorrect num_motifs entered."
def test_find_motifs_incorrect_exclusion_zone(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.STAMP(t, window_size=10, verbose=False)
motifs = mp.find_motifs(5, exclusion_zone=-1)
assert str(excinfo.value) == "Exclusion zone must be non-negative."
def test_find_motifs_sanity1(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
num_motifs = 3
motifs = mp.find_motifs(num_motifs, exclusion_zone=1/2)
mp_motifs = mpro[motifs]
        assert len(motifs) == num_motifs, "find_motifs_sanity1: find_motifs should return the desired number of motifs."
assert (mp_motifs[1:, 0] >= mp_motifs[:-1, 0]).all(), "find_motifs_sanity1: find_motifs should return " \
"motifs in ascending order of profile values."
def test_find_motifs_sanity2(self):
n = np.random.randint(200, 1000)
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STAMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
motifs = mp.find_motifs(n - w + 1) # exclusion_zone=None
mp_motifs = mpro[motifs]
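        # Every motif is a pair of subsequences, so with the default exclusion zone
        # (assumed to be about half the window) each reported motif masks roughly two
        # window-sized neighborhoods, which gives the bounds below.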
        assert (n - w + 1) // (2 * w) <= len(motifs) <= (n - w + 1) // w * 2 + 1, \
            "find_motifs_sanity2: the number of motifs returned should stay within the expected bounds."
        assert (mp_motifs[1:, 0] >= mp_motifs[:-1, 0]).all(), "find_motifs_sanity2: find_motifs should return " \
                                                              "motifs in ascending order of profile values."
class TestSTOMP:
def test_STOMP_is_anytime(self):
t = np.random.rand(1000)
mp = pytsmp.STOMP(t, window_size=10, s_size=1, verbose=True)
is_anytime = mp.is_anytime
assert is_anytime == False, "STOMP_is_anytime: STOMP should not be an anytime algorithm."
def test_STOMP_get_profiles_check_length(self):
n = np.random.randint(100, 1000)
m = np.random.randint(100, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m))
mp = pytsmp.STOMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert len(mpro) == n - w + 1, "STOMP_get_profile_check_length: Matrix profile should have correct length"
assert len(ipro) == n - w + 1, "STOMP_get_profile_check_length: Index profile should have correct length"
def test_STOMP_get_profiles_check_mutation(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.STOMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mpro[0] = -1
ipro[0] = -1
mpro2, ipro2 = mp.get_profiles()
assert mpro[0] != mpro2[0], "STOMP_get_profile_check_mutation: " \
"Get profile should return a copy of the matrix profile, not the internal one."
assert ipro[0] != ipro2[0], "STOMP_get_profile_check_mutation: " \
"Get profile should return a copy of the index profile, not the internal one."
def test_STOMP_compute_matrix_profile_sanity(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.STOMP(t, t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "STOMP_compute_matrix_profile_sanity: " \
"Should compute the matrix profile correctly in the trivial case."
assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "STOMP_compute_matrix_profile_sanity: " \
"Should compute the index profile correctly in the trivial case."
def test_STOMP_compute_matrix_profile_same_random_data(self):
n = np.random.randint(100, 200) # anything larger will be too time-consuming
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.STOMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
assert np.allclose(mpro, mp_naive), "STOMP_compute_matrix_profile_same_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "STOMP_compute_matrix_profile_same_random_data: " \
"Should compute the index profile correctly."
def test_STOMP_compute_matrix_profile_random_data(self):
n = np.random.randint(100, 200)
m = np.random.randint(100, 200)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.STOMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
assert np.allclose(mpro, mp_naive), "STOMP_compute_matrix_profile_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "STOMP_compute_matrix_profile_random_data: " \
"Should compute the index profile correctly."
def test_STOMP_compute_matrix_profile_data1(self):
t = np.loadtxt("./data/random_walk_data.csv")
mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
w = 50
mp = pytsmp.STOMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STOMP_compute_matrix_profile_data1: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
# assert np.allclose(ipro, ipro_ans), "STOMP_compute_matrix_profile_data1: " \
# "Should compute the index profile correctly."
def test_STOMP_compute_matrix_profile_data2(self):
t = np.loadtxt("./data/candy_production.csv")
mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
w = 80
mp = pytsmp.STOMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STOMP_compute_matrix_profile_data2: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "STOMP_compute_matrix_profile_data1: " \
"Should compute the index profile correctly."
def test_STOMP_compute_matrix_profile_data3(self):
t = np.loadtxt("./data/bitcoin_price.csv")
mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
w = 100
mp = pytsmp.STOMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "STOMP_compute_matrix_profile_data3: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "STOMP_compute_matrix_profile_data3: " \
"Should compute the index profile correctly."
class TestSCRIMP:
def test_SCRIMP_is_anytime(self):
t = np.random.rand(1000)
mp = pytsmp.SCRIMP(t, window_size=10, s_size=1, verbose=True, pre_scrimp=1)
is_anytime = mp.is_anytime
assert is_anytime == True, "SCRIMP_is_anytime: SCRIMP should be an anytime algorithm."
def test_SCRIMP_get_profiles_check_length(self):
n = np.random.randint(100, 1000)
m = np.random.randint(100, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m))
mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
assert len(mpro) == n - w + 1, "SCRIMP_get_profile_check_length: Matrix profile should have correct length"
assert len(ipro) == n - w + 1, "SCRIMP_get_profile_check_length: Index profile should have correct length"
def test_SCRIMP_get_profiles_check_mutation(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
mpro[0] = -1
ipro[0] = -1
mpro2, ipro2 = mp.get_profiles()
assert mpro[0] != mpro2[0], "SCRIMP_get_profile_check_mutation: " \
"Get profile should return a copy of the matrix profile, not the internal one."
assert ipro[0] != ipro2[0], "SCRIMP_get_profile_check_mutation: " \
"Get profile should return a copy of the index profile, not the internal one."
def test_SCRIMP_compute_matrix_profile_sanity(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.SCRIMP(t, t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "SCRIMP_compute_matrix_profile_sanity: " \
"Should compute the matrix profile correctly in the trivial case."
assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "SCRIMP_compute_matrix_profile_sanity: " \
"Should compute the index profile correctly in the trivial case."
def test_SCRIMP_compute_matrix_profile_same_random_data(self):
n = np.random.randint(100, 200) # anything larger will be too time-consuming
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
"Should compute the index profile correctly."
def test_SCRIMP_compute_matrix_profile_random_data(self):
n = np.random.randint(100, 200)
m = np.random.randint(100, 200)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_random_data: " \
"Should compute the index profile correctly."
def test_SCRIMP_compute_matrix_profile_data1(self):
t = np.loadtxt("./data/random_walk_data.csv")
mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
w = 50
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data1: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
# assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data1: " \
# "Should compute the index profile correctly."
def test_SCRIMP_compute_matrix_profile_data2(self):
t = np.loadtxt("./data/candy_production.csv")
mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
w = 80
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data2: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data1: " \
"Should compute the index profile correctly."
def test_SCRIMP_compute_matrix_profile_data3(self):
t = np.loadtxt("./data/bitcoin_price.csv")
mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
w = 100
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data3: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data3: " \
"Should compute the index profile correctly."
class TestPreSCRIMP:
def test_PreSCRIMP_is_anytime(self):
t = np.random.rand(1000)
mp = pytsmp.PreSCRIMP(t, window_size=10, s_size=1, verbose=True)
is_anytime = mp.is_anytime
assert is_anytime == True, "PreSCRIMP_is_anytime: PreSCRIMP should be an anytime algorithm."
    def test_PreSCRIMP_init_incorrect_sample_rate1(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.PreSCRIMP(t, window_size=10, verbose=False, sample_rate=0)
assert str(excinfo.value) == "sample_rate must be positive."
    def test_PreSCRIMP_init_incorrect_sample_rate2(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.PreSCRIMP(t, window_size=10, verbose=False, sample_rate=-2)
assert str(excinfo.value) == "sample_rate must be positive."
def test_PreSCRIMP_get_profiles_check_length(self):
n = np.random.randint(100, 1000)
m = np.random.randint(100, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m))
mp = pytsmp.PreSCRIMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert len(mpro) == n - w + 1, "PreSCRIMP_get_profile_check_length: Matrix profile should have correct length"
assert len(ipro) == n - w + 1, "PreSCRIMP_get_profile_check_length: Index profile should have correct length"
def test_PreSCRIMP_get_profiles_check_mutation(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.PreSCRIMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mpro[0] = -1
ipro[0] = -1
mpro2, ipro2 = mp.get_profiles()
assert mpro[0] != mpro2[0], "PreSCRIMP_get_profile_check_mutation: " \
"Get profile should return a copy of the matrix profile, not the internal one."
assert ipro[0] != ipro2[0], "PreSCRIMP_get_profile_check_mutation: " \
"Get profile should return a copy of the index profile, not the internal one."
def test_PreSCRIMP_compute_matrix_profile_sanity1(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.PreSCRIMP(t, t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "PreSCRIMP_compute_matrix_profile_sanity1: " \
"Should compute the matrix profile correctly in the trivial case."
assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "PreSCRIMP_compute_matrix_profile_sanity1: " \
"Should compute the index profile correctly in the trivial case."
def test_PreSCRIMP_compute_matrix_profile_sanity2(self):
t = np.random.rand(1000)
w = 50
mpp = pytsmp.PreSCRIMP(t, t, window_size=w, verbose=False)
mprop, iprop = mpp.get_profiles()
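        # SCRIMP with pre_scrimp=0 computes the exact profile; PreSCRIMP alone
        # approximates it from above, so its values should never fall below the
        # exact ones (up to numerical tolerance).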
mp = pytsmp.SCRIMP(t, t, window_size=w, verbose=False, pre_scrimp=0)
mpro, ipro = mp.get_profiles()
assert (mprop > mpro - 1e-5).all(), "PreSCRIMP_compute_matrix_profile_sanity2: PreSCRIMP should be an " \
"upper approximation for the actual matrix profile."
    @pytest.mark.skip(reason="Randomized comparisons against the exact profile are not meaningful for an approximate algorithm.")
def test_PreSCRIMP_compute_matrix_profile_same_random_data(self):
n = np.random.randint(100, 200) # anything larger will be too time-consuming
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.PreSCRIMP(t, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
assert np.allclose(mpro, mp_naive), "PreSCRIMP_compute_matrix_profile_same_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "PreSCRIMP_compute_matrix_profile_same_random_data: " \
"Should compute the index profile correctly."
    @pytest.mark.skip(reason="Randomized comparisons against the exact profile are not meaningful for an approximate algorithm.")
def test_PreSCRIMP_compute_matrix_profile_random_data(self):
n = np.random.randint(100, 200)
m = np.random.randint(100, 200)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.PreSCRIMP(t1, t2, window_size=w, verbose=False)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
assert np.allclose(mpro, mp_naive), "PreSCRIMP_compute_matrix_profile_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "PreSCRIMP_compute_matrix_profile_random_data: " \
"Should compute the index profile correctly."
class TestSCRIMP_PreSCRIMP:
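    """
    Tests for SCRIMP runs that first execute a PreSCRIMP pass (pre_scrimp > 0;
    the fraction presumably sets the PreSCRIMP sampling interval relative to
    the window size).
    """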
def test_SCRIMP_init_incorrect_pre_scrimp(self):
with pytest.raises(ValueError) as excinfo:
t = np.random.rand(1000)
mp = pytsmp.SCRIMP(t, window_size=10, verbose=False, pre_scrimp=-1)
assert str(excinfo.value) == "pre_scrimp parameter must be non-negative."
def test_SCRIMP_init_pre_scrimp_zero(self):
t = np.random.rand(1000)
mp = pytsmp.SCRIMP(t, window_size=10, s_size=1, verbose=False, pre_scrimp=0)
assert getattr(mp, "_pre_scrimp_class", None) is None, "SCRIMP_init_pre_scrimp_zero: " \
"PreSCRIMP should not run if pre_scrimp = 0."
def test_SCRIMP_init_pre_scrimp_nonzero(self):
t = np.random.rand(1000)
mp = pytsmp.SCRIMP(t, window_size=10, s_size=1, verbose=False, pre_scrimp=1/2)
assert getattr(mp, "_pre_scrimp_class", None) is not None, "SCRIMP_init_pre_scrimp_nonzero: " \
"PreSCRIMP should run if pre_scrimp > 0."
def test_SCRIMP_PreSCRIMP_get_profiles_check_length(self):
n = np.random.randint(100, 1000)
m = np.random.randint(100, 1000)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m))
mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
assert len(mpro) == n - w + 1, "SCRIMP_get_profile_check_length: Matrix profile should have correct length"
assert len(ipro) == n - w + 1, "SCRIMP_get_profile_check_length: Index profile should have correct length"
def test_SCRIMP_PreSCRIMP_get_profiles_check_mutation(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
mpro[0] = -1
ipro[0] = -1
mpro2, ipro2 = mp.get_profiles()
assert mpro[0] != mpro2[0], "SCRIMP_get_profile_check_mutation: " \
"Get profile should return a copy of the matrix profile, not the internal one."
assert ipro[0] != ipro2[0], "SCRIMP_get_profile_check_mutation: " \
"Get profile should return a copy of the index profile, not the internal one."
def test_SCRIMP_PreSCRIMP_compute_matrix_profile_sanity(self):
t = np.random.rand(1000)
w = 10
mp = pytsmp.SCRIMP(t, t, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, np.zeros(len(t) - w + 1), atol=1e-5), "SCRIMP_compute_matrix_profile_sanity: " \
"Should compute the matrix profile correctly in the trivial case."
assert np.array_equal(ipro, np.arange(len(t) - w + 1)), "SCRIMP_compute_matrix_profile_sanity: " \
"Should compute the index profile correctly in the trivial case."
def test_SCRIMP_PreSCRIMP_compute_matrix_profile_same_random_data(self):
n = np.random.randint(100, 200) # anything larger will be too time-consuming
t = np.random.rand(n)
w = np.random.randint(10, n // 4)
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t, window_size=w)
assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_same_random_data: " \
"Should compute the index profile correctly."
def test_SCRIMP_PreSCRIMP_compute_matrix_profile_random_data(self):
n = np.random.randint(100, 200)
m = np.random.randint(100, 200)
t1 = np.random.rand(n)
t2 = np.random.rand(m)
w = np.random.randint(10, min(n, m) // 4)
mp = pytsmp.SCRIMP(t1, t2, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
mp_naive, ip_naive = helpers.naive_matrix_profile(t1, t2, window_size=w)
assert np.allclose(mpro, mp_naive), "SCRIMP_compute_matrix_profile_random_data: " \
"Should compute the matrix profile correctly."
assert np.allclose(ipro, ip_naive), "SCRIMP_compute_matrix_profile_random_data: " \
"Should compute the index profile correctly."
def test_SCRIMP_PreSCRIMP_compute_matrix_profile_data1(self):
t = np.loadtxt("./data/random_walk_data.csv")
mpro_ans = np.loadtxt("./data/random_walk_data_mpro.csv")
ipro_ans = np.loadtxt("./data/random_walk_data_ipro.csv")
w = 50
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data1: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
# assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data1: " \
# "Should compute the index profile correctly."
def test_SCRIMP_PreSCRIMP_compute_matrix_profile_data2(self):
t = np.loadtxt("./data/candy_production.csv")
mpro_ans = np.loadtxt("./data/candy_production_mpro.csv")
ipro_ans = np.loadtxt("./data/candy_production_ipro.csv")
w = 80
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data2: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data1: " \
"Should compute the index profile correctly."
def test_SCRIMP_PreSCRIMP_compute_matrix_profile_data3(self):
t = np.loadtxt("./data/bitcoin_price.csv")
mpro_ans = np.loadtxt("./data/bitcoin_price_mpro.csv")
ipro_ans = np.loadtxt("./data/bitcoin_price_ipro.csv")
w = 100
mp = pytsmp.SCRIMP(t, window_size=w, verbose=False, pre_scrimp=1/4)
mpro, ipro = mp.get_profiles()
assert np.allclose(mpro, mpro_ans), "SCRIMP_compute_matrix_profile_data3: " \
"Should compute the matrix profile correctly. " \
"Max error is {}".format(np.max(np.abs(mpro - mpro_ans)))
assert np.allclose(ipro, ipro_ans), "SCRIMP_compute_matrix_profile_data3: " \
"Should compute the index profile correctly."
np\n'), ((29347, 29364), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (29361, 29364), True, 'import numpy as np\n'), ((29428, 29478), 'pytsmp.pytsmp.STOMP', 'pytsmp.STOMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)'}), '(t1, t2, window_size=w, verbose=False)\n', (29440, 29478), False, 'from pytsmp import pytsmp\n'), ((29547, 29598), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t1', 't2'], {'window_size': 'w'}), '(t1, t2, window_size=w)\n', (29575, 29598), False, 'from tests import helpers\n'), ((29614, 29641), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (29625, 29641), True, 'import numpy as np\n'), ((29796, 29823), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (29807, 29823), True, 'import numpy as np\n'), ((30030, 30071), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data.csv"""'], {}), "('./data/random_walk_data.csv')\n", (30040, 30071), True, 'import numpy as np\n'), ((30091, 30137), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data_mpro.csv"""'], {}), "('./data/random_walk_data_mpro.csv')\n", (30101, 30137), True, 'import numpy as np\n'), ((30157, 30203), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data_ipro.csv"""'], {}), "('./data/random_walk_data_ipro.csv')\n", (30167, 30203), True, 'import numpy as np\n'), ((30232, 30277), 'pytsmp.pytsmp.STOMP', 'pytsmp.STOMP', (['t'], {'window_size': 'w', 'verbose': '(False)'}), '(t, window_size=w, verbose=False)\n', (30244, 30277), False, 'from pytsmp import pytsmp\n'), ((30332, 30359), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (30343, 30359), True, 'import numpy as np\n'), ((30845, 30886), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production.csv"""'], {}), "('./data/candy_production.csv')\n", (30855, 30886), True, 'import numpy as np\n'), ((30906, 30952), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production_mpro.csv"""'], {}), "('./data/candy_production_mpro.csv')\n", (30916, 30952), True, 'import numpy as np\n'), ((30972, 31018), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production_ipro.csv"""'], {}), "('./data/candy_production_ipro.csv')\n", (30982, 31018), True, 'import numpy as np\n'), ((31047, 31092), 'pytsmp.pytsmp.STOMP', 'pytsmp.STOMP', (['t'], {'window_size': 'w', 'verbose': '(False)'}), '(t, window_size=w, verbose=False)\n', (31059, 31092), False, 'from pytsmp import pytsmp\n'), ((31147, 31174), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (31158, 31174), True, 'import numpy as np\n'), ((31428, 31455), 'numpy.allclose', 'np.allclose', (['ipro', 'ipro_ans'], {}), '(ipro, ipro_ans)\n', (31439, 31455), True, 'import numpy as np\n'), ((31656, 31694), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price.csv"""'], {}), "('./data/bitcoin_price.csv')\n", (31666, 31694), True, 'import numpy as np\n'), ((31714, 31757), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price_mpro.csv"""'], {}), "('./data/bitcoin_price_mpro.csv')\n", (31724, 31757), True, 'import numpy as np\n'), ((31777, 31820), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price_ipro.csv"""'], {}), "('./data/bitcoin_price_ipro.csv')\n", (31787, 31820), True, 'import numpy as np\n'), ((31850, 31895), 'pytsmp.pytsmp.STOMP', 'pytsmp.STOMP', (['t'], {'window_size': 'w', 'verbose': '(False)'}), '(t, window_size=w, verbose=False)\n', (31862, 31895), False, 'from pytsmp import pytsmp\n'), ((31950, 31977), 
'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (31961, 31977), True, 'import numpy as np\n'), ((32231, 32258), 'numpy.allclose', 'np.allclose', (['ipro', 'ipro_ans'], {}), '(ipro, ipro_ans)\n', (32242, 32258), True, 'import numpy as np\n'), ((32461, 32481), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (32475, 32481), True, 'import numpy as np\n'), ((32495, 32565), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': '(10)', 's_size': '(1)', 'verbose': '(True)', 'pre_scrimp': '(1)'}), '(t, window_size=10, s_size=1, verbose=True, pre_scrimp=1)\n', (32508, 32565), False, 'from pytsmp import pytsmp\n'), ((32762, 32790), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (32779, 32790), True, 'import numpy as np\n'), ((32803, 32831), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (32820, 32831), True, 'import numpy as np\n'), ((32845, 32862), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (32859, 32862), True, 'import numpy as np\n'), ((32876, 32893), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (32890, 32893), True, 'import numpy as np\n'), ((32952, 33017), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t1, t2, window_size=w, verbose=False, pre_scrimp=0)\n', (32965, 33017), False, 'from pytsmp import pytsmp\n'), ((33356, 33376), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (33370, 33376), True, 'import numpy as np\n'), ((33405, 33465), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, window_size=w, verbose=False, pre_scrimp=0)\n', (33418, 33465), False, 'from pytsmp import pytsmp\n'), ((34041, 34061), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (34055, 34061), True, 'import numpy as np\n'), ((34090, 34153), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t', 't'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, t, window_size=w, verbose=False, pre_scrimp=0)\n', (34103, 34153), False, 'from pytsmp import pytsmp\n'), ((34739, 34766), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (34756, 34766), True, 'import numpy as np\n'), ((34825, 34842), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (34839, 34842), True, 'import numpy as np\n'), ((34855, 34884), 'numpy.random.randint', 'np.random.randint', (['(10)', '(n // 4)'], {}), '(10, n // 4)\n', (34872, 34884), True, 'import numpy as np\n'), ((34898, 34958), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, window_size=w, verbose=False, pre_scrimp=0)\n', (34911, 34958), False, 'from pytsmp import pytsmp\n'), ((35027, 35073), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t'], {'window_size': 'w'}), '(t, window_size=w)\n', (35055, 35073), False, 'from tests import helpers\n'), ((35089, 35116), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (35100, 35116), True, 'import numpy as np\n'), ((35277, 35304), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (35288, 35304), True, 'import numpy as np\n'), ((35524, 35551), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (35541, 35551), True, 'import numpy as np\n'), 
((35564, 35591), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (35581, 35591), True, 'import numpy as np\n'), ((35605, 35622), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (35619, 35622), True, 'import numpy as np\n'), ((35636, 35653), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (35650, 35653), True, 'import numpy as np\n'), ((35717, 35782), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t1, t2, window_size=w, verbose=False, pre_scrimp=0)\n', (35730, 35782), False, 'from pytsmp import pytsmp\n'), ((35851, 35902), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t1', 't2'], {'window_size': 'w'}), '(t1, t2, window_size=w)\n', (35879, 35902), False, 'from tests import helpers\n'), ((35918, 35945), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (35929, 35945), True, 'import numpy as np\n'), ((36101, 36128), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (36112, 36128), True, 'import numpy as np\n'), ((36337, 36378), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data.csv"""'], {}), "('./data/random_walk_data.csv')\n", (36347, 36378), True, 'import numpy as np\n'), ((36398, 36444), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data_mpro.csv"""'], {}), "('./data/random_walk_data_mpro.csv')\n", (36408, 36444), True, 'import numpy as np\n'), ((36464, 36510), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data_ipro.csv"""'], {}), "('./data/random_walk_data_ipro.csv')\n", (36474, 36510), True, 'import numpy as np\n'), ((36539, 36599), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, window_size=w, verbose=False, pre_scrimp=0)\n', (36552, 36599), False, 'from pytsmp import pytsmp\n'), ((36654, 36681), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (36665, 36681), True, 'import numpy as np\n'), ((37170, 37211), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production.csv"""'], {}), "('./data/candy_production.csv')\n", (37180, 37211), True, 'import numpy as np\n'), ((37231, 37277), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production_mpro.csv"""'], {}), "('./data/candy_production_mpro.csv')\n", (37241, 37277), True, 'import numpy as np\n'), ((37297, 37343), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production_ipro.csv"""'], {}), "('./data/candy_production_ipro.csv')\n", (37307, 37343), True, 'import numpy as np\n'), ((37372, 37432), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, window_size=w, verbose=False, pre_scrimp=0)\n', (37385, 37432), False, 'from pytsmp import pytsmp\n'), ((37487, 37514), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (37498, 37514), True, 'import numpy as np\n'), ((37769, 37796), 'numpy.allclose', 'np.allclose', (['ipro', 'ipro_ans'], {}), '(ipro, ipro_ans)\n', (37780, 37796), True, 'import numpy as np\n'), ((37999, 38037), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price.csv"""'], {}), "('./data/bitcoin_price.csv')\n", (38009, 38037), True, 'import numpy as np\n'), ((38057, 38100), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price_mpro.csv"""'], {}), "('./data/bitcoin_price_mpro.csv')\n", (38067, 38100), True, 'import numpy as np\n'), ((38120, 38163), 
'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price_ipro.csv"""'], {}), "('./data/bitcoin_price_ipro.csv')\n", (38130, 38163), True, 'import numpy as np\n'), ((38193, 38253), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, window_size=w, verbose=False, pre_scrimp=0)\n', (38206, 38253), False, 'from pytsmp import pytsmp\n'), ((38308, 38335), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (38319, 38335), True, 'import numpy as np\n'), ((38590, 38617), 'numpy.allclose', 'np.allclose', (['ipro', 'ipro_ans'], {}), '(ipro, ipro_ans)\n', (38601, 38617), True, 'import numpy as np\n'), ((38827, 38847), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (38841, 38847), True, 'import numpy as np\n'), ((38861, 38920), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t'], {'window_size': '(10)', 's_size': '(1)', 'verbose': '(True)'}), '(t, window_size=10, s_size=1, verbose=True)\n', (38877, 38920), False, 'from pytsmp import pytsmp\n'), ((39731, 39759), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (39748, 39759), True, 'import numpy as np\n'), ((39772, 39800), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (39789, 39800), True, 'import numpy as np\n'), ((39814, 39831), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (39828, 39831), True, 'import numpy as np\n'), ((39845, 39862), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (39859, 39862), True, 'import numpy as np\n'), ((39921, 39975), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)'}), '(t1, t2, window_size=w, verbose=False)\n', (39937, 39975), False, 'from pytsmp import pytsmp\n'), ((40323, 40343), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (40337, 40343), True, 'import numpy as np\n'), ((40372, 40421), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)'}), '(t, window_size=w, verbose=False)\n', (40388, 40421), False, 'from pytsmp import pytsmp\n'), ((41007, 41027), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (41021, 41027), True, 'import numpy as np\n'), ((41056, 41108), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t', 't'], {'window_size': 'w', 'verbose': '(False)'}), '(t, t, window_size=w, verbose=False)\n', (41072, 41108), False, 'from pytsmp import pytsmp\n'), ((41696, 41716), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (41710, 41716), True, 'import numpy as np\n'), ((41746, 41798), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t', 't'], {'window_size': 'w', 'verbose': '(False)'}), '(t, t, window_size=w, verbose=False)\n', (41762, 41798), False, 'from pytsmp import pytsmp\n'), ((41854, 41917), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t', 't'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, t, window_size=w, verbose=False, pre_scrimp=0)\n', (41867, 41917), False, 'from pytsmp import pytsmp\n'), ((42354, 42381), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (42371, 42381), True, 'import numpy as np\n'), ((42440, 42457), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (42454, 42457), True, 'import numpy as np\n'), ((42470, 42499), 'numpy.random.randint', 'np.random.randint', (['(10)', '(n // 4)'], {}), '(10, n // 4)\n', (42487, 42499), True, 
'import numpy as np\n'), ((42513, 42562), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)'}), '(t, window_size=w, verbose=False)\n', (42529, 42562), False, 'from pytsmp import pytsmp\n'), ((42631, 42677), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t'], {'window_size': 'w'}), '(t, window_size=w)\n', (42659, 42677), False, 'from tests import helpers\n'), ((42693, 42720), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (42704, 42720), True, 'import numpy as np\n'), ((42884, 42911), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (42895, 42911), True, 'import numpy as np\n'), ((43247, 43274), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (43264, 43274), True, 'import numpy as np\n'), ((43287, 43314), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (43304, 43314), True, 'import numpy as np\n'), ((43328, 43345), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (43342, 43345), True, 'import numpy as np\n'), ((43359, 43376), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (43373, 43376), True, 'import numpy as np\n'), ((43440, 43494), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)'}), '(t1, t2, window_size=w, verbose=False)\n', (43456, 43494), False, 'from pytsmp import pytsmp\n'), ((43563, 43614), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t1', 't2'], {'window_size': 'w'}), '(t1, t2, window_size=w)\n', (43591, 43614), False, 'from tests import helpers\n'), ((43630, 43657), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (43641, 43657), True, 'import numpy as np\n'), ((43816, 43843), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (43827, 43843), True, 'import numpy as np\n'), ((44384, 44404), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (44398, 44404), True, 'import numpy as np\n'), ((44418, 44489), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': '(10)', 's_size': '(1)', 'verbose': '(False)', 'pre_scrimp': '(0)'}), '(t, window_size=10, s_size=1, verbose=False, pre_scrimp=0)\n', (44431, 44489), False, 'from pytsmp import pytsmp\n'), ((44760, 44780), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (44774, 44780), True, 'import numpy as np\n'), ((44794, 44869), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': '(10)', 's_size': '(1)', 'verbose': '(False)', 'pre_scrimp': '(1 / 2)'}), '(t, window_size=10, s_size=1, verbose=False, pre_scrimp=1 / 2)\n', (44807, 44869), False, 'from pytsmp import pytsmp\n'), ((45157, 45185), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (45174, 45185), True, 'import numpy as np\n'), ((45198, 45226), 'numpy.random.randint', 'np.random.randint', (['(100)', '(1000)'], {}), '(100, 1000)\n', (45215, 45226), True, 'import numpy as np\n'), ((45240, 45257), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (45254, 45257), True, 'import numpy as np\n'), ((45271, 45288), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (45285, 45288), True, 'import numpy as np\n'), ((45347, 45416), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t1, t2, window_size=w, 
verbose=False, pre_scrimp=1 / 4)\n', (45360, 45416), False, 'from pytsmp import pytsmp\n'), ((45763, 45783), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (45777, 45783), True, 'import numpy as np\n'), ((45812, 45876), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (45825, 45876), False, 'from pytsmp import pytsmp\n'), ((46460, 46480), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (46474, 46480), True, 'import numpy as np\n'), ((46509, 46576), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t', 't'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t, t, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (46522, 46576), False, 'from pytsmp import pytsmp\n'), ((47170, 47197), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (47187, 47197), True, 'import numpy as np\n'), ((47256, 47273), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (47270, 47273), True, 'import numpy as np\n'), ((47286, 47315), 'numpy.random.randint', 'np.random.randint', (['(10)', '(n // 4)'], {}), '(10, n // 4)\n', (47303, 47315), True, 'import numpy as np\n'), ((47329, 47393), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (47342, 47393), False, 'from pytsmp import pytsmp\n'), ((47460, 47506), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t'], {'window_size': 'w'}), '(t, window_size=w)\n', (47488, 47506), False, 'from tests import helpers\n'), ((47522, 47549), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (47533, 47549), True, 'import numpy as np\n'), ((47710, 47737), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (47721, 47737), True, 'import numpy as np\n'), ((47967, 47994), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (47984, 47994), True, 'import numpy as np\n'), ((48007, 48034), 'numpy.random.randint', 'np.random.randint', (['(100)', '(200)'], {}), '(100, 200)\n', (48024, 48034), True, 'import numpy as np\n'), ((48048, 48065), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (48062, 48065), True, 'import numpy as np\n'), ((48079, 48096), 'numpy.random.rand', 'np.random.rand', (['m'], {}), '(m)\n', (48093, 48096), True, 'import numpy as np\n'), ((48160, 48229), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t1', 't2'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t1, t2, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (48173, 48229), False, 'from pytsmp import pytsmp\n'), ((48296, 48347), 'tests.helpers.naive_matrix_profile', 'helpers.naive_matrix_profile', (['t1', 't2'], {'window_size': 'w'}), '(t1, t2, window_size=w)\n', (48324, 48347), False, 'from tests import helpers\n'), ((48363, 48390), 'numpy.allclose', 'np.allclose', (['mpro', 'mp_naive'], {}), '(mpro, mp_naive)\n', (48374, 48390), True, 'import numpy as np\n'), ((48546, 48573), 'numpy.allclose', 'np.allclose', (['ipro', 'ip_naive'], {}), '(ipro, ip_naive)\n', (48557, 48573), True, 'import numpy as np\n'), ((48792, 48833), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data.csv"""'], {}), "('./data/random_walk_data.csv')\n", (48802, 48833), True, 'import numpy as np\n'), ((48853, 48899), 
'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data_mpro.csv"""'], {}), "('./data/random_walk_data_mpro.csv')\n", (48863, 48899), True, 'import numpy as np\n'), ((48919, 48965), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/random_walk_data_ipro.csv"""'], {}), "('./data/random_walk_data_ipro.csv')\n", (48929, 48965), True, 'import numpy as np\n'), ((48994, 49058), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (49007, 49058), False, 'from pytsmp import pytsmp\n'), ((49111, 49138), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (49122, 49138), True, 'import numpy as np\n'), ((49637, 49678), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production.csv"""'], {}), "('./data/candy_production.csv')\n", (49647, 49678), True, 'import numpy as np\n'), ((49698, 49744), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production_mpro.csv"""'], {}), "('./data/candy_production_mpro.csv')\n", (49708, 49744), True, 'import numpy as np\n'), ((49764, 49810), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/candy_production_ipro.csv"""'], {}), "('./data/candy_production_ipro.csv')\n", (49774, 49810), True, 'import numpy as np\n'), ((49839, 49903), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (49852, 49903), False, 'from pytsmp import pytsmp\n'), ((49956, 49983), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (49967, 49983), True, 'import numpy as np\n'), ((50238, 50265), 'numpy.allclose', 'np.allclose', (['ipro', 'ipro_ans'], {}), '(ipro, ipro_ans)\n', (50249, 50265), True, 'import numpy as np\n'), ((50478, 50516), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price.csv"""'], {}), "('./data/bitcoin_price.csv')\n", (50488, 50516), True, 'import numpy as np\n'), ((50536, 50579), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price_mpro.csv"""'], {}), "('./data/bitcoin_price_mpro.csv')\n", (50546, 50579), True, 'import numpy as np\n'), ((50599, 50642), 'numpy.loadtxt', 'np.loadtxt', (['"""./data/bitcoin_price_ipro.csv"""'], {}), "('./data/bitcoin_price_ipro.csv')\n", (50609, 50642), True, 'import numpy as np\n'), ((50672, 50736), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': 'w', 'verbose': '(False)', 'pre_scrimp': '(1 / 4)'}), '(t, window_size=w, verbose=False, pre_scrimp=1 / 4)\n', (50685, 50736), False, 'from pytsmp import pytsmp\n'), ((50789, 50816), 'numpy.allclose', 'np.allclose', (['mpro', 'mpro_ans'], {}), '(mpro, mpro_ans)\n', (50800, 50816), True, 'import numpy as np\n'), ((51071, 51098), 'numpy.allclose', 'np.allclose', (['ipro', 'ipro_ans'], {}), '(ipro, ipro_ans)\n', (51082, 51098), True, 'import numpy as np\n'), ((165, 189), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (178, 189), False, 'import pytest\n'), ((207, 227), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (221, 227), True, 'import numpy as np\n'), ((245, 300), 'pytsmp.pytsmp.MatrixProfile', 'pytsmp.MatrixProfile', (['t'], {'window_size': '(100)', 'verbose': '(False)'}), '(t, window_size=100, verbose=False)\n', (265, 300), False, 'from pytsmp import pytsmp\n'), ((386, 411), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (399, 411), False, 'import pytest\n'), ((440, 460), 'numpy.random.rand', 
'np.random.rand', (['(1000)'], {}), '(1000)\n', (454, 460), True, 'import numpy as np\n'), ((478, 523), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(0)', 'verbose': '(False)'}), '(t, window_size=0, verbose=False)\n', (490, 523), False, 'from pytsmp import pytsmp\n'), ((668, 693), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (681, 693), False, 'import pytest\n'), ((722, 742), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (736, 742), True, 'import numpy as np\n'), ((760, 807), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(2.3)', 'verbose': '(False)'}), '(t, window_size=2.3, verbose=False)\n', (772, 807), False, 'from pytsmp import pytsmp\n'), ((952, 977), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (965, 977), False, 'import pytest\n'), ((1007, 1027), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1021, 1027), True, 'import numpy as np\n'), ((1045, 1064), 'numpy.random.rand', 'np.random.rand', (['(500)'], {}), '(500)\n', (1059, 1064), True, 'import numpy as np\n'), ((1082, 1134), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t1', 't2'], {'window_size': '(501)', 'verbose': '(False)'}), '(t1, t2, window_size=501, verbose=False)\n', (1094, 1134), False, 'from pytsmp import pytsmp\n'), ((1281, 1306), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1294, 1306), False, 'import pytest\n'), ((1335, 1355), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1349, 1355), True, 'import numpy as np\n'), ((1373, 1438), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'exclusion_zone': '(-1)', 'verbose': '(False)'}), '(t, window_size=10, exclusion_zone=-1, verbose=False)\n', (1385, 1438), False, 'from pytsmp import pytsmp\n'), ((1582, 1607), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1595, 1607), False, 'import pytest\n'), ((1636, 1656), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1650, 1656), True, 'import numpy as np\n'), ((1674, 1730), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 's_size': '(0)', 'verbose': '(False)'}), '(t, window_size=10, s_size=0, verbose=False)\n', (1686, 1730), False, 'from pytsmp import pytsmp\n'), ((1869, 1894), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1882, 1894), False, 'import pytest\n'), ((1923, 1943), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (1937, 1943), True, 'import numpy as np\n'), ((1961, 2019), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 's_size': '(1.2)', 'verbose': '(False)'}), '(t, window_size=10, s_size=1.2, verbose=False)\n', (1973, 2019), False, 'from pytsmp import pytsmp\n'), ((19176, 19201), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19189, 19201), False, 'import pytest\n'), ((19230, 19250), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (19244, 19250), True, 'import numpy as np\n'), ((19268, 19314), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (19280, 19314), False, 'from pytsmp import pytsmp\n'), ((19506, 19531), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19519, 19531), False, 'import pytest\n'), ((19560, 19580), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (19574, 19580), True, 'import 
numpy as np\n'), ((19598, 19644), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (19610, 19644), False, 'from pytsmp import pytsmp\n'), ((19837, 19862), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (19850, 19862), False, 'import pytest\n'), ((19891, 19911), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (19905, 19911), True, 'import numpy as np\n'), ((19929, 19975), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (19941, 19975), False, 'from pytsmp import pytsmp\n'), ((20167, 20192), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (20180, 20192), False, 'import pytest\n'), ((20221, 20241), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (20235, 20241), True, 'import numpy as np\n'), ((20259, 20305), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (20271, 20305), False, 'from pytsmp import pytsmp\n'), ((23273, 23297), 'numpy.abs', 'np.abs', (['(ab - discords[0])'], {}), '(ab - discords[0])\n', (23279, 23297), True, 'import numpy as np\n'), ((23468, 23493), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23481, 23493), False, 'import pytest\n'), ((23522, 23542), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (23536, 23542), True, 'import numpy as np\n'), ((23560, 23606), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (23572, 23606), False, 'from pytsmp import pytsmp\n'), ((23788, 23813), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (23801, 23813), False, 'import pytest\n'), ((23842, 23862), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (23856, 23862), True, 'import numpy as np\n'), ((23880, 23926), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (23892, 23926), False, 'from pytsmp import pytsmp\n'), ((24109, 24134), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (24122, 24134), False, 'import pytest\n'), ((24163, 24183), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (24177, 24183), True, 'import numpy as np\n'), ((24201, 24247), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (24213, 24247), False, 'from pytsmp import pytsmp\n'), ((24431, 24456), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (24444, 24456), False, 'import pytest\n'), ((24485, 24505), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (24499, 24505), True, 'import numpy as np\n'), ((24523, 24569), 'pytsmp.pytsmp.STAMP', 'pytsmp.STAMP', (['t'], {'window_size': '(10)', 'verbose': '(False)'}), '(t, window_size=10, verbose=False)\n', (24535, 24569), False, 'from pytsmp import pytsmp\n'), ((39128, 39153), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (39141, 39153), False, 'import pytest\n'), ((39182, 39202), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (39196, 39202), True, 'import numpy as np\n'), ((39220, 39285), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t'], 
{'window_size': '(10)', 'verbose': '(False)', 'sample_rate': '(0)'}), '(t, window_size=10, verbose=False, sample_rate=0)\n', (39236, 39285), False, 'from pytsmp import pytsmp\n'), ((39430, 39455), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (39443, 39455), False, 'import pytest\n'), ((39484, 39504), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (39498, 39504), True, 'import numpy as np\n'), ((39522, 39588), 'pytsmp.pytsmp.PreSCRIMP', 'pytsmp.PreSCRIMP', (['t'], {'window_size': '(10)', 'verbose': '(False)', 'sample_rate': '(-2)'}), '(t, window_size=10, verbose=False, sample_rate=-2)\n', (39538, 39588), False, 'from pytsmp import pytsmp\n'), ((44082, 44107), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (44095, 44107), False, 'import pytest\n'), ((44136, 44156), 'numpy.random.rand', 'np.random.rand', (['(1000)'], {}), '(1000)\n', (44150, 44156), True, 'import numpy as np\n'), ((44174, 44236), 'pytsmp.pytsmp.SCRIMP', 'pytsmp.SCRIMP', (['t'], {'window_size': '(10)', 'verbose': '(False)', 'pre_scrimp': '(-1)'}), '(t, window_size=10, verbose=False, pre_scrimp=-1)\n', (44187, 44236), False, 'from pytsmp import pytsmp\n'), ((6969, 6992), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (6975, 6992), True, 'import numpy as np\n'), ((7806, 7829), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (7812, 7829), True, 'import numpy as np\n'), ((8609, 8632), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (8615, 8632), True, 'import numpy as np\n'), ((9676, 9696), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (9682, 9696), True, 'import numpy as np\n'), ((10740, 10760), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (10746, 10760), True, 'import numpy as np\n'), ((11700, 11720), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (11706, 11720), True, 'import numpy as np\n'), ((12764, 12784), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (12770, 12784), True, 'import numpy as np\n'), ((13942, 13962), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (13948, 13962), True, 'import numpy as np\n'), ((14875, 14895), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (14881, 14895), True, 'import numpy as np\n'), ((15843, 15863), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (15849, 15863), True, 'import numpy as np\n'), ((16716, 16736), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (16722, 16736), True, 'import numpy as np\n'), ((17684, 17704), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (17690, 17704), True, 'import numpy as np\n'), ((18818, 18838), 'numpy.abs', 'np.abs', (['(mpro - mpro2)'], {}), '(mpro - mpro2)\n', (18824, 18838), True, 'import numpy as np\n'), ((30572, 30595), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (30578, 30595), True, 'import numpy as np\n'), ((31387, 31410), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (31393, 31410), True, 'import numpy as np\n'), ((32190, 32213), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (32196, 32213), True, 'import numpy as np\n'), ((36895, 36918), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (36901, 36918), True, 'import numpy as np\n'), ((37728, 37751), 'numpy.abs', 'np.abs', (['(mpro - 
mpro_ans)'], {}), '(mpro - mpro_ans)\n', (37734, 37751), True, 'import numpy as np\n'), ((38549, 38572), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (38555, 38572), True, 'import numpy as np\n'), ((49352, 49375), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (49358, 49375), True, 'import numpy as np\n'), ((50197, 50220), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (50203, 50220), True, 'import numpy as np\n'), ((51030, 51053), 'numpy.abs', 'np.abs', (['(mpro - mpro_ans)'], {}), '(mpro - mpro_ans)\n', (51036, 51053), True, 'import numpy as np\n')]
|
#!/home/andrew/.envs/venv38/bin/python3
import sys
import numpy as np
def get_input():
for line in sys.stdin:
line = line.strip()
if len(line) == 0:
continue
if line.startswith("target area:"):
fields = line.split()
x_region = tuple(int(x) for x in fields[2].strip("x=,").split(".."))
y_region = tuple(int(y) for y in fields[3].strip("y=,").split(".."))
target = {"x":x_region, "y":y_region}
return target
def trial_range(target):
"""Estimate a trial range of velocities."""
assert min(target["x"]) > 0
assert max(target["y"]) < 0
min_vy = min(target["y"])
max_vy = -min(target["y"])
min_vx = 1
max_vx = max(target["x"])
vrange = {"x":(min_vx, max_vx), "y":(min_vy, max_vy)}
return vrange
def fire(v0, target):
"""Returns an array of coordinates that the probe would follow if it was
launched from (0,0) with initial velocity v0. Stop when the probe is
clearly below the target region and has negative y-velocity.
Example: v0 = {"x":10, "y":-2}
"""
assert v0["x"] >= 0
n_points = 5
beyond_the_target = False
points = None
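    # Keep doubling the number of simulated steps until the trajectory has
    # clearly passed below the target while falling, so the flight time never
    # has to be guessed in advance.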
while not beyond_the_target:
n_points *= 2
velocities_y = np.arange(v0["y"], v0["y"] - n_points, -1, dtype=int)
positions_y = np.cumsum(velocities_y)
if (positions_y[-1] < min(target["y"])) and (velocities_y[-1] < 0):
beyond_the_target = True
velocities_x = np.arange(v0["x"], v0["x"] - n_points, -1, dtype=int)
velocities_x = np.maximum(0, velocities_x)
positions_x = np.cumsum(velocities_x)
points = np.transpose(np.array([positions_x, positions_y]))
return points
def is_hit(points, target):
"""For each point [x,y] in the 2D numpy array of points, indicate True
if the point is within the target region.
"""
x_hit = (points[:,0] >= target["x"][0]) & (points[:,0] <= target["x"][1])
y_hit = (points[:,1] >= target["y"][0]) & (points[:,1] <= target["y"][1])
return x_hit & y_hit
##########################################################################
target = get_input()
print("Target region:", target)
vrange = trial_range(target)
print("Trial velocity ranges:", vrange)
max_y = 0 # maximum y of any trajectory that hits the target area
num_velocities_that_hit = 0
for vx in range(vrange["x"][0], vrange["x"][1]+1):
for vy in range(vrange["y"][0], vrange["y"][1]+1):
v0 = {"x":vx, "y":vy}
points = fire(v0, target)
any_hits = np.any(is_hit(points, target))
if any_hits:
max_y = max(max_y, np.max(points[:,1]))
num_velocities_that_hit += 1
print("v0=%s" % str(v0),
"hit=%s" % str(any_hits),
"max_y=%d" % np.max(points[:,1]))
print("Maximum y of any trajectory that hits the target area:", max_y)
print("Number of velocities that hit the target area:", num_velocities_that_hit)
|
[
"numpy.maximum",
"numpy.cumsum",
"numpy.max",
"numpy.array",
"numpy.arange"
] |
[((1280, 1333), 'numpy.arange', 'np.arange', (["v0['y']", "(v0['y'] - n_points)", '(-1)'], {'dtype': 'int'}), "(v0['y'], v0['y'] - n_points, -1, dtype=int)\n", (1289, 1333), True, 'import numpy as np\n'), ((1356, 1379), 'numpy.cumsum', 'np.cumsum', (['velocities_y'], {}), '(velocities_y)\n', (1365, 1379), True, 'import numpy as np\n'), ((1516, 1569), 'numpy.arange', 'np.arange', (["v0['x']", "(v0['x'] - n_points)", '(-1)'], {'dtype': 'int'}), "(v0['x'], v0['x'] - n_points, -1, dtype=int)\n", (1525, 1569), True, 'import numpy as np\n'), ((1593, 1620), 'numpy.maximum', 'np.maximum', (['(0)', 'velocities_x'], {}), '(0, velocities_x)\n', (1603, 1620), True, 'import numpy as np\n'), ((1643, 1666), 'numpy.cumsum', 'np.cumsum', (['velocities_x'], {}), '(velocities_x)\n', (1652, 1666), True, 'import numpy as np\n'), ((1697, 1733), 'numpy.array', 'np.array', (['[positions_x, positions_y]'], {}), '([positions_x, positions_y])\n', (1705, 1733), True, 'import numpy as np\n'), ((2662, 2682), 'numpy.max', 'np.max', (['points[:, 1]'], {}), '(points[:, 1])\n', (2668, 2682), True, 'import numpy as np\n'), ((2832, 2852), 'numpy.max', 'np.max', (['points[:, 1]'], {}), '(points[:, 1])\n', (2838, 2852), True, 'import numpy as np\n')]
|
import numpy as np
class NeuralNetwork():
def __init__(self):
# DO NOT CHANGE PARAMETERS
self.input_to_hidden_weights = np.matrix('1 1; 1 1; 1 1')
self.hidden_to_output_weights = np.matrix('1 1 1')
self.biases = np.matrix('0; 0; 0')
self.learning_rate = .001
self.epochs_to_train = 10
self.training_points = [((2, 1), 10), ((3, 3), 21), ((4, 5), 32), ((6, 6), 42)]
self.testing_points = [(1,1), (2,2), (3,3), (5,5), (10,10)]
def train(self, x1, x2, y):
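        """Run one gradient-descent update on a single training example (x1, x2) -> y."""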
def rectified_linear_unit(x):
return np.maximum(0, x)
def rectified_linear_unit_derivative(x):
if x>0:
return 1
else:
return 0
### Forward propagation ###
        input_values = np.array([[x1], [x2]]).astype('float64')  # column vector, 2 x 1

        # Convert the stored np.matrix parameters to plain float arrays for the math below
        self_input_to_hidden_weights = np.array(self.input_to_hidden_weights).astype('float64')
        self_hidden_to_output_weights = np.array(self.hidden_to_output_weights).astype('float64')
        self_biases = np.array(self.biases).astype('float64')
#Activation functions
relu= np.vectorize(rectified_linear_unit)
relu_dev = np.vectorize(rectified_linear_unit_derivative)
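        # np.vectorize lets the scalar activation/derivative apply elementwise
        # to the (3 x 1) layer arrays below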
        # Calculate the input and activation of the hidden layer
        hidden_layer_weighted_input = np.dot(self_input_to_hidden_weights, input_values) + self_biases  # 3 x 1
        hidden_layer_activation = relu(hidden_layer_weighted_input)  # 3 x 1

        hidden_to_output = np.dot(self_hidden_to_output_weights, hidden_layer_activation)  # 1 x 1
        output = hidden_to_output  # pre-activation of the output unit, 1 x 1
        activated_output = relu(hidden_to_output)  # 1 x 1
        ### Backpropagation ###

        # Gradients for the squared-error loss C = 0.5 * (y - activated_output)**2,
        # propagated back through the chain rule
        output_layer_error = (activated_output - y) * relu_dev(hidden_to_output)  # dC/du, 1 x 1
        hidden_layer_error = np.dot(self_hidden_to_output_weights.T, output_layer_error) * relu_dev(hidden_layer_weighted_input)  # dC/dz, 3 x 1

        bias_gradients = hidden_layer_error  # 3 x 1
        hidden_to_output_weight_gradients = np.dot(output_layer_error, hidden_layer_activation.T)  # 1 x 3
        input_to_hidden_weight_gradients = np.dot(hidden_layer_error, input_values.T)  # 3 x 2

        # Use gradients to adjust weights and biases via gradient descent
        new_biases = self_biases - self.learning_rate * bias_gradients
        new_input_to_hidden_weights = self_input_to_hidden_weights - self.learning_rate * input_to_hidden_weight_gradients
        new_hidden_to_output_weights = self_hidden_to_output_weights - self.learning_rate * hidden_to_output_weight_gradients
        # Convert back to np.matrix so the stored parameter types stay unchanged
self.biases = np.matrix(new_biases)
self.input_to_hidden_weights = np.matrix(new_input_to_hidden_weights)
self.hidden_to_output_weights = np.matrix(new_hidden_to_output_weights)
# Run this to train your neural network once you complete the train method
def train_neural_network(self):
print('Training pairs: ', self.training_points)
print('Starting params: ')
print('')
print('(Input --> Hidden Layer) Weights: ', self.input_to_hidden_weights)
print('(Hidden --> Output Layer) Weights: ', self.hidden_to_output_weights)
print('Biases: ', self.biases)
for epoch in range(self.epochs_to_train):
print('')
print('Epoch ', epoch)
for x,y in self.training_points:
self.train(x[0], x[1], y)
print('(Input --> Hidden Layer) Weights: ', self.input_to_hidden_weights)
print('(Hidden --> Output Layer) Weights: ', self.hidden_to_output_weights)
print('Biases: ', self.biases)
# Instantiate the network and run the training loop
x = NeuralNetwork()
x.train_neural_network()
|
[
"numpy.matrix",
"numpy.vectorize",
"numpy.maximum",
"numpy.array",
"numpy.dot"
] |
[((142, 168), 'numpy.matrix', 'np.matrix', (['"""1 1; 1 1; 1 1"""'], {}), "('1 1; 1 1; 1 1')\n", (151, 168), True, 'import numpy as np\n'), ((209, 227), 'numpy.matrix', 'np.matrix', (['"""1 1 1"""'], {}), "('1 1 1')\n", (218, 227), True, 'import numpy as np\n'), ((250, 270), 'numpy.matrix', 'np.matrix', (['"""0; 0; 0"""'], {}), "('0; 0; 0')\n", (259, 270), True, 'import numpy as np\n'), ((821, 844), 'numpy.matrix', 'np.matrix', (['[[x1], [x2]]'], {}), '([[x1], [x2]])\n', (830, 844), True, 'import numpy as np\n'), ((1253, 1288), 'numpy.vectorize', 'np.vectorize', (['rectified_linear_unit'], {}), '(rectified_linear_unit)\n', (1265, 1288), True, 'import numpy as np\n'), ((1308, 1354), 'numpy.vectorize', 'np.vectorize', (['rectified_linear_unit_derivative'], {}), '(rectified_linear_unit_derivative)\n', (1320, 1354), True, 'import numpy as np\n'), ((1673, 1735), 'numpy.dot', 'np.dot', (['self_hidden_to_output_weights', 'hidden_layer_activation'], {}), '(self_hidden_to_output_weights, hidden_layer_activation)\n', (1679, 1735), True, 'import numpy as np\n'), ((3090, 3111), 'numpy.matrix', 'np.matrix', (['new_biases'], {}), '(new_biases)\n', (3099, 3111), True, 'import numpy as np\n'), ((3151, 3189), 'numpy.matrix', 'np.matrix', (['new_input_to_hidden_weights'], {}), '(new_input_to_hidden_weights)\n', (3160, 3189), True, 'import numpy as np\n'), ((3231, 3270), 'numpy.matrix', 'np.matrix', (['new_hidden_to_output_weights'], {}), '(new_hidden_to_output_weights)\n', (3240, 3270), True, 'import numpy as np\n'), ((593, 609), 'numpy.maximum', 'np.maximum', (['(0)', 'x'], {}), '(0, x)\n', (603, 609), True, 'import numpy as np\n'), ((1467, 1517), 'numpy.dot', 'np.dot', (['self_input_to_hidden_weights', 'input_values'], {}), '(self_input_to_hidden_weights, input_values)\n', (1473, 1517), True, 'import numpy as np\n'), ((876, 898), 'numpy.array', 'np.array', (['[[x1], [x2]]'], {}), '([[x1], [x2]])\n', (884, 898), True, 'import numpy as np\n'), ((992, 1030), 'numpy.array', 'np.array', (['self.input_to_hidden_weights'], {}), '(self.input_to_hidden_weights)\n', (1000, 1030), True, 'import numpy as np\n'), ((1089, 1128), 'numpy.array', 'np.array', (['self.hidden_to_output_weights'], {}), '(self.hidden_to_output_weights)\n', (1097, 1128), True, 'import numpy as np\n'), ((1169, 1190), 'numpy.array', 'np.array', (['self.biases'], {}), '(self.biases)\n', (1177, 1190), True, 'import numpy as np\n')]
|
import numpy as np
from tensorflow.contrib.keras.api.keras.models import Sequential,load_model
from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D
from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense
import cv2
class Classifier():
def __init__(self,img_shape):
self.img_shape=img_shape
def inference(self):
model = Sequential()
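        # Three conv blocks (conv-conv-pool-dropout) followed by a flattened
        # dense layer and a 25-way softmax head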
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=self.img_shape))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(25, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
return model
def save(self,model,model_path):
model.save(model_path)
def load(self,model_path):
model=load_model(model_path)
return model
def train(self,train_images,train_labels_oh,val_images,val_labels_oh,batch_size=64,get_saved=False,save=False,model_path="model.h5",epochs=10,num_classes=25):
if get_saved:
model=self.load(model_path)
else:
model=self.inference()
model.fit(train_images, train_labels_oh, batch_size=batch_size, epochs=epochs, verbose=1,
validation_data=(val_images, val_labels_oh))
if save:
self.save(model,model_path)
return model
    def evaluate(self, model, test_images, test_labels_oh):
        return model.evaluate(test_images, test_labels_oh)
def predict(self,model,image):
image=np.array(image)
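        # Min-max normalize pixel intensities into [0, 1] floats to match the
        # scale of the training inputs (assumed here)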
norm_image = np.zeros_like(image,dtype=np.float)
norm_image=cv2.normalize(image, norm_image, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
norm_image=norm_image.reshape((1,self.img_shape[0],self.img_shape[1],self.img_shape[2]))
label_oh=model.predict(norm_image)
return label_oh
|
[
"numpy.zeros_like",
"tensorflow.contrib.keras.api.keras.layers.Conv2D",
"tensorflow.contrib.keras.api.keras.models.Sequential",
"tensorflow.contrib.keras.api.keras.layers.MaxPooling2D",
"tensorflow.contrib.keras.api.keras.layers.Dense",
"tensorflow.contrib.keras.api.keras.layers.Flatten",
"numpy.array",
"cv2.normalize",
"tensorflow.contrib.keras.api.keras.models.load_model",
"tensorflow.contrib.keras.api.keras.layers.Dropout"
] |
[((408, 420), 'tensorflow.contrib.keras.api.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (418, 420), False, 'from tensorflow.contrib.keras.api.keras.models import Sequential, load_model\n'), ((1527, 1549), 'tensorflow.contrib.keras.api.keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (1537, 1549), False, 'from tensorflow.contrib.keras.api.keras.models import Sequential, load_model\n'), ((2257, 2272), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2265, 2272), True, 'import numpy as np\n'), ((2294, 2330), 'numpy.zeros_like', 'np.zeros_like', (['image'], {'dtype': 'np.float'}), '(image, dtype=np.float)\n', (2307, 2330), True, 'import numpy as np\n'), ((2349, 2447), 'cv2.normalize', 'cv2.normalize', (['image', 'norm_image'], {'alpha': '(0)', 'beta': '(1)', 'norm_type': 'cv2.NORM_MINMAX', 'dtype': 'cv2.CV_32F'}), '(image, norm_image, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_32F)\n', (2362, 2447), False, 'import cv2\n'), ((439, 525), 'tensorflow.contrib.keras.api.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': 'self.img_shape'}), "(32, (3, 3), padding='same', activation='relu', input_shape=self.\n img_shape)\n", (445, 525), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((540, 577), 'tensorflow.contrib.keras.api.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), "(32, (3, 3), activation='relu')\n", (546, 577), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((597, 627), 'tensorflow.contrib.keras.api.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (609, 627), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((647, 660), 'tensorflow.contrib.keras.api.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (654, 660), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n'), ((691, 744), 'tensorflow.contrib.keras.api.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='same', activation='relu')\n", (697, 744), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((764, 801), 'tensorflow.contrib.keras.api.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 3), activation='relu')\n", (770, 801), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((821, 851), 'tensorflow.contrib.keras.api.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (833, 851), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((871, 884), 'tensorflow.contrib.keras.api.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (878, 884), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n'), ((915, 968), 'tensorflow.contrib.keras.api.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, (3, 3), padding='same', activation='relu')\n", (921, 968), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((988, 1025), 'tensorflow.contrib.keras.api.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), "(64, (3, 
3), activation='relu')\n", (994, 1025), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((1045, 1075), 'tensorflow.contrib.keras.api.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1057, 1075), False, 'from tensorflow.contrib.keras.api.keras.layers import Conv2D, MaxPooling2D\n'), ((1095, 1108), 'tensorflow.contrib.keras.api.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1102, 1108), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n'), ((1139, 1148), 'tensorflow.contrib.keras.api.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1146, 1148), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n'), ((1168, 1197), 'tensorflow.contrib.keras.api.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (1173, 1197), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n'), ((1217, 1229), 'tensorflow.contrib.keras.api.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1224, 1229), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n'), ((1249, 1280), 'tensorflow.contrib.keras.api.keras.layers.Dense', 'Dense', (['(25)'], {'activation': '"""softmax"""'}), "(25, activation='softmax')\n", (1254, 1280), False, 'from tensorflow.contrib.keras.api.keras.layers import Dropout, Flatten, Dense\n')]
|
## THIS FUNCTION IS UNUSED - THE ACTIVE VERSION LIES IN hs.py
from numba import double, jit, njit, vectorize
from numba import int32, float32, uint8, float64, int64, boolean
import numpy as np
import time
# Apply a line and step function
from numpy import cos, sin, radians
@njit#@vectorize(["boolean (float32, float32, float32, float32)"])
def line_test(x,y, r, theta):
# function to test if a pixel lies above or below a line segment
# Find start and end points of line segments for different cases of theta and r
    # Line segment to interrogation point
q1x = x
q1y = y
# Vector magnitude cases
theta = theta % 360
if r == 0:
r = 1e-8
# Rotation cases
if theta == 0. or theta == 360.: # vertical to right
x1 = r
x2 = q1x
if x2 > x1:
return False
else:
return True
elif theta == 90.: # horizontal line above
y1 = r
y2 = q1y
if y2>y1:
return False
else:
return True
elif theta == 180.: # vertical to left
x1 = -r
x2 = q1x
if x2 > x1:
return True
else:
return False
elif theta == 270.: # horizontal below
y1 = -r
y2 = q1y
if y2 < y1:
return False
else:
return True
elif theta>0 and theta<180:
theta = radians(theta)
# Tangent line segment
t1x = r*cos(theta)
t1y = r*sin(theta)
m = -1*(cos(theta)/sin(theta))
c = t1y - m*t1x
y1 = q1y
y2 = m*q1x + c
if y1>y2:
return False
else:
return True
elif theta>180 and theta<360:
theta = radians(theta)
# Tangent line segment
t1x = r*cos(theta)
t1y = r*sin(theta)
m = -1*cos(theta)/sin(theta)
c = t1y - m*t1x
y1 = q1y
y2 = m*q1x + c
if y1<y2:
return False
else:
return True
def gen_hsfilter(a,b, r, theta):
# preallocate arrays
subsets = np.stack((a,b),axis=2)
    filter_arr = np.zeros((subsets.shape[0], subsets.shape[0]), dtype=bool)  # np.bool was removed in NumPy 1.24
xc = filter_arr.shape[0]/2
yc = filter_arr.shape[1]/2
r = np.array([r],dtype=np.float32)
theta = np.array([theta],dtype=np.float32)
# Create x and y coordinates which are centred
x_length = np.linspace(-xc, xc,subsets.shape[0], dtype=np.float32)
y_length = np.linspace(-yc,yc,subsets.shape[0], dtype=np.float32)
xs,ys = np.meshgrid(x_length,y_length)
hsfilter = filt_loop(subsets, r[0], theta[0], xs, ys, filter_arr)
return hsfilter
@njit
def filt_loop(subsets, r, t, xs, ys, filter_arr):
# cols = np.arange(subsets.shape[0])
# rows = np.arange(subsets.shape[0])
#print(ys.dtype)
#filter_arr = np.empty((subsets.shape[0],subsets.shape[0]), dtype=np.bool)
xs = xs.astype(np.float32)
ys = ys.astype(np.float32)
# iterate pixel by pixel
for col in range(subsets.shape[0]):
for row in range(subsets.shape[1]):
#rasters through columns and rows for a given coordinate in xy
# Note that y axis is mirrored
x = xs[row, col]
y = np.multiply(ys[row, col],-1)
# Test if pixel is beyond the discontinuity line
filter_arr[row,col] = line_test(float32(x), float32(y), float32(r), float32(t))
return filter_arr
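
# --- Usage sketch (illustrative; not part of the original module) ---
# Build a half-plane mask for two 32x32 subsets with an assumed line at
# r = 4 px, theta = 30 deg; `a` and `b` here are hypothetical random patches.
if __name__ == "__main__":
    a = np.random.rand(32, 32)
    b = np.random.rand(32, 32)
    hsfilter = gen_hsfilter(a, b, r=4.0, theta=30.0)
    print(hsfilter.shape, hsfilter.dtype, hsfilter.mean())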
|
[
"numpy.stack",
"numpy.radians",
"numpy.meshgrid",
"numpy.multiply",
"numba.float32",
"numpy.zeros",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.cos"
] |
[((2085, 2109), 'numpy.stack', 'np.stack', (['(a, b)'], {'axis': '(2)'}), '((a, b), axis=2)\n', (2093, 2109), True, 'import numpy as np\n'), ((2125, 2186), 'numpy.zeros', 'np.zeros', (['(subsets.shape[0], subsets.shape[0])'], {'dtype': 'np.bool'}), '((subsets.shape[0], subsets.shape[0]), dtype=np.bool)\n', (2133, 2186), True, 'import numpy as np\n'), ((2280, 2311), 'numpy.array', 'np.array', (['[r]'], {'dtype': 'np.float32'}), '([r], dtype=np.float32)\n', (2288, 2311), True, 'import numpy as np\n'), ((2323, 2358), 'numpy.array', 'np.array', (['[theta]'], {'dtype': 'np.float32'}), '([theta], dtype=np.float32)\n', (2331, 2358), True, 'import numpy as np\n'), ((2424, 2480), 'numpy.linspace', 'np.linspace', (['(-xc)', 'xc', 'subsets.shape[0]'], {'dtype': 'np.float32'}), '(-xc, xc, subsets.shape[0], dtype=np.float32)\n', (2435, 2480), True, 'import numpy as np\n'), ((2495, 2551), 'numpy.linspace', 'np.linspace', (['(-yc)', 'yc', 'subsets.shape[0]'], {'dtype': 'np.float32'}), '(-yc, yc, subsets.shape[0], dtype=np.float32)\n', (2506, 2551), True, 'import numpy as np\n'), ((2562, 2593), 'numpy.meshgrid', 'np.meshgrid', (['x_length', 'y_length'], {}), '(x_length, y_length)\n', (2573, 2593), True, 'import numpy as np\n'), ((3280, 3309), 'numpy.multiply', 'np.multiply', (['ys[row, col]', '(-1)'], {}), '(ys[row, col], -1)\n', (3291, 3309), True, 'import numpy as np\n'), ((3415, 3425), 'numba.float32', 'float32', (['x'], {}), '(x)\n', (3422, 3425), False, 'from numba import int32, float32, uint8, float64, int64, boolean\n'), ((3427, 3437), 'numba.float32', 'float32', (['y'], {}), '(y)\n', (3434, 3437), False, 'from numba import int32, float32, uint8, float64, int64, boolean\n'), ((3439, 3449), 'numba.float32', 'float32', (['r'], {}), '(r)\n', (3446, 3449), False, 'from numba import int32, float32, uint8, float64, int64, boolean\n'), ((3451, 3461), 'numba.float32', 'float32', (['t'], {}), '(t)\n', (3458, 3461), False, 'from numba import int32, float32, uint8, float64, int64, boolean\n'), ((1396, 1410), 'numpy.radians', 'radians', (['theta'], {}), '(theta)\n', (1403, 1410), False, 'from numpy import cos, sin, radians\n'), ((1458, 1468), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (1461, 1468), False, 'from numpy import cos, sin, radians\n'), ((1485, 1495), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (1488, 1495), False, 'from numpy import cos, sin, radians\n'), ((1730, 1744), 'numpy.radians', 'radians', (['theta'], {}), '(theta)\n', (1737, 1744), False, 'from numpy import cos, sin, radians\n'), ((1512, 1522), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (1515, 1522), False, 'from numpy import cos, sin, radians\n'), ((1523, 1533), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (1526, 1533), False, 'from numpy import cos, sin, radians\n'), ((1792, 1802), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (1795, 1802), False, 'from numpy import cos, sin, radians\n'), ((1819, 1829), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (1822, 1829), False, 'from numpy import cos, sin, radians\n'), ((1856, 1866), 'numpy.sin', 'sin', (['theta'], {}), '(theta)\n', (1859, 1866), False, 'from numpy import cos, sin, radians\n'), ((1845, 1855), 'numpy.cos', 'cos', (['theta'], {}), '(theta)\n', (1848, 1855), False, 'from numpy import cos, sin, radians\n')]
|
from __future__ import absolute_import
import numpy as np
import os
import unittest
from numpy.testing import assert_array_almost_equal
from .. import parse_spectrum
FIXTURE_PATH = os.path.dirname(__file__)
FIXTURE_DATA = np.array([[0.4,3.2],[1.2,2.7],[2.0,5.4]])
class TextFormatTests(unittest.TestCase):
def test_tsv_data(self):
x = parse_spectrum(os.path.join(FIXTURE_PATH, 'fixture.tsv'))
assert_array_almost_equal(x, FIXTURE_DATA)
def test_csv_data(self):
x = parse_spectrum(os.path.join(FIXTURE_PATH, 'fixture.csv'))
assert_array_almost_equal(x, FIXTURE_DATA)
def test_loose_data(self):
x = parse_spectrum(os.path.join(FIXTURE_PATH, 'fixture.txt'))
assert_array_almost_equal(x, FIXTURE_DATA)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.path.dirname",
"numpy.array",
"numpy.testing.assert_array_almost_equal",
"os.path.join"
] |
[((183, 208), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (198, 208), False, 'import os\n'), ((224, 270), 'numpy.array', 'np.array', (['[[0.4, 3.2], [1.2, 2.7], [2.0, 5.4]]'], {}), '([[0.4, 3.2], [1.2, 2.7], [2.0, 5.4]])\n', (232, 270), True, 'import numpy as np\n'), ((764, 779), 'unittest.main', 'unittest.main', ([], {}), '()\n', (777, 779), False, 'import unittest\n'), ((407, 449), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['x', 'FIXTURE_DATA'], {}), '(x, FIXTURE_DATA)\n', (432, 449), False, 'from numpy.testing import assert_array_almost_equal\n'), ((548, 590), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['x', 'FIXTURE_DATA'], {}), '(x, FIXTURE_DATA)\n', (573, 590), False, 'from numpy.testing import assert_array_almost_equal\n'), ((691, 733), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['x', 'FIXTURE_DATA'], {}), '(x, FIXTURE_DATA)\n', (716, 733), False, 'from numpy.testing import assert_array_almost_equal\n'), ((360, 401), 'os.path.join', 'os.path.join', (['FIXTURE_PATH', '"""fixture.tsv"""'], {}), "(FIXTURE_PATH, 'fixture.tsv')\n", (372, 401), False, 'import os\n'), ((501, 542), 'os.path.join', 'os.path.join', (['FIXTURE_PATH', '"""fixture.csv"""'], {}), "(FIXTURE_PATH, 'fixture.csv')\n", (513, 542), False, 'import os\n'), ((644, 685), 'os.path.join', 'os.path.join', (['FIXTURE_PATH', '"""fixture.txt"""'], {}), "(FIXTURE_PATH, 'fixture.txt')\n", (656, 685), False, 'import os\n')]
|
from typing import Dict, List, Union, Any
import numpy as np
import numpy.linalg as la
from graphik.robots import RobotPlanar
from graphik.graphs.graph_base import ProblemGraph
from graphik.utils import *
from liegroups.numpy import SE2, SO2
import networkx as nx
from numpy import cos, pi
from math import sqrt
class ProblemGraphPlanar(ProblemGraph):
def __init__(self, robot: RobotPlanar, params: Dict = {}):
super(ProblemGraphPlanar, self).__init__(robot, params)
#
base = self.base_subgraph()
structure = self.structure_subgraph()
composition = nx.compose(base, structure)
self.add_nodes_from(composition.nodes(data=True))
self.add_edges_from(composition.edges(data=True))
self.set_limits()
self.root_angle_limits()
def base_subgraph(self) -> nx.DiGraph:
base = nx.DiGraph([("p0", "x"), ("p0", "y"), ("x", "y")])
# Invert x axis because of the way joint limits are set up, makes no difference
base.add_nodes_from(
[
("p0", {POS: np.array([0, 0]), TYPE: [BASE, ROBOT]}),
("x", {POS: np.array([-1, 0]), TYPE: [BASE]}),
("y", {POS: np.array([0, 1]), TYPE: [BASE]}),
]
)
for u, v in base.edges():
base[u][v][DIST] = la.norm(base.nodes[u][POS] - base.nodes[v][POS])
base[u][v][LOWER] = base[u][v][DIST]
base[u][v][UPPER] = base[u][v][DIST]
base[u][v][BOUNDED] = []
return base
def structure_subgraph(self) -> nx.DiGraph:
robot = self.robot
end_effectors = self.robot.end_effectors
kinematic_map = self.robot.kinematic_map
structure = nx.empty_graph(create_using=nx.DiGraph)
for ee in end_effectors:
k_map = kinematic_map[ROOT][ee]
for idx in range(len(k_map)):
cur = k_map[idx]
cur_pos = robot.nodes[cur]["T0"].trans
# Add nodes for joint and edge between them
structure.add_nodes_from([(cur, {POS: cur_pos, TYPE: [ROBOT]})])
if cur == ROOT:
structure.nodes[cur][TYPE] += [BASE]
            # If there exists a preceding joint, connect it to new
if idx != 0:
pred = k_map[idx - 1]
dist = la.norm(
structure.nodes[cur][POS] - structure.nodes[pred][POS]
)
structure.add_edge(
pred,
cur,
**{DIST: dist, LOWER: dist, UPPER: dist, BOUNDED: []},
)
if cur in self.robot.end_effectors:
structure.nodes[cur][TYPE] += [END_EFFECTOR]
structure.nodes[pred][TYPE] += [END_EFFECTOR]
# Delete positions used for weights
for u in structure.nodes:
del structure.nodes[u][POS]
return structure
def root_angle_limits(self):
ax = "x"
S = self.structure
l1 = la.norm(self.nodes[ax][POS])
for node in S.successors(ROOT):
if DIST in S[ROOT][node]:
l2 = S[ROOT][node][DIST]
lb = self.robot.lb[node]
ub = self.robot.ub[node]
lim = max(abs(ub), abs(lb))
# Assumes bounds are less than pi in magnitude
self.add_edge(ax, node)
self[ax][node][UPPER] = l1 + l2
self[ax][node][LOWER] = sqrt(
l1 ** 2 + l2 ** 2 - 2 * l1 * l2 * cos(pi - lim)
)
self[ax][node][BOUNDED] = BELOW
def set_limits(self):
"""
Sets known bounds on the distances between joints.
This is induced by link length and joint limits.
"""
S = self.structure
for u in S:
# direct successors are fully known
for v in (suc for suc in S.successors(u) if suc):
self[u][v][UPPER] = S[u][v][DIST]
self[u][v][LOWER] = S[u][v][DIST]
for v in (des for des in level2_descendants(S, u) if des):
ids = self.robot.kinematic_map[u][v] # TODO generate this at init
l1 = self.robot.l[ids[1]]
l2 = self.robot.l[ids[2]]
lb = self.robot.lb[ids[2]] # symmetric limit
ub = self.robot.ub[ids[2]] # symmetric limit
lim = max(abs(ub), abs(lb))
self.add_edge(u, v)
self[u][v][UPPER] = l1 + l2
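                # law of cosines: bending the intermediate joint to its limit
                # gives the shortest distance sqrt(l1^2 + l2^2 - 2*l1*l2*cos(pi - lim))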
self[u][v][LOWER] = sqrt(
l1 ** 2 + l2 ** 2 - 2 * l1 * l2 * cos(pi - lim)
)
self[u][v][BOUNDED] = BELOW
def _pose_goal(self, T_goal: Dict[str, SE2]) -> Dict[str, ArrayLike]:
pos = {}
for u, T_goal_u in T_goal.items():
for v in self.structure.predecessors(u):
if DIST in self[v][u]:
d = self[v][u][DIST]
z = T_goal_u.rot.as_matrix()[0:2, 0]
pos[u] = T_goal_u.trans
pos[v] = T_goal_u.trans - z * d
return pos
def joint_variables(self, G: nx.Graph) -> Dict[str, float]:
"""
Finds the set of decision variables corresponding to the
graph realization G.
:param G: networkx.DiGraph with known vertex positions
        :returns: dictionary mapping joint names to joint angles
        :rtype: Dict[str, float]
"""
joint_variables = {}
# resolve rotation of entire point set
        R_, t_ = best_fit_transform(
            np.vstack((G.nodes[ROOT][POS], G.nodes["x"][POS], G.nodes["y"][POS])),
            np.vstack(([0, 0], [-1, 0], [0, 1])),
        )
R = {ROOT: SO2.identity()}
for u, v, dat in self.structure.edges(data=DIST):
if dat:
diff_uv = R_.dot(G.nodes[v][POS] - G.nodes[u][POS])
len_uv = np.linalg.norm(diff_uv)
sol = R[u].as_matrix().T.dot(diff_uv/len_uv)
                theta_idx = np.arctan2(sol[1], sol[0])  # np.math alias is removed in NumPy 2.0
joint_variables[v] = wraptopi(theta_idx)
Rz = SO2.from_angle(theta_idx)
R[v] = R[u].dot(Rz)
return joint_variables
def get_pose(
self, joint_angles: Dict[str, float], query_node: Union[List[str], str]
) -> Union[Dict[str, SE2], SE2]:
return self.robot.pose(joint_angles, query_node)
|
[
"numpy.math.atan2",
"liegroups.numpy.SO2.identity",
"numpy.linalg.norm",
"liegroups.numpy.SO2.from_angle",
"networkx.compose",
"networkx.empty_graph",
"numpy.array",
"numpy.cos",
"networkx.DiGraph",
"numpy.vstack"
] |
[((597, 624), 'networkx.compose', 'nx.compose', (['base', 'structure'], {}), '(base, structure)\n', (607, 624), True, 'import networkx as nx\n'), ((860, 910), 'networkx.DiGraph', 'nx.DiGraph', (["[('p0', 'x'), ('p0', 'y'), ('x', 'y')]"], {}), "([('p0', 'x'), ('p0', 'y'), ('x', 'y')])\n", (870, 910), True, 'import networkx as nx\n'), ((1728, 1767), 'networkx.empty_graph', 'nx.empty_graph', ([], {'create_using': 'nx.DiGraph'}), '(create_using=nx.DiGraph)\n', (1742, 1767), True, 'import networkx as nx\n'), ((3121, 3149), 'numpy.linalg.norm', 'la.norm', (['self.nodes[ax][POS]'], {}), '(self.nodes[ax][POS])\n', (3128, 3149), True, 'import numpy.linalg as la\n'), ((1328, 1376), 'numpy.linalg.norm', 'la.norm', (['(base.nodes[u][POS] - base.nodes[v][POS])'], {}), '(base.nodes[u][POS] - base.nodes[v][POS])\n', (1335, 1376), True, 'import numpy.linalg as la\n'), ((5681, 5750), 'numpy.vstack', 'np.vstack', (["(G.nodes[ROOT][POS], G.nodes['x'][POS], G.nodes['y'][POS])"], {}), "((G.nodes[ROOT][POS], G.nodes['x'][POS], G.nodes['y'][POS]))\n", (5690, 5750), True, 'import numpy as np\n'), ((5883, 5919), 'numpy.vstack', 'np.vstack', (['([0, 0], [-1, 0], [0, 1])'], {}), '(([0, 0], [-1, 0], [0, 1]))\n', (5892, 5919), True, 'import numpy as np\n'), ((5938, 5952), 'liegroups.numpy.SO2.identity', 'SO2.identity', ([], {}), '()\n', (5950, 5952), False, 'from liegroups.numpy import SE2, SO2\n'), ((6126, 6149), 'numpy.linalg.norm', 'np.linalg.norm', (['diff_uv'], {}), '(diff_uv)\n', (6140, 6149), True, 'import numpy as np\n'), ((6239, 6268), 'numpy.math.atan2', 'np.math.atan2', (['sol[1]', 'sol[0]'], {}), '(sol[1], sol[0])\n', (6252, 6268), True, 'import numpy as np\n'), ((6347, 6372), 'liegroups.numpy.SO2.from_angle', 'SO2.from_angle', (['theta_idx'], {}), '(theta_idx)\n', (6361, 6372), False, 'from liegroups.numpy import SE2, SO2\n'), ((2378, 2441), 'numpy.linalg.norm', 'la.norm', (['(structure.nodes[cur][POS] - structure.nodes[pred][POS])'], {}), '(structure.nodes[cur][POS] - structure.nodes[pred][POS])\n', (2385, 2441), True, 'import numpy.linalg as la\n'), ((1072, 1088), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (1080, 1088), True, 'import numpy as np\n'), ((1141, 1158), 'numpy.array', 'np.array', (['[-1, 0]'], {}), '([-1, 0])\n', (1149, 1158), True, 'import numpy as np\n'), ((1204, 1220), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1212, 1220), True, 'import numpy as np\n'), ((3647, 3660), 'numpy.cos', 'cos', (['(pi - lim)'], {}), '(pi - lim)\n', (3650, 3660), False, 'from numpy import cos, pi\n'), ((4733, 4746), 'numpy.cos', 'cos', (['(pi - lim)'], {}), '(pi - lim)\n', (4736, 4746), False, 'from numpy import cos, pi\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 2 19:07:01 2021
@author: wyattpetryshen
"""
# Code templates for Ornstein-Uhlenbeck process and Brownian motion are from IPython Interactive Computing and Visualization Cookbook, Second Edition (2018), by <NAME>.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from scipy import stats
import time as timetime
import random
import itertools
#Calculate angle between vectors
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def vectorTransform(end_Point,old_origin):
""" Returns vector translated to origin."""
newP = np.subtract(end_Point,old_origin)
return newP
def angle_between(v1, v2):
""" Returns the angle in degrees between vectors 'v1' and 'v2'."""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.degrees(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))
#Helper: bin a series and average within each bin (used for time-averaging below)
def binVals(array,step):
""" Bins array and calculates mean for specified bin size."""
start = 0
step = step
stop = step
iterations = int(len(array)/step)
meanvals = []
for i in np.arange(0,iterations):
tempbin = array[start:stop]
meanbin = np.mean(tempbin)
start = start + step
stop = stop + step
meanvals.append(meanbin)
return(meanvals)
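# e.g. binVals(np.arange(10), 5) -> [2.0, 7.0]: two bins of width 5, each reduced to its mean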
#Code for figure 1 in supplementary information
#Change the parameters accordingly
#Sine wave
sample_rate = 1000
time = np.arange(0, 10, 1/sample_rate)
frequency = 0.1
amplitude = 4
theta = 0
sinewave = amplitude * np.sin(2 * np.pi * frequency * time + theta)
##Model parameters
sigma = 1 #standard deviation
mu = 0 #mean
tau = 0.05 #time constant
##simulation parameters
dt = 0.0001 #Time step
T = 1 #Total time
n = int(T/dt) #Number of steps
t = np.linspace(0., T, n) #Vector of times
##Calculated randomized variables
sigma_bis = sigma * np.sqrt(2. / tau)
sqrtdt = np.sqrt(dt)
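# The loops below integrate the OU SDE dx = -(x - target)/tau dt + sigma*sqrt(2/tau) dW
# with the Euler-Maruyama scheme, where the target mean is the sine wave above.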
#Plot of Sine wave
plt.plot(time,sinewave)
plt.title(r'SineWave with amplitude = {}, frequency = {}'.format(amplitude,frequency))
plt.axis([0, 10, -4, 4])
#Random Drift
for iters in range(100):
##Store results
x = np.zeros(n)
#Euler-Maruyama method
for i in range(n - 1):
x[i + 1] = x[i] + dt * (-(x[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
array = x
plt.plot(time,array,linewidth=0.1)
plt.title(r'OH Drift with amplitude = {}, frequency = {}'.format(amplitude,frequency))
#Time-averaged drift
for iters in range(100):
##Store results
x = np.zeros(n)
#Euler-Maruyama method
for i in range(n - 1):
x[i + 1] = x[i] + dt * (-(x[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
array = x
meanarray = binVals(array,int(sample_rate))
plt.plot(time[int(sample_rate/2):-1:int(sample_rate)],meanarray,linewidth=0.1)
plt.scatter(time[int(sample_rate/2):-1:int(sample_rate)],meanarray,linewidth=0.1)
plt.title(r'OH Drift time-averaged with amplitude = {}, frequency = {}'.format(amplitude,frequency))
plt.axis([0, 10, -6, 6])
#plt.plot(time,x)
#Iterate OH means and calculate the angle between vectors
start_time = timetime.time()
angle_list = []
for iters in range(100):
x1 = np.zeros(n)
x2 = np.zeros(n)
for i in range(n - 1):
x1[i + 1] = x1[i] + dt * (-(x1[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
x2[i + 1] = x2[i] + dt * (-(x2[i] - sinewave[i]) / tau) + sigma_bis * sqrtdt * np.random.randn()
meanarray1, meanarray2 = binVals(x1,int(sample_rate)),binVals(x2,int(sample_rate))
for j in np.arange(1,len(meanarray1)):
if j != len(meanarray1)-1:
Idx_O = j
Idx_E = j+1
v1 = vectorTransform((time[int(sample_rate/2):-1:int(sample_rate)][Idx_O],meanarray1[Idx_O]),(time[int(sample_rate/2):-1:int(sample_rate)][Idx_E],meanarray1[Idx_E]))
v2 = vectorTransform((time[int(sample_rate/2):-1:int(sample_rate)][Idx_O],meanarray2[Idx_O]),(time[int(sample_rate/2):-1:int(sample_rate)][Idx_E],meanarray2[Idx_E]))
vector_angle = angle_between(v1,v2)
angle_list.append(vector_angle)
else:
pass
plt.hist(angle_list, bins = np.arange(0,180,5))
plt.xlabel('Angle')
plt.ylabel('Probability')
plt.title(r'Histogram of OH Trait Drift a=4, frequency=0.1')
print("--- %s seconds ---" % (timetime.time() - start_time))
###Brownian motion
#simulation parameters
n = 100000 #time steps
#Two one-dimensional walks that combine into a two-dimensional walk
x = np.cumsum(np.random.randn(n))
y = np.cumsum(np.random.randn(n))
xP = np.cumsum(np.random.randn(n))
yP = np.cumsum(np.random.randn(n))
# We add k intermediary points between two
# successive points. We interpolate x and y.
k = 50
x2 = np.interp(np.arange(n * k), np.arange(n) * k, x)
y2 = np.interp(np.arange(n * k), np.arange(n) * k, y)
xP2 = np.interp(np.arange(n * k), np.arange(n) * k, xP)
yP2 = np.interp(np.arange(n * k), np.arange(n) * k, yP)
# Now, we draw our points with a gradient of colors.
fig, ax = plt.subplots(1, 1, figsize=(8, 8))
plt.scatter(x2, y2, c=range(n * k), linewidths=0,
marker='o', s=3, cmap=plt.cm.jet,)
plt.axis('equal')
plt.scatter(xP2, yP2, c = range(n*k), linewidths=0,
marker='o', s=3, cmap=plt.cm.jet,)
plt.plot(xP, yP)
plt.plot(x2, y2)
#Brownian Time-averaged drift for single lineages
for iters in range(1000):
##Store results
n = 1000 #time steps
    #Two one-dimensional walks that combine into a two-dimensional walk
x = np.cumsum(np.random.randn(n))
y = np.cumsum(np.random.randn(n))
#Find mean values
meanx = binVals(x,int(100))
meany = binVals(y,int(100))
#plot
plt.plot(meanx,meany,linewidth=0.5)
plt.scatter(meanx,meany,linewidth=0.1, s = 4)
plt.title(r'Brownian Drift time-averaged with equal rates')
#plt.axis([0, 10, -6, 6])
#Iterate BM means and calculate the angle between vectors
start_time = timetime.time()
BW_angle_list = []
for iters in range(10000):
runs = 1000 #time steps
Tavg = 100 #Average years; for random number use random.randrange(i,j)
rate_One = 1 #rate change
rate_Two = 10 #rate change
x, y = np.cumsum(np.random.randn(runs * rate_One)), np.cumsum(np.random.randn(runs * rate_One))
x2, y2 = np.cumsum(np.random.randn(runs * rate_Two)), np.cumsum(np.random.randn(runs * rate_Two))
meanx,meany = binVals(x,int(Tavg)*rate_One),binVals(y,int(Tavg)*rate_One)
meanx2,meany2 = binVals(x2,int(Tavg)*rate_Two),binVals(y2,int(Tavg)*rate_Two)
for j in np.arange(1,len(meanx)):
if j != len(meanx)-1:
Idx_O = j
Idx_E = j+1
v1 = vectorTransform((meanx[Idx_O],meany[Idx_O]),(meanx[Idx_E],meany[Idx_E]))
v2 = vectorTransform((meanx2[Idx_O],meany2[Idx_O]),(meanx2[Idx_E],meany2[Idx_E]))
vector_angle = angle_between(v1,v2)
BW_angle_list.append(vector_angle)
else:
pass
plt.hist(BW_angle_list, bins = np.arange(0,180,1))
plt.xlabel('Angle')
plt.ylabel('Probability')
plt.title(r'Histogram of BW Parallelism')
print("--- %s seconds ---" % (timetime.time() - start_time))
|
[
"matplotlib.pyplot.title",
"numpy.subtract",
"matplotlib.pyplot.plot",
"numpy.random.randn",
"matplotlib.pyplot.scatter",
"numpy.zeros",
"matplotlib.pyplot.axis",
"time.time",
"numpy.sin",
"numpy.arange",
"numpy.linalg.norm",
"numpy.linspace",
"numpy.mean",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots",
"numpy.sqrt"
] |
[((1568, 1601), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(1 / sample_rate)'], {}), '(0, 10, 1 / sample_rate)\n', (1577, 1601), True, 'import numpy as np\n'), ((1904, 1926), 'numpy.linspace', 'np.linspace', (['(0.0)', 'T', 'n'], {}), '(0.0, T, n)\n', (1915, 1926), True, 'import numpy as np\n'), ((2026, 2037), 'numpy.sqrt', 'np.sqrt', (['dt'], {}), '(dt)\n', (2033, 2037), True, 'import numpy as np\n'), ((2058, 2082), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'sinewave'], {}), '(time, sinewave)\n', (2066, 2082), True, 'import matplotlib.pyplot as plt\n'), ((2169, 2193), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 10, -4, 4]'], {}), '([0, 10, -4, 4])\n', (2177, 2193), True, 'import matplotlib.pyplot as plt\n'), ((3277, 3292), 'time.time', 'timetime.time', ([], {}), '()\n', (3290, 3292), True, 'import time as timetime\n'), ((4351, 4370), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle"""'], {}), "('Angle')\n", (4361, 4370), True, 'import matplotlib.pyplot as plt\n'), ((4371, 4396), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (4381, 4396), True, 'import matplotlib.pyplot as plt\n'), ((4397, 4456), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of OH Trait Drift a=4, frequency=0.1"""'], {}), "('Histogram of OH Trait Drift a=4, frequency=0.1')\n", (4406, 4456), True, 'import matplotlib.pyplot as plt\n'), ((5181, 5215), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 8)'}), '(1, 1, figsize=(8, 8))\n', (5193, 5215), True, 'import matplotlib.pyplot as plt\n'), ((5312, 5329), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (5320, 5329), True, 'import matplotlib.pyplot as plt\n'), ((5430, 5446), 'matplotlib.pyplot.plot', 'plt.plot', (['xP', 'yP'], {}), '(xP, yP)\n', (5438, 5446), True, 'import matplotlib.pyplot as plt\n'), ((5447, 5463), 'matplotlib.pyplot.plot', 'plt.plot', (['x2', 'y2'], {}), '(x2, y2)\n', (5455, 5463), True, 'import matplotlib.pyplot as plt\n'), ((6094, 6109), 'time.time', 'timetime.time', ([], {}), '()\n', (6107, 6109), True, 'import time as timetime\n'), ((7157, 7176), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Angle"""'], {}), "('Angle')\n", (7167, 7176), True, 'import matplotlib.pyplot as plt\n'), ((7177, 7202), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (7187, 7202), True, 'import matplotlib.pyplot as plt\n'), ((7203, 7243), 'matplotlib.pyplot.title', 'plt.title', (['"""Histogram of BW Parallelism"""'], {}), "('Histogram of BW Parallelism')\n", (7212, 7243), True, 'import matplotlib.pyplot as plt\n'), ((716, 750), 'numpy.subtract', 'np.subtract', (['end_Point', 'old_origin'], {}), '(end_Point, old_origin)\n', (727, 750), True, 'import numpy as np\n'), ((1240, 1264), 'numpy.arange', 'np.arange', (['(0)', 'iterations'], {}), '(0, iterations)\n', (1249, 1264), True, 'import numpy as np\n'), ((1663, 1707), 'numpy.sin', 'np.sin', (['(2 * np.pi * frequency * time + theta)'], {}), '(2 * np.pi * frequency * time + theta)\n', (1669, 1707), True, 'import numpy as np\n'), ((1999, 2017), 'numpy.sqrt', 'np.sqrt', (['(2.0 / tau)'], {}), '(2.0 / tau)\n', (2006, 2017), True, 'import numpy as np\n'), ((2262, 2273), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2270, 2273), True, 'import numpy as np\n'), ((2448, 2484), 'matplotlib.pyplot.plot', 'plt.plot', (['time', 'array'], {'linewidth': '(0.1)'}), '(time, array, linewidth=0.1)\n', (2456, 2484), True, 'import matplotlib.pyplot as plt\n'), ((2649, 2660), 
'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2657, 2660), True, 'import numpy as np\n'), ((3157, 3181), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 10, -6, 6]'], {}), '([0, 10, -6, 6])\n', (3165, 3181), True, 'import matplotlib.pyplot as plt\n'), ((3343, 3354), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3351, 3354), True, 'import numpy as np\n'), ((3364, 3375), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (3372, 3375), True, 'import numpy as np\n'), ((4674, 4692), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (4689, 4692), True, 'import numpy as np\n'), ((4708, 4726), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (4723, 4726), True, 'import numpy as np\n'), ((4744, 4762), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (4759, 4762), True, 'import numpy as np\n'), ((4779, 4797), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (4794, 4797), True, 'import numpy as np\n'), ((4912, 4928), 'numpy.arange', 'np.arange', (['(n * k)'], {}), '(n * k)\n', (4921, 4928), True, 'import numpy as np\n'), ((4966, 4982), 'numpy.arange', 'np.arange', (['(n * k)'], {}), '(n * k)\n', (4975, 4982), True, 'import numpy as np\n'), ((5021, 5037), 'numpy.arange', 'np.arange', (['(n * k)'], {}), '(n * k)\n', (5030, 5037), True, 'import numpy as np\n'), ((5077, 5093), 'numpy.arange', 'np.arange', (['(n * k)'], {}), '(n * k)\n', (5086, 5093), True, 'import numpy as np\n'), ((5841, 5878), 'matplotlib.pyplot.plot', 'plt.plot', (['meanx', 'meany'], {'linewidth': '(0.5)'}), '(meanx, meany, linewidth=0.5)\n', (5849, 5878), True, 'import matplotlib.pyplot as plt\n'), ((5881, 5926), 'matplotlib.pyplot.scatter', 'plt.scatter', (['meanx', 'meany'], {'linewidth': '(0.1)', 's': '(4)'}), '(meanx, meany, linewidth=0.1, s=4)\n', (5892, 5926), True, 'import matplotlib.pyplot as plt\n'), ((5931, 5989), 'matplotlib.pyplot.title', 'plt.title', (['"""Brownian Drift time-averaged with equal rates"""'], {}), "('Brownian Drift time-averaged with equal rates')\n", (5940, 5989), True, 'import matplotlib.pyplot as plt\n'), ((590, 612), 'numpy.linalg.norm', 'np.linalg.norm', (['vector'], {}), '(vector)\n', (604, 612), True, 'import numpy as np\n'), ((1319, 1335), 'numpy.mean', 'np.mean', (['tempbin'], {}), '(tempbin)\n', (1326, 1335), True, 'import numpy as np\n'), ((4331, 4351), 'numpy.arange', 'np.arange', (['(0)', '(180)', '(5)'], {}), '(0, 180, 5)\n', (4340, 4351), True, 'import numpy as np\n'), ((4930, 4942), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4939, 4942), True, 'import numpy as np\n'), ((4984, 4996), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (4993, 4996), True, 'import numpy as np\n'), ((5039, 5051), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5048, 5051), True, 'import numpy as np\n'), ((5095, 5107), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (5104, 5107), True, 'import numpy as np\n'), ((5683, 5701), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (5698, 5701), True, 'import numpy as np\n'), ((5721, 5739), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (5736, 5739), True, 'import numpy as np\n'), ((7137, 7157), 'numpy.arange', 'np.arange', (['(0)', '(180)', '(1)'], {}), '(0, 180, 1)\n', (7146, 7157), True, 'import numpy as np\n'), ((4488, 4503), 'time.time', 'timetime.time', ([], {}), '()\n', (4501, 4503), True, 'import time as timetime\n'), ((6341, 6373), 'numpy.random.randn', 'np.random.randn', (['(runs * rate_One)'], {}), '(runs * rate_One)\n', (6356, 6373), True, 'import numpy as 
np\n'), ((6386, 6418), 'numpy.random.randn', 'np.random.randn', (['(runs * rate_One)'], {}), '(runs * rate_One)\n', (6401, 6418), True, 'import numpy as np\n'), ((6443, 6475), 'numpy.random.randn', 'np.random.randn', (['(runs * rate_Two)'], {}), '(runs * rate_Two)\n', (6458, 6475), True, 'import numpy as np\n'), ((6488, 6520), 'numpy.random.randn', 'np.random.randn', (['(runs * rate_Two)'], {}), '(runs * rate_Two)\n', (6503, 6520), True, 'import numpy as np\n'), ((7275, 7290), 'time.time', 'timetime.time', ([], {}), '()\n', (7288, 7290), True, 'import time as timetime\n'), ((959, 977), 'numpy.dot', 'np.dot', (['v1_u', 'v2_u'], {}), '(v1_u, v2_u)\n', (965, 977), True, 'import numpy as np\n'), ((2412, 2429), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2427, 2429), True, 'import numpy as np\n'), ((2799, 2816), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (2814, 2816), True, 'import numpy as np\n'), ((3490, 3507), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3505, 3507), True, 'import numpy as np\n'), ((3595, 3612), 'numpy.random.randn', 'np.random.randn', ([], {}), '()\n', (3610, 3612), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
fast_file_name = 'photobleaching_mixture00_grid.csv'
slow_file_name = 'photobleaching_mixture01_grid.csv'
data_fast = np.genfromtxt(fast_file_name, delimiter = ',', skip_header = True)
data_slow = np.genfromtxt(slow_file_name, delimiter = ',', skip_header = True)
# features are the same in both cases
features = data_fast[:, :-1]
fast_targets = data_fast[:, -1]
slow_targets = data_slow[:, -1]  # was data_fast[:, -1]: copy-paste bug
data = {'features': features, 'fast_targets': fast_targets, 'slow_targets': slow_targets }
print(data)
print(data['features'].shape, data['fast_targets'].shape, data['slow_targets'].shape)
with open('dataset.pkl', 'wb') as content:
pickle.dump(data, content)
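
# Quick sanity check (illustrative; not in the original script): reload the
# pickle and confirm the arrays round-trip with matching shapes.
with open('dataset.pkl', 'rb') as content:
    loaded = pickle.load(content)
assert loaded['features'].shape[0] == loaded['fast_targets'].shape[0]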
|
[
"pickle.dump",
"numpy.genfromtxt"
] |
[((231, 293), 'numpy.genfromtxt', 'np.genfromtxt', (['fast_file_name'], {'delimiter': '""","""', 'skip_header': '(True)'}), "(fast_file_name, delimiter=',', skip_header=True)\n", (244, 293), True, 'import numpy as np\n'), ((310, 372), 'numpy.genfromtxt', 'np.genfromtxt', (['slow_file_name'], {'delimiter': '""","""', 'skip_header': '(True)'}), "(slow_file_name, delimiter=',', skip_header=True)\n", (323, 372), True, 'import numpy as np\n'), ((752, 778), 'pickle.dump', 'pickle.dump', (['data', 'content'], {}), '(data, content)\n', (763, 778), False, 'import pickle\n')]
|
"""
do gradients flow into vqvae codebook?
"""
import torch
from torch import nn, optim, autograd
import numpy as np
import math, time
def run():
num_codes = 5
N = 7
K = 3
np.random.seed(123)
torch.manual_seed(123)
Z = torch.from_numpy(np.random.choice(num_codes, N, replace=True))
print('Z', Z)
codebook = nn.Parameter(torch.rand(num_codes, 3))
# inputs = nn.Parametertorch.rand
codebook_out = codebook[Z]
print('codebook_out.requires_grad', codebook_out.requires_grad)
target_out = torch.rand(N, K)
loss = (codebook_out - target_out).pow(2).mean()
loss.backward()
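    # codebook_out is a non-leaf tensor, so its .grad prints None below (PyTorch
    # only populates .grad on leaf tensors unless retain_grad() is called);
    # codebook.grad is populated, i.e. gradients do flow into the codebook.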
print('codebook_out.grad', codebook_out.grad)
print('codebook.grad', codebook.grad)
if __name__ == '__main__':
run()
|
[
"torch.manual_seed",
"numpy.random.choice",
"numpy.random.seed",
"torch.rand"
] |
[((189, 208), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (203, 208), True, 'import numpy as np\n'), ((213, 235), 'torch.manual_seed', 'torch.manual_seed', (['(123)'], {}), '(123)\n', (230, 235), False, 'import torch\n'), ((534, 550), 'torch.rand', 'torch.rand', (['N', 'K'], {}), '(N, K)\n', (544, 550), False, 'import torch\n'), ((261, 305), 'numpy.random.choice', 'np.random.choice', (['num_codes', 'N'], {'replace': '(True)'}), '(num_codes, N, replace=True)\n', (277, 305), True, 'import numpy as np\n'), ((353, 377), 'torch.rand', 'torch.rand', (['num_codes', '(3)'], {}), '(num_codes, 3)\n', (363, 377), False, 'import torch\n')]
|
from typing import List
import numpy as np
Tensor = List[float]
def single_output(xdata: List[Tensor], ydata: List[Tensor]) -> List[Tensor]:
    xdata = np.asarray(xdata)
    ydata = np.asarray(ydata)
    # The original snippet ends without a return; returning the coerced arrays
    # is an assumption consistent with the annotated return type.
    return [xdata, ydata]
|
[
"numpy.asarray"
] |
[((155, 172), 'numpy.asarray', 'np.asarray', (['xdata'], {}), '(xdata)\n', (165, 172), True, 'import numpy as np\n'), ((185, 202), 'numpy.asarray', 'np.asarray', (['ydata'], {}), '(ydata)\n', (195, 202), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 17 14:53:05 2017
@author: manu
Step 1 : Modify the 1KGP OMNI maps to have same physical positions(bp) by interpolation
"""
import pandas as pd
import os
from scipy.interpolate import interp1d
import numpy as np
omni="../OMNI/" #Relative path to the 1KGP OMNI maps
omni_new="../OMNI_INTERPOLATED/" #Relative path for creating the interpolated maps
positions={}
maps={}
for pop in os.listdir(omni):
maps[pop]={}
print("Reading",pop)
for chrom in os.listdir(omni+pop):
tmap=pd.read_table(omni+pop+"/"+chrom, header=0,names=["pos","rate","map","filter"]) #Reading maps into dataframes
tmap=tmap[tmap["filter"]==0] #Removing filtered positions
maps[pop][chrom]=tmap
if not chrom in positions.keys():
positions[chrom]=[]
positions[chrom]=list(set(positions[chrom]+list(tmap["pos"]))) #Creating a union set of positions from all the maps
for chrom in positions.keys():
positions[chrom]=sorted(positions[chrom]) #Sorting the chromosome-wise markers by genomic locations
#%%
if not os.path.exists(omni_new):
os.makedirs(omni_new) #creating directories
#%%
for pop in os.listdir(omni):
print("Computing interpolated map:",pop)
if not os.path.exists(omni_new+pop):
os.makedirs(omni_new+pop)
for chrom in os.listdir(omni+pop):
tmap=maps[pop][chrom]
fmap=interp1d(tmap["pos"],tmap["map"]) #interpolating function
nmap=pd.DataFrame(columns=["chr","pos"])
nmap["pos"]=positions[chrom]
nmap["chr"]=chrom[:-4]
nmap=pd.merge(nmap,tmap[["pos","map"]],how="left",on="pos") #copying map values where positions are equal
nmap.loc[(nmap["map"].isnull()) & (nmap["pos"]<tmap["pos"].iloc[0]),"map"]=0 #setting map units as 0 for positions preceeding the map
nmap.loc[(nmap["map"].isnull()) & (nmap["pos"]>tmap["pos"].iloc[-1]),"map"]=tmap["map"].iloc[-1] #setting map units as the highest map unit for positions exceeding the map
nmap.loc[(nmap["map"].isnull()),"map"]=fmap(nmap.loc[(nmap["map"].isnull()),"pos"]) #interpolating for the rest
nmap["rate"]=list(np.diff(nmap["map"])/(np.diff(nmap["pos"])/1e6))+[0] #calculating rate for all the intervals
nmap=nmap.round(6)
nmap.to_csv(omni_new+pop+"/"+chrom, sep="\t", index=False,columns=["chr","pos","rate","map"]) #writing the interpolated map
print("Done!")
|
[
"pandas.DataFrame",
"os.makedirs",
"pandas.merge",
"os.path.exists",
"numpy.diff",
"pandas.read_table",
"scipy.interpolate.interp1d",
"os.listdir"
] |
[((474, 490), 'os.listdir', 'os.listdir', (['omni'], {}), '(omni)\n', (484, 490), False, 'import os\n'), ((1431, 1447), 'os.listdir', 'os.listdir', (['omni'], {}), '(omni)\n', (1441, 1447), False, 'import os\n'), ((551, 573), 'os.listdir', 'os.listdir', (['(omni + pop)'], {}), '(omni + pop)\n', (561, 573), False, 'import os\n'), ((1273, 1297), 'os.path.exists', 'os.path.exists', (['omni_new'], {}), '(omni_new)\n', (1287, 1297), False, 'import os\n'), ((1311, 1332), 'os.makedirs', 'os.makedirs', (['omni_new'], {}), '(omni_new)\n', (1322, 1332), False, 'import os\n'), ((1590, 1612), 'os.listdir', 'os.listdir', (['(omni + pop)'], {}), '(omni + pop)\n', (1600, 1612), False, 'import os\n'), ((586, 679), 'pandas.read_table', 'pd.read_table', (["(omni + pop + '/' + chrom)"], {'header': '(0)', 'names': "['pos', 'rate', 'map', 'filter']"}), "(omni + pop + '/' + chrom, header=0, names=['pos', 'rate',\n 'map', 'filter'])\n", (599, 679), True, 'import pandas as pd\n'), ((1505, 1535), 'os.path.exists', 'os.path.exists', (['(omni_new + pop)'], {}), '(omni_new + pop)\n', (1519, 1535), False, 'import os\n'), ((1547, 1574), 'os.makedirs', 'os.makedirs', (['(omni_new + pop)'], {}), '(omni_new + pop)\n', (1558, 1574), False, 'import os\n'), ((1655, 1689), 'scipy.interpolate.interp1d', 'interp1d', (["tmap['pos']", "tmap['map']"], {}), "(tmap['pos'], tmap['map'])\n", (1663, 1689), False, 'from scipy.interpolate import interp1d\n'), ((1774, 1810), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['chr', 'pos']"}), "(columns=['chr', 'pos'])\n", (1786, 1810), True, 'import pandas as pd\n'), ((1891, 1949), 'pandas.merge', 'pd.merge', (['nmap', "tmap[['pos', 'map']]"], {'how': '"""left"""', 'on': '"""pos"""'}), "(nmap, tmap[['pos', 'map']], how='left', on='pos')\n", (1899, 1949), True, 'import pandas as pd\n'), ((2509, 2529), 'numpy.diff', 'np.diff', (["nmap['map']"], {}), "(nmap['map'])\n", (2516, 2529), True, 'import numpy as np\n'), ((2531, 2551), 'numpy.diff', 'np.diff', (["nmap['pos']"], {}), "(nmap['pos'])\n", (2538, 2551), True, 'import numpy as np\n')]
|
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
import random
import time
import sys
def display_image(window_name, img):
"""
Displays image with given window name.
:param window_name: name of the window
:param img: image object to display
"""
cv.imshow(window_name, img)
cv.waitKey(0)
cv.destroyAllWindows()
def my_integral(img):
# insert a border of 1 pixel
img_integ = cv.copyMakeBorder(
img, 1, 1, 1, 1, cv.BORDER_CONSTANT, value=0).astype(np.uint64)
# computation of the integral image
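    # Recurrence: I(i, j) = img(i, j) + I(i-1, j) + I(i, j-1) - I(i-1, j-1);
    # the zero border keeps the i-1 / j-1 accesses safe (index -1 wraps to a
    # zero-valued border row/column that is never written).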
for i in range(img.shape[0] + 1):
for j in range(img.shape[1] + 1):
img_integ[i, j] = (
(img_integ[i, j] + img_integ[i - 1, j]
+ img_integ[i, j - 1] - img_integ[i-1, j-1]))
# remove border of 1 pixel
# at the bottom and right
return img_integ[:-1, :-1]
def mean_4_image(img_mean, yx, w_shape):
# decrease of one the dimension of the window
w_shape = (w_shape[0] - 1, w_shape[1] - 1)
sum = 0
for y in range(yx[0], yx[0] + w_shape[0]):
for x in range(yx[1], yx[1] + w_shape[1]):
sum += img_mean[y, x]
mean = int(sum) // np.size(img_mean)
return mean
def mean_4_integral(img_mean, yx, w_shape):
# decrease of one the dimension of the window
w_shape = (w_shape[0] - 1, w_shape[1] - 1)
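    # Summed-area identity: the window sum equals
    # I(y2, x2) - I(y1, x2) - I(y2, x1) + I(y1, x1), the corners a, b, c, d below.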
a = img_mean[yx[0] + w_shape[0], yx[1] + w_shape[1]]
    b = img_mean[yx[0], yx[1] + w_shape[1]]  # was w_shape[0]; only correct for square windows
c = img_mean[yx[0] + w_shape[0], yx[1]]
d = img_mean[yx[0], yx[1]]
sum = (a - b - c + d)
mean = sum / np.size(img_mean)
return mean.astype(np.uint8)
def calc_mean_exec_time(img, func_mean, YX, func_integral=None):
start_time = time.time()
means = []
# if func_integral is None, img_mean is euqual to img
# this because the mean gray value is computed by
# summing up each pixel and not using the integral func
img_mean = func_integral(img) if func_integral else img
for yx in YX:
means.append(func_mean(img_mean, yx, (square_l, square_l)))
print("- run-time: %ss" % (time.time() - start_time))
# we are not outputting the mean gray values because
# it is not required
# print(means)
def max_pwise_error(img1, img2):
# computation of the absolute pixel wise difference
errors = abs(img1.astype(np.int16) -
img2.astype(np.int16))
return errors.max()
def gaussian_blur(img, k_size, sigma):
    if k_size == (0, 0):
        # get the kernel size extracted by the formula
        # at the link https://bit.ly/33xESq3
        ks = int((sigma - 0.35) / 0.15)
        k_size = (ks, ks)  # keep k_size a tuple; a bare int breaks the indexing below
# computing the kernel
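    # G(y, x) = exp(-((x - cx)^2 + (y - cy)^2) / (2*sigma^2)) around the kernel
    # centre (cx, cy), normalised afterwards so the weights sum to 1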
kernel = np.zeros(k_size)
for y in range(k_size[0]):
for x in range(k_size[1]):
a = (x - (k_size[1]-1)/2)**2
b = (y - (k_size[0]-1)/2)**2
num = -1 * (a + b)
kernel[y, x] = np.exp(num/(2*sigma**2))
# normalization
kernel /= np.sum(kernel)
return cv.filter2D(img, -1, kernel)
def gaussian_blur_w_sep(img, k_size, sigma):
    if k_size == (0, 0):
        # get the kernel size extracted by the formula
        # at the link https://bit.ly/33xESq3
        ks = int((sigma - 0.35) / 0.15)
        k_size = (ks, ks)  # keep k_size a tuple; a bare int breaks the indexing below
# computing the kernel Y
kernelY = np.zeros((k_size[0], 1))
for y in range(k_size[0]):
num = -1 * ((y - (k_size[0]-1)/2)**2)
kernelY[y, 0] = np.exp(num/(2*sigma**2))
# computing the kernel X
kernelX = np.zeros(k_size[1])
for x in range(k_size[1]):
num = -1 * ((x - (k_size[1]-1)/2)**2)
kernelX[x] = np.exp(num/(2*sigma**2))
# normalization
kernelY /= np.sum(kernelY[:, 0])
kernelX /= np.sum(kernelX)
# obtaining the final kernel
kernel = kernelY * kernelX
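    # (k, 1) * (k,) broadcasts to a (k, k) outer product, i.e. G(y, x) = g(y) * g(x)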
return cv.filter2D(img, -1, kernel)
def salt_n_pepper(img):
img_sp_gaus = img.copy()
# creation of the salt n pepper noise
for y in range(img_sp_gaus.shape[0]):
for x in range(img_sp_gaus.shape[1]):
# access only the 30% of time to the pixel
if random.uniform(0, 1) <= 0.30:
                # assign 255 or 0 with equal probability
                img_sp_gaus[y, x] = 255 if random.randint(0, 1) else 0
return img_sp_gaus
def distance_mean_gray_val(img1, img2):
mean1 = (np.sum(img1.astype(np.int16)) /
np.size(img1))
mean2 = (np.sum(img2.astype(np.int16)) /
np.size(img2))
return abs(mean1 - mean2)
def filter_SVD(img, kernel):
img_svd = img.copy()
w, u, vt = cv.SVDecomp(kernel)
# getting the highest singular value
i_value = np.argmax(w)
vt = vt[i_value, :].reshape((1, 3))
u = u[:, i_value].reshape((3, 1)) * w[i_value, 0:1]
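    # Rank-1 approximation kernel ~ w1 * u1 * v1^T: sepFilter2D then applies the
    # row kernel vt and the column kernel u as two 1-D passes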
# filtering the image w/ the obtained kernel
img_svd = cv.sepFilter2D(img_svd, -1, vt, u)
return img_svd
if __name__ == '__main__':
np.seterr(over='ignore')
img_path = sys.argv[1]
# =========================================================================
# ==================== Task 1 =================================
# =========================================================================
print('\nTask 1:')
img = cv.imread(img_path, cv.IMREAD_GRAYSCALE)
# ++++++++++++++++++++++++++++++
# a
# ++++
# the function cv.integrale
img_integ = my_integral(img)
# normalization of the integral
img_integ = ((img_integ - img_integ.min()) /
(img_integ.max() - img_integ.min()) * 255).astype(np.uint8)
display_image('Task 1 - a', img_integ)
# ++++++++++++++++++++++++++++++
# b
# ++++
# Compute the mean grey value
img_integ2 = cv.integral(img)
img_integ3 = my_integral(img)
# summing up each pixel value in the image
mean1 = mean_4_image(img, (0, 0), img.shape)
# computing an integral image using the function cv.integral
mean2 = mean_4_integral(img_integ2, (0, 0), img_integ2.shape)
# computing an integral image with your own function
mean3 = mean_4_integral(img_integ3, (0, 0), img_integ3.shape)
print('Mean grey value of the image (i): ', mean1)
print('Mean grey value of the image (ii): ', mean2)
print('Mean grey value of the image (iii): ', mean3)
# ++++++++++++++++++++++++++++++
# c
# ++++
square_l = 100
# getting the 10 random points
YX = [(random.randint(0, img_integ2.shape[0]-square_l),
random.randint(0, img_integ2.shape[1]-square_l))
for _ in range(10)]
print('Mean gray value w/ 10 random squares (i)', end=' ')
calc_mean_exec_time(img, mean_4_image, YX)
print('Mean gray value w/ 10 random squares (ii)', end=' ')
calc_mean_exec_time(img, mean_4_integral, YX, cv.integral)
print('Mean gray value w/ 10 random squares (iii)', end=' ')
calc_mean_exec_time(img, mean_4_integral, YX, my_integral)
# =========================================================================
# ==================== Task 2 =================================
# =========================================================================
print('\nTask 2:')
img_eqz = cv.equalizeHist(img)
display_image('Equalization', img_eqz)
img_my_eqz = img.copy()
histogram = np.zeros(256)
# histogram creation
for i in range(256):
histogram[i] = np.count_nonzero(img_my_eqz == i)
# Creation of the cumulative distribution function CDF
cdf = np.array([np.sum(histogram[:(i+1)]) for i in range(256)])
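    # (equivalently and faster: cdf = np.cumsum(histogram))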
# normalization
nr = np.round(((cdf - cdf.min()) / (cdf.max() - cdf.min())) * 255)
for y in range(img.shape[0]):
for x in range(img.shape[1]):
img_my_eqz[y, x] = nr[img[y, x]]
display_image('My equalization', img_my_eqz)
error = max_pwise_error(img_eqz, img_my_eqz)
print('Max pixel wise error (equalization): ', error)
# =========================================================================
# ==================== Task 4 =================================
# =========================================================================
print('\nTask 4:')
sigma = (2 * (2**(1/2)))
k_size = (3, 3)
display_image('Gray image', img)
img_gaus = cv.GaussianBlur(img, k_size, sigma)
display_image('OpenCV gaussian', img_gaus)
img_my_gaus = gaussian_blur(img, k_size, sigma)
display_image('My gaussian', img_my_gaus)
img_my_gaus_sep = gaussian_blur_w_sep(img, k_size, sigma)
display_image('My gaussian w/ separability', img_my_gaus_sep)
# computation maximum pixel wise error
print('Maximum pixel error:')
# OpenCV - MyGaussian
error = max_pwise_error(img_gaus, img_my_gaus)
print('OpenCV - MyGaussian = ', error)
# OpenCV - MyGaussianSep
error = max_pwise_error(img_gaus, img_my_gaus_sep)
print('OpenCV - MyGaussianSep = ', error)
# MyGaussian - MyGaussianSep
error = max_pwise_error(img_my_gaus, img_my_gaus_sep)
print('MyGaussian - MyGaussianSep = ', error)
# =========================================================================
# ==================== Task 5 =================================
# =========================================================================
print('\nTask 5:')
sigma1 = (2)
sigma2 = (2 * (2**(1/2)))
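    # Gaussian variances add under cascading: two blurs with sigma1 = 2 equal
    # one blur with sigma1*sqrt(2) = 2*sqrt(2), hence sigma2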
k_size = (0, 0)
img_my_gaus_1 = img.copy()
img_my_gaus_1 = cv.GaussianBlur(
img_my_gaus_1, k_size, sigma1)
img_my_gaus_1 = cv.GaussianBlur(
img_my_gaus_1, k_size, sigma1)
display_image('My gaussian twice', img_my_gaus_1)
img_my_gaus_2 = cv.GaussianBlur(
img, k_size, sigma2)
display_image('My gaussian once', img_my_gaus_2)
# computation maximum pixel error
error = max_pwise_error(img_my_gaus_1, img_my_gaus_2)
print('Maximum pixel error:', error)
# =========================================================================
# ==================== Task 7 =================================
# =========================================================================
print('\nTask 7:')
k_sizes = [7, 9]
img_sp = salt_n_pepper(img)
display_image('Salt n Pepper', img_sp)
# Gaussian filtering
gray_means = []
for k_s in k_sizes:
img_sp_copy = img_sp.copy()
img_sp_gaus = cv.GaussianBlur(img_sp_copy, (k_s, k_s), 0)
distance = distance_mean_gray_val(img, img_sp_gaus)
gray_means.append((distance, k_s, img_sp_gaus))
res = min(gray_means, key=lambda x: x[0])
txt = 'SP gaussian (size: {}, mean: {:0.2f})'.format(
res[1], res[0])
print(txt)
display_image(txt, res[2])
# Median filtering
gray_means = []
for k_s in k_sizes:
img_sp_copy = img_sp.copy()
img_sp_median = cv.medianBlur(img_sp_copy, k_s)
distance = distance_mean_gray_val(img, img_sp_median)
gray_means.append((distance, k_s, img_sp_median))
res = min(gray_means, key=lambda x: x[0])
txt = 'SP median (size: {}, mean: {:0.2f})'.format(
res[1], res[0])
print(txt)
display_image(txt, res[2])
# Bilateral filtering
gray_means = []
for k_s in k_sizes:
img_sp_copy = img_sp.copy()
img_sp_bilateral = cv.bilateralFilter(
img_sp_copy, k_s, 80, 80)
distance = distance_mean_gray_val(img, img_sp_bilateral)
gray_means.append((distance, k_s, img_sp_bilateral))
res = min(gray_means, key=lambda x: x[0])
txt = 'SP bilateral (size: {}, mean: {:0.2f})'.format(
res[1], res[0])
print(txt)
display_image(txt, res[2])
# =========================================================================
# ==================== Task 8 =================================
# =========================================================================
print('\nTask 8:')
kernel1 = np.matrix([
[0.0113, 0.0838, 0.0113],
[0.0838, 0.6193, 0.0838],
[0.0113, 0.0838, 0.0113]])
kernel2 = np.matrix([
[-0.8984, 0.1472, 1.1410],
[-1.9075, 0.1566, 2.1359],
[-0.8659, 0.0573, 1.0337]])
img_k1 = cv.filter2D(img, -1, kernel1)
img_k1_svd = filter_SVD(img, kernel1)
display_image('kernel1', img_k1)
display_image('kernel1 w/ SVD', img_k1_svd)
img_k2 = cv.filter2D(img, -1, kernel2)
img_k2_svd = filter_SVD(img, kernel2)
display_image('kernel2', img_k2)
display_image('kernel2 w/ SVD', img_k2_svd)
# computation of the pixel wise error
error = max_pwise_error(img_k1, img_k1_svd)
print('Pixel wise error w/ kernel1: ', error)
error = max_pwise_error(img_k2, img_k2_svd)
print('Pixel wise error w/ kernel2: ', error)
|
[
"cv2.GaussianBlur",
"cv2.integral",
"numpy.sum",
"numpy.argmax",
"cv2.sepFilter2D",
"cv2.medianBlur",
"cv2.bilateralFilter",
"numpy.exp",
"cv2.imshow",
"random.randint",
"cv2.filter2D",
"cv2.copyMakeBorder",
"cv2.destroyAllWindows",
"cv2.equalizeHist",
"numpy.size",
"cv2.waitKey",
"cv2.SVDecomp",
"numpy.matrix",
"numpy.count_nonzero",
"random.uniform",
"numpy.seterr",
"numpy.zeros",
"time.time",
"cv2.imread"
] |
[((295, 322), 'cv2.imshow', 'cv.imshow', (['window_name', 'img'], {}), '(window_name, img)\n', (304, 322), True, 'import cv2 as cv\n'), ((327, 340), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (337, 340), True, 'import cv2 as cv\n'), ((345, 367), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (365, 367), True, 'import cv2 as cv\n'), ((1734, 1745), 'time.time', 'time.time', ([], {}), '()\n', (1743, 1745), False, 'import time\n'), ((2685, 2701), 'numpy.zeros', 'np.zeros', (['k_size'], {}), '(k_size)\n', (2693, 2701), True, 'import numpy as np\n'), ((2968, 2982), 'numpy.sum', 'np.sum', (['kernel'], {}), '(kernel)\n', (2974, 2982), True, 'import numpy as np\n'), ((2994, 3022), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (3005, 3022), True, 'import cv2 as cv\n'), ((3283, 3307), 'numpy.zeros', 'np.zeros', (['(k_size[0], 1)'], {}), '((k_size[0], 1))\n', (3291, 3307), True, 'import numpy as np\n'), ((3478, 3497), 'numpy.zeros', 'np.zeros', (['k_size[1]'], {}), '(k_size[1])\n', (3486, 3497), True, 'import numpy as np\n'), ((3657, 3678), 'numpy.sum', 'np.sum', (['kernelY[:, 0]'], {}), '(kernelY[:, 0])\n', (3663, 3678), True, 'import numpy as np\n'), ((3694, 3709), 'numpy.sum', 'np.sum', (['kernelX'], {}), '(kernelX)\n', (3700, 3709), True, 'import numpy as np\n'), ((3785, 3813), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel'], {}), '(img, -1, kernel)\n', (3796, 3813), True, 'import cv2 as cv\n'), ((4526, 4545), 'cv2.SVDecomp', 'cv.SVDecomp', (['kernel'], {}), '(kernel)\n', (4537, 4545), True, 'import cv2 as cv\n'), ((4601, 4613), 'numpy.argmax', 'np.argmax', (['w'], {}), '(w)\n', (4610, 4613), True, 'import numpy as np\n'), ((4774, 4808), 'cv2.sepFilter2D', 'cv.sepFilter2D', (['img_svd', '(-1)', 'vt', 'u'], {}), '(img_svd, -1, vt, u)\n', (4788, 4808), True, 'import cv2 as cv\n'), ((4862, 4886), 'numpy.seterr', 'np.seterr', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (4871, 4886), True, 'import numpy as np\n'), ((5174, 5214), 'cv2.imread', 'cv.imread', (['img_path', 'cv.IMREAD_GRAYSCALE'], {}), '(img_path, cv.IMREAD_GRAYSCALE)\n', (5183, 5214), True, 'import cv2 as cv\n'), ((5653, 5669), 'cv2.integral', 'cv.integral', (['img'], {}), '(img)\n', (5664, 5669), True, 'import cv2 as cv\n'), ((7115, 7135), 'cv2.equalizeHist', 'cv.equalizeHist', (['img'], {}), '(img)\n', (7130, 7135), True, 'import cv2 as cv\n'), ((7225, 7238), 'numpy.zeros', 'np.zeros', (['(256)'], {}), '(256)\n', (7233, 7238), True, 'import numpy as np\n'), ((8196, 8231), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', 'k_size', 'sigma'], {}), '(img, k_size, sigma)\n', (8211, 8231), True, 'import cv2 as cv\n'), ((9349, 9395), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img_my_gaus_1', 'k_size', 'sigma1'], {}), '(img_my_gaus_1, k_size, sigma1)\n', (9364, 9395), True, 'import cv2 as cv\n'), ((9425, 9471), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img_my_gaus_1', 'k_size', 'sigma1'], {}), '(img_my_gaus_1, k_size, sigma1)\n', (9440, 9471), True, 'import cv2 as cv\n'), ((9557, 9593), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img', 'k_size', 'sigma2'], {}), '(img, k_size, sigma2)\n', (9572, 9593), True, 'import cv2 as cv\n'), ((11817, 11911), 'numpy.matrix', 'np.matrix', (['[[0.0113, 0.0838, 0.0113], [0.0838, 0.6193, 0.0838], [0.0113, 0.0838, 0.0113]]'], {}), '([[0.0113, 0.0838, 0.0113], [0.0838, 0.6193, 0.0838], [0.0113, \n 0.0838, 0.0113]])\n', (11826, 11911), True, 'import numpy as np\n'), ((11947, 12043), 'numpy.matrix', 'np.matrix', (['[[-0.8984, 
0.1472, 1.141], [-1.9075, 0.1566, 2.1359], [-0.8659, 0.0573, 1.0337]\n ]'], {}), '([[-0.8984, 0.1472, 1.141], [-1.9075, 0.1566, 2.1359], [-0.8659, \n 0.0573, 1.0337]])\n', (11956, 12043), True, 'import numpy as np\n'), ((12079, 12108), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel1'], {}), '(img, -1, kernel1)\n', (12090, 12108), True, 'import cv2 as cv\n'), ((12251, 12280), 'cv2.filter2D', 'cv.filter2D', (['img', '(-1)', 'kernel2'], {}), '(img, -1, kernel2)\n', (12262, 12280), True, 'import cv2 as cv\n'), ((1203, 1220), 'numpy.size', 'np.size', (['img_mean'], {}), '(img_mean)\n', (1210, 1220), True, 'import numpy as np\n'), ((1599, 1616), 'numpy.size', 'np.size', (['img_mean'], {}), '(img_mean)\n', (1606, 1616), True, 'import numpy as np\n'), ((3409, 3439), 'numpy.exp', 'np.exp', (['(num / (2 * sigma ** 2))'], {}), '(num / (2 * sigma ** 2))\n', (3415, 3439), True, 'import numpy as np\n'), ((3596, 3626), 'numpy.exp', 'np.exp', (['(num / (2 * sigma ** 2))'], {}), '(num / (2 * sigma ** 2))\n', (3602, 3626), True, 'import numpy as np\n'), ((4336, 4349), 'numpy.size', 'np.size', (['img1'], {}), '(img1)\n', (4343, 4349), True, 'import numpy as np\n'), ((4409, 4422), 'numpy.size', 'np.size', (['img2'], {}), '(img2)\n', (4416, 4422), True, 'import numpy as np\n'), ((7313, 7346), 'numpy.count_nonzero', 'np.count_nonzero', (['(img_my_eqz == i)'], {}), '(img_my_eqz == i)\n', (7329, 7346), True, 'import numpy as np\n'), ((10270, 10313), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['img_sp_copy', '(k_s, k_s)', '(0)'], {}), '(img_sp_copy, (k_s, k_s), 0)\n', (10285, 10313), True, 'import cv2 as cv\n'), ((10733, 10764), 'cv2.medianBlur', 'cv.medianBlur', (['img_sp_copy', 'k_s'], {}), '(img_sp_copy, k_s)\n', (10746, 10764), True, 'import cv2 as cv\n'), ((11192, 11236), 'cv2.bilateralFilter', 'cv.bilateralFilter', (['img_sp_copy', 'k_s', '(80)', '(80)'], {}), '(img_sp_copy, k_s, 80, 80)\n', (11210, 11236), True, 'import cv2 as cv\n'), ((441, 504), 'cv2.copyMakeBorder', 'cv.copyMakeBorder', (['img', '(1)', '(1)', '(1)', '(1)', 'cv.BORDER_CONSTANT'], {'value': '(0)'}), '(img, 1, 1, 1, 1, cv.BORDER_CONSTANT, value=0)\n', (458, 504), True, 'import cv2 as cv\n'), ((2908, 2938), 'numpy.exp', 'np.exp', (['(num / (2 * sigma ** 2))'], {}), '(num / (2 * sigma ** 2))\n', (2914, 2938), True, 'import numpy as np\n'), ((6345, 6394), 'random.randint', 'random.randint', (['(0)', '(img_integ2.shape[0] - square_l)'], {}), '(0, img_integ2.shape[0] - square_l)\n', (6359, 6394), False, 'import random\n'), ((6405, 6454), 'random.randint', 'random.randint', (['(0)', '(img_integ2.shape[1] - square_l)'], {}), '(0, img_integ2.shape[1] - square_l)\n', (6419, 6454), False, 'import random\n'), ((7427, 7452), 'numpy.sum', 'np.sum', (['histogram[:i + 1]'], {}), '(histogram[:i + 1])\n', (7433, 7452), True, 'import numpy as np\n'), ((2110, 2121), 'time.time', 'time.time', ([], {}), '()\n', (2119, 2121), False, 'import time\n'), ((4069, 4089), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (4083, 4089), False, 'import random\n'), ((4185, 4205), 'random.randint', 'random.randint', (['(0)', '(2)'], {}), '(0, 2)\n', (4199, 4205), False, 'import random\n')]
|
# CSE Drone Team 2020
import numpy as np
import cv2
import cv2.aruco as aruco
import sys, time, math
class ArucoTracker():
def __init__(self, tracker_id, tracker_size, mtx, dst, camera_size=[640,480], gui=False):
#Marker information
self.tracker_id = tracker_id
self.tracker_size = tracker_size
#Aruco dictionary
self._aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
self._parameters = aruco.DetectorParameters_create()
#Camera Configuration
self._cap = cv2.VideoCapture(0)
self._cap.set(cv2.CAP_PROP_FRAME_WIDTH, camera_size[0])
self._cap.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_size[1])
self._mtx = mtx
self._dst = dst
#Helper attributes
self.font = cv2.FONT_HERSHEY_PLAIN
self._gui = gui
self._t_read = time.time()
self._t_detect = self._t_read
self.is_detected = False
self._kill = False
def stop(self):
self._kill = True
self._cap.release()
def track(self, loop=True, gui=None):
self._kill = False
if gui is None: gui = self._gui
        # Initializing marker tracking.
detected = False
x = y = z = a = 0
while not self._kill:
#Reading camera input from rpi camera
ret, frame = self._cap.read()
            if np.shape(frame) == ():
print("Camera error!")
self._cap.release()
exit()
#-- Converting image frame into gray scale
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#-- Detects all the aruco markers based upon the provided parameters and aruco dictionary.
corners, ids, rejected = aruco.detectMarkers(image=gray, dictionary=self._aruco_dict,
parameters=self._parameters,
cameraMatrix=self._mtx,
distCoeff=self._dst)
idx = -1
            c = 0
            if ids is not None:
                for marker_id in ids:
                    if self.tracker_id == marker_id[0]:
                        idx = c
                    c = c + 1
if idx != -1:
detected = True
ret_data = aruco.estimatePoseSingleMarkers(corners[idx], self.tracker_size, self._mtx, self._dst)
                #-- We need tvec here: the translation vector gives the marker's position relative to the drone.
rvec, tvec = ret_data[0][0,0,:], ret_data[1][0,0,:]
                # These are the marker position components required to navigate the drone towards the UTA logo.
x = tvec[0]
y = tvec[1]
z = tvec[2]
angle = math.atan((corners[idx][0][2][1]-corners[idx][0][0][1])/(corners[idx][0][2][0]-corners[idx][0][0][0])) * (180/math.pi)
yaw_angle = angle
if angle < 0:
yaw_angle = angle + 90
else:
yaw_angle = angle - 90
a = yaw_angle
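                # (Added note: `angle` above is the image-plane slope of the marker's
                # corner-0 to corner-2 diagonal; the +/-90 degree shift folds it into
                # a signed yaw estimate for the marker.)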
#Draw the detected marker and put a reference frame over it
aruco.drawDetectedMarkers(frame, corners)
aruco.drawAxis(frame, self._mtx, self._dst, rvec, tvec, 10)
print ("Marker X = %.1f Y = %.1f Z = %.1f "%(tvec[0], tvec[1], tvec[2]))
font = self.font
if gui:
#-- Print the tag position in camera frame
str_position = "MARKER Position x=%4.0f y=%4.0f z=%4.0f a = %3.1f "%(tvec[0], tvec[1], tvec[2],a)
cv2.putText(frame, str_position, (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA)
else:
print("Nothing detected ")
            # Displaying the image on screen can be computationally heavy for the Pi when ArduPilot is also running in parallel.
            # Make sure this is off when the drone is flying.
if gui:
#--- Display the frame
cv2.imshow('frame', frame)
#--- use 'q' to quit
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
self._cap.release()
cv2.destroyAllWindows()
break
            if not loop:
                # Return the detected position of the logo
                self._kill = True
return (detected, x, y, z, a)
def small_track(self, loop=False, gui=None):
self.tracker_id = 15
self.tracker_size = 10.1
(detected, x, y, z, a) = self.track(loop=loop, gui =gui)
return (detected, x, y, z ,a)
def big_track(self, loop=False, gui=None):
self.tracker_id = 4
self.tracker_size = 35.0
(detected, x, y, z ,a) = self.track(loop=loop, gui =gui)
return (detected, x, y, z, a)
if __name__ == "__main__":
tracker_id = 15
tracker_size = 10.1 #- [cm]
    # Paths to the camera matrix and distortion coefficients.
    # Our Raspberry Pi HQ camera doesn't have much distortion, but these values are required
    # to detect the marker, as they are passed later to the detector function.
mtx = np.loadtxt('calib/cameraMatrix.txt', delimiter=',')
dst = np.loadtxt('calib/cameraDistortion.txt', delimiter=',')
# creating our aruco tracker object.
aruco_tracker = ArucoTracker(tracker_id=tracker_id, tracker_size=tracker_size, gui= True, mtx=mtx, dst=dst)
    # Initializing the tracker for the specific ID of the logo.
aruco_tracker.track()
|
[
"math.atan",
"cv2.aruco.estimatePoseSingleMarkers",
"cv2.aruco.drawDetectedMarkers",
"cv2.aruco.DetectorParameters_create",
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.destroyAllWindows",
"time.time",
"cv2.aruco.Dictionary_get",
"cv2.VideoCapture",
"cv2.aruco.detectMarkers",
"numpy.shape",
"cv2.aruco.drawAxis",
"numpy.loadtxt",
"cv2.imshow"
] |
[((5528, 5579), 'numpy.loadtxt', 'np.loadtxt', (['"""calib/cameraMatrix.txt"""'], {'delimiter': '""","""'}), "('calib/cameraMatrix.txt', delimiter=',')\n", (5538, 5579), True, 'import numpy as np\n'), ((5592, 5647), 'numpy.loadtxt', 'np.loadtxt', (['"""calib/cameraDistortion.txt"""'], {'delimiter': '""","""'}), "('calib/cameraDistortion.txt', delimiter=',')\n", (5602, 5647), True, 'import numpy as np\n'), ((397, 437), 'cv2.aruco.Dictionary_get', 'aruco.Dictionary_get', (['aruco.DICT_6X6_250'], {}), '(aruco.DICT_6X6_250)\n', (417, 437), True, 'import cv2.aruco as aruco\n'), ((466, 499), 'cv2.aruco.DetectorParameters_create', 'aruco.DetectorParameters_create', ([], {}), '()\n', (497, 499), True, 'import cv2.aruco as aruco\n'), ((551, 570), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (567, 570), False, 'import cv2\n'), ((873, 884), 'time.time', 'time.time', ([], {}), '()\n', (882, 884), False, 'import sys, time, math\n'), ((1667, 1706), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (1679, 1706), False, 'import cv2\n'), ((1849, 1988), 'cv2.aruco.detectMarkers', 'aruco.detectMarkers', ([], {'image': 'gray', 'dictionary': 'self._aruco_dict', 'parameters': 'self._parameters', 'cameraMatrix': 'self._mtx', 'distCoeff': 'self._dst'}), '(image=gray, dictionary=self._aruco_dict, parameters=\n self._parameters, cameraMatrix=self._mtx, distCoeff=self._dst)\n', (1868, 1988), True, 'import cv2.aruco as aruco\n'), ((1469, 1484), 'numpy.shape', 'np.shape', (['frame'], {}), '(frame)\n', (1477, 1484), True, 'import numpy as np\n'), ((2401, 2491), 'cv2.aruco.estimatePoseSingleMarkers', 'aruco.estimatePoseSingleMarkers', (['corners[idx]', 'self.tracker_size', 'self._mtx', 'self._dst'], {}), '(corners[idx], self.tracker_size, self._mtx,\n self._dst)\n', (2432, 2491), True, 'import cv2.aruco as aruco\n'), ((3367, 3408), 'cv2.aruco.drawDetectedMarkers', 'aruco.drawDetectedMarkers', (['frame', 'corners'], {}), '(frame, corners)\n', (3392, 3408), True, 'import cv2.aruco as aruco\n'), ((3425, 3484), 'cv2.aruco.drawAxis', 'aruco.drawAxis', (['frame', 'self._mtx', 'self._dst', 'rvec', 'tvec', '(10)'], {}), '(frame, self._mtx, self._dst, rvec, tvec, 10)\n', (3439, 3484), True, 'import cv2.aruco as aruco\n'), ((4302, 4328), 'cv2.imshow', 'cv2.imshow', (['"""frame"""', 'frame'], {}), "('frame', frame)\n", (4312, 4328), False, 'import cv2\n'), ((2954, 3067), 'math.atan', 'math.atan', (['((corners[idx][0][2][1] - corners[idx][0][0][1]) / (corners[idx][0][2][0] -\n corners[idx][0][0][0]))'], {}), '((corners[idx][0][2][1] - corners[idx][0][0][1]) / (corners[idx][0\n ][2][0] - corners[idx][0][0][0]))\n', (2963, 3067), False, 'import sys, time, math\n'), ((3855, 3940), 'cv2.putText', 'cv2.putText', (['frame', 'str_position', '(0, 100)', 'font', '(1)', '(0, 255, 0)', '(2)', 'cv2.LINE_AA'], {}), '(frame, str_position, (0, 100), font, 1, (0, 255, 0), 2, cv2.LINE_AA\n )\n', (3866, 3940), False, 'import cv2\n'), ((4389, 4403), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4400, 4403), False, 'import cv2\n'), ((4507, 4530), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4528, 4530), False, 'import cv2\n')]
|
#!/usr/bin/env python
import os
import time
import traceback
from argparse import ArgumentParser
from glob import glob
import numpy as np
import tensorflow as tf
from scipy.misc import imread, imsave
from utils import (get_hand_segmentation_for_image, get_combined_segmentation_for_image,
get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs,
patho_subdir, combined_subdir)
def parse_arguments():
parser = ArgumentParser()
parser.add_argument("--output-dir", required=True, type=str,
help="Target directory to store the patches in")
parser.add_argument("--data-dir", type=str, default="training-data",
help="Input directory from where to load images")
parser.add_argument("--size", type=int, default=256,
help="Size of the square patches in pixels")
parser.add_argument("--step-size", type=float, default=0.5,
help="Step size to use when looking for patches as a percentage of the patch size")
parser.add_argument("--min-hand", type=float, default=1.0,
help="Minimum percentage of hand pixels")
parser.add_argument("--max-hand", type=float, default=1.0,
help="Maximum percentage of hand pixels")
parser.add_argument("--min-patho", type=float, default=0.0,
help="Minimum percentage of pathology pixels")
parser.add_argument("--max-patho", type=float, default=1.0,
help="Maximum percentage of pathology pixels")
parser.add_argument("--match-pattern", type=str, default=None,
help="Specify pattern for files to match")
parser.add_argument("--verify", action="store_true",
help="Verify data integrity if specified")
return parser.parse_args()
def verify_data_integrity(image_dir, hand_dir, patho_dir, combined_dir):
# count images in the directories
images_count = len(glob("{}/*.png".format(image_dir)))
hand_count = len(glob("{}/*.png".format(hand_dir)))
patho_count = len(glob("{}/*.png".format(patho_dir)))
combined_count = len(glob("{}/*.png".format(combined_dir)))
tf.logging.info("Image file counts: {}/{}/{}/{} (images/hand/patho/combined)".format(
images_count, hand_count, patho_count, combined_count))
assert images_count == hand_count and images_count == patho_count and images_count == combined_count
# check file names
for file_name in glob("{}/*.png".format(image_dir)):
assert os.path.isfile(get_hand_segmentation_for_image(file_name, hand_dir))
assert os.path.isfile(get_patho_segmentation_for_image(file_name, patho_dir))
assert os.path.isfile(get_combined_segmentation_for_image(file_name, combined_dir))
tf.logging.info("There seems to be exactly one hand, pathology, and combined segmentation per image")
def add_patch_to_file_name(file_name, patch_number):
assert patch_number < 1e5
return "{}_patch_{:04d}.png".format(os.path.splitext(file_name)[0], patch_number)
def find_patches_in_file(image_file, hand_dir, patho_dir, combined_dir, output_dir, args):
# pylint: disable=too-many-locals
patch_size = args.size
patch_step = int(args.size * args.step_size)
min_hand = int(patch_size * patch_size * args.min_hand)
max_hand = int(patch_size * patch_size * args.max_hand)
min_patho = int(patch_size * patch_size * args.min_patho)
max_patho = int(patch_size * patch_size * args.max_patho)
image_filename = os.path.basename(image_file)
image = imread(image_file)
hand_file = get_hand_segmentation_for_image(image_file, hand_dir)
hand_filename = os.path.basename(hand_file)
hand = imread(hand_file)
patho_file = get_patho_segmentation_for_image(image_file, patho_dir)
patho = imread(patho_file)
patho_filename = os.path.basename(patho_file)
combined_file = get_combined_segmentation_for_image(image_file, combined_dir)
combined = imread(combined_file)
combined_filename = os.path.basename(combined_file)
partial_patch_count = 0
non_hand_patch_count = 0
non_patho_patch_count = 0
found_patch_count = 0
for i in range(0, image.shape[0], patch_step):
for j in range(0, image.shape[1], patch_step):
hand_patch = hand[i:i+patch_size, j:j+patch_size]
if hand_patch.shape != (patch_size, patch_size):
# ignore partial patches
partial_patch_count += 1
continue
if np.count_nonzero(hand_patch) < min_hand or np.count_nonzero(hand_patch) > max_hand:
# ignore patches that have too few/much hand
non_hand_patch_count += 1
continue
patho_patch = patho[i:i+patch_size, j:j+patch_size]
if np.count_nonzero(patho_patch) < min_patho or np.count_nonzero(patho_patch) > max_patho:
# ignore patches that have too few/much patho
non_patho_patch_count += 1
continue
# save patches
image_patch = image[i:i+patch_size, j:j+patch_size]
combined_patch = combined[i:i+patch_size, j:j+patch_size]
imsave("{}/{}/{}".format(output_dir, image_subdir, add_patch_to_file_name(image_filename, found_patch_count)), image_patch)
imsave("{}/{}/{}".format(output_dir, hand_subdir, add_patch_to_file_name(hand_filename, found_patch_count)), hand_patch)
imsave("{}/{}/{}".format(output_dir, patho_subdir, add_patch_to_file_name(patho_filename, found_patch_count)), patho_patch)
imsave("{}/{}/{}".format(output_dir, combined_subdir, add_patch_to_file_name(combined_filename, found_patch_count)), combined_patch)
found_patch_count += 1
tf.logging.info("Found {} patches and ignored {}/{}/{} (bad-hand/bad-patho/partial) in file '{}'".format(
found_patch_count, non_hand_patch_count, non_patho_patch_count, partial_patch_count, image_filename))
return found_patch_count
def main():
# handle arguments and config
args = parse_arguments()
tf.logging.info("Args: {}".format(args))
data_dir = os.path.join("data", args.data_dir)
image_dir = os.path.join(data_dir, image_subdir)
hand_dir = os.path.join(data_dir, hand_subdir)
patho_dir = os.path.join(data_dir, patho_subdir)
combined_dir = os.path.join(data_dir, combined_subdir)
if args.verify:
verify_data_integrity(image_dir, hand_dir, patho_dir, combined_dir)
for sub_dir in data_subdirs:
assert not os.path.exists(os.path.join(args.output_dir, sub_dir)), \
"Output directory '{}' exists already, select another!".format(args.output_dir)
os.makedirs(os.path.join(args.output_dir, sub_dir))
found_patch_count = 0
processed_image_count = 0
for image_file in glob("{}/*{}.png".format(image_dir, args.match_pattern + "*" if args.match_pattern else "")):
found_patch_count += find_patches_in_file(image_file, hand_dir, patho_dir, combined_dir, args.output_dir, args)
processed_image_count += 1
tf.logging.info("Found {} patches in {} images".format(found_patch_count, processed_image_count))
if __name__ == "__main__":
START_TIME = time.time()
tf.logging.set_verbosity(tf.logging.INFO)
try:
main()
  except Exception:
tf.logging.fatal("Exception occurred: {}".format(traceback.format_exc()))
finally:
tf.logging.info("Finished execution after {:.1f}m".format((time.time() - START_TIME) / 60))
|
[
"utils.get_patho_segmentation_for_image",
"utils.get_combined_segmentation_for_image",
"numpy.count_nonzero",
"argparse.ArgumentParser",
"tensorflow.logging.info",
"os.path.basename",
"tensorflow.logging.set_verbosity",
"time.time",
"utils.get_hand_segmentation_for_image",
"os.path.splitext",
"traceback.format_exc",
"os.path.join",
"scipy.misc.imread"
] |
[((471, 487), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (485, 487), False, 'from argparse import ArgumentParser\n'), ((2610, 2721), 'tensorflow.logging.info', 'tf.logging.info', (['"""There seems to be exactly one hand, pathology, and combined segmentation per image"""'], {}), "(\n 'There seems to be exactly one hand, pathology, and combined segmentation per image'\n )\n", (2625, 2721), True, 'import tensorflow as tf\n'), ((3334, 3362), 'os.path.basename', 'os.path.basename', (['image_file'], {}), '(image_file)\n', (3350, 3362), False, 'import os\n'), ((3373, 3391), 'scipy.misc.imread', 'imread', (['image_file'], {}), '(image_file)\n', (3379, 3391), False, 'from scipy.misc import imread, imsave\n'), ((3406, 3459), 'utils.get_hand_segmentation_for_image', 'get_hand_segmentation_for_image', (['image_file', 'hand_dir'], {}), '(image_file, hand_dir)\n', (3437, 3459), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((3478, 3505), 'os.path.basename', 'os.path.basename', (['hand_file'], {}), '(hand_file)\n', (3494, 3505), False, 'import os\n'), ((3515, 3532), 'scipy.misc.imread', 'imread', (['hand_file'], {}), '(hand_file)\n', (3521, 3532), False, 'from scipy.misc import imread, imsave\n'), ((3548, 3603), 'utils.get_patho_segmentation_for_image', 'get_patho_segmentation_for_image', (['image_file', 'patho_dir'], {}), '(image_file, patho_dir)\n', (3580, 3603), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((3614, 3632), 'scipy.misc.imread', 'imread', (['patho_file'], {}), '(patho_file)\n', (3620, 3632), False, 'from scipy.misc import imread, imsave\n'), ((3652, 3680), 'os.path.basename', 'os.path.basename', (['patho_file'], {}), '(patho_file)\n', (3668, 3680), False, 'import os\n'), ((3699, 3760), 'utils.get_combined_segmentation_for_image', 'get_combined_segmentation_for_image', (['image_file', 'combined_dir'], {}), '(image_file, combined_dir)\n', (3734, 3760), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((3774, 3795), 'scipy.misc.imread', 'imread', (['combined_file'], {}), '(combined_file)\n', (3780, 3795), False, 'from scipy.misc import imread, imsave\n'), ((3818, 3849), 'os.path.basename', 'os.path.basename', (['combined_file'], {}), '(combined_file)\n', (3834, 3849), False, 'import os\n'), ((5781, 5816), 'os.path.join', 'os.path.join', (['"""data"""', 'args.data_dir'], {}), "('data', args.data_dir)\n", (5793, 5816), False, 'import os\n'), ((5831, 5867), 'os.path.join', 'os.path.join', (['data_dir', 'image_subdir'], {}), '(data_dir, image_subdir)\n', (5843, 5867), False, 'import os\n'), ((5881, 5916), 'os.path.join', 'os.path.join', (['data_dir', 'hand_subdir'], {}), '(data_dir, hand_subdir)\n', (5893, 5916), False, 'import os\n'), ((5931, 5967), 'os.path.join', 'os.path.join', (['data_dir', 'patho_subdir'], {}), '(data_dir, patho_subdir)\n', (5943, 5967), False, 'import os\n'), ((5985, 6024), 'os.path.join', 'os.path.join', (['data_dir', 'combined_subdir'], {}), '(data_dir, combined_subdir)\n', (5997, 6024), False, 'import os\n'), ((6824, 6835), 'time.time', 'time.time', ([], {}), '()\n', (6833, 6835), 
False, 'import time\n'), ((6838, 6879), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (6862, 6879), True, 'import tensorflow as tf\n'), ((2383, 2435), 'utils.get_hand_segmentation_for_image', 'get_hand_segmentation_for_image', (['file_name', 'hand_dir'], {}), '(file_name, hand_dir)\n', (2414, 2435), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((2463, 2517), 'utils.get_patho_segmentation_for_image', 'get_patho_segmentation_for_image', (['file_name', 'patho_dir'], {}), '(file_name, patho_dir)\n', (2495, 2517), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((2545, 2605), 'utils.get_combined_segmentation_for_image', 'get_combined_segmentation_for_image', (['file_name', 'combined_dir'], {}), '(file_name, combined_dir)\n', (2580, 2605), False, 'from utils import get_hand_segmentation_for_image, get_combined_segmentation_for_image, get_patho_segmentation_for_image, hand_subdir, image_subdir, data_subdirs, patho_subdir, combined_subdir\n'), ((2832, 2859), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (2848, 2859), False, 'import os\n'), ((6325, 6363), 'os.path.join', 'os.path.join', (['args.output_dir', 'sub_dir'], {}), '(args.output_dir, sub_dir)\n', (6337, 6363), False, 'import os\n'), ((6178, 6216), 'os.path.join', 'os.path.join', (['args.output_dir', 'sub_dir'], {}), '(args.output_dir, sub_dir)\n', (6190, 6216), False, 'import os\n'), ((4261, 4289), 'numpy.count_nonzero', 'np.count_nonzero', (['hand_patch'], {}), '(hand_patch)\n', (4277, 4289), True, 'import numpy as np\n'), ((4304, 4332), 'numpy.count_nonzero', 'np.count_nonzero', (['hand_patch'], {}), '(hand_patch)\n', (4320, 4332), True, 'import numpy as np\n'), ((4517, 4546), 'numpy.count_nonzero', 'np.count_nonzero', (['patho_patch'], {}), '(patho_patch)\n', (4533, 4546), True, 'import numpy as np\n'), ((4562, 4591), 'numpy.count_nonzero', 'np.count_nonzero', (['patho_patch'], {}), '(patho_patch)\n', (4578, 4591), True, 'import numpy as np\n'), ((6977, 6999), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (6997, 6999), False, 'import traceback\n'), ((7076, 7087), 'time.time', 'time.time', ([], {}), '()\n', (7085, 7087), False, 'import time\n')]
|
import numpy as np
from numpy.linalg import inv
def ukfupdate(xsigmapts, ysigmapts, yobs, sigw):
"""Provides Updated mean and covariance.
:param xsigmapts: prior state sigma points.
:param ysigmapts: measurement generated by prior state sigma points.
:param yobs: actual measurement.
:param sigw: sigma point weights.
:return: updatedata.
"""
    # Calculating the mean of xsigmapts (sigma points are stored as columns)
    xm = xsigmapts.mean(1)
    # Calculating the mean of ysigmapts
    ym = ysigmapts.mean(1)
# Calculating pxx
l1 = np.shape(xsigmapts)[0]
pxx = np.zeros((l1, l1))
pyy = np.zeros((l1, l1))
pxy = np.zeros((l1, l1))
# If error happens here check for dimension of state space and the number of sigma points
for i in range(0, 2*l1):
        # np.outer forms the rank-1 covariance contributions; np.matmul on the
        # 1-D column slices would collapse to a scalar inner product instead.
        pxx = pxx + sigw.wc[i]*np.outer(xsigmapts[:, i]-xm, xsigmapts[:, i]-xm)
        pyy = pyy + sigw.wc[i]*np.outer(ysigmapts[:, i]-ym, ysigmapts[:, i]-ym)
        pxy = pxy + sigw.wc[i]*np.outer(xsigmapts[:, i]-xm, ysigmapts[:, i]-ym)
K = np.matmul(pxy, inv(pyy))
xmpost = xm + np.matmul(K, (yobs-ym))
xcpost = pxx - np.matmul(np.matmul(K, pyy), K.transpose())
updatedata = {"xmpost": xmpost, "xcpost": xcpost}
return updatedata
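
# Illustrative usage (an added sketch, not part of the original module): the
# sigma points and the `sigw` weight container below are hypothetical
# stand-ins; only the ukfupdate signature comes from this file.
if __name__ == "__main__":
    from types import SimpleNamespace
    rng = np.random.default_rng(0)
    n = 2                                    # state dimension
    xsig = rng.standard_normal((n, 2 * n))  # columns are prior-state sigma points
    ysig = xsig + 0.1 * rng.standard_normal((n, 2 * n))  # measurement sigma points
    sigw = SimpleNamespace(wc=np.full(2 * n, 1.0 / (2 * n)))  # equal covariance weights
    out = ukfupdate(xsig, ysig, yobs=np.array([0.5, -0.2]), sigw=sigw)
    print(out["xmpost"])
    print(out["xcpost"])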
|
[
"numpy.shape",
"numpy.linalg.inv",
"numpy.zeros",
"numpy.matmul"
] |
[((572, 590), 'numpy.zeros', 'np.zeros', (['(l1, l1)'], {}), '((l1, l1))\n', (580, 590), True, 'import numpy as np\n'), ((601, 619), 'numpy.zeros', 'np.zeros', (['(l1, l1)'], {}), '((l1, l1))\n', (609, 619), True, 'import numpy as np\n'), ((630, 648), 'numpy.zeros', 'np.zeros', (['(l1, l1)'], {}), '((l1, l1))\n', (638, 648), True, 'import numpy as np\n'), ((539, 558), 'numpy.shape', 'np.shape', (['xsigmapts'], {}), '(xsigmapts)\n', (547, 558), True, 'import numpy as np\n'), ((1080, 1088), 'numpy.linalg.inv', 'inv', (['pyy'], {}), '(pyy)\n', (1083, 1088), False, 'from numpy.linalg import inv\n'), ((1108, 1131), 'numpy.matmul', 'np.matmul', (['K', '(yobs - ym)'], {}), '(K, yobs - ym)\n', (1117, 1131), True, 'import numpy as np\n'), ((1161, 1178), 'numpy.matmul', 'np.matmul', (['K', 'pyy'], {}), '(K, pyy)\n', (1170, 1178), True, 'import numpy as np\n')]
|
import numpy as np
def realization(p_1, p_2, n_trials):
p_3 = 1.0 - p_1 - p_2
outcomes = np.random.random(n_trials)
ii_1 = outcomes<=p_1
ii_2 = (outcomes>p_1) & (outcomes<=(p_1+p_2))
ii_3 = (~ii_1) & (~ii_2)
outcomes[ii_1] = 1
outcomes[ii_2] = 2
outcomes[ii_3] = 3
N_1 = len(outcomes[outcomes==1])
N_2 = len(outcomes[outcomes==2])
return N_1, N_2
def joint_probability(p_1, p_2, n_trials, n_iteraciones):
proba = np.zeros([n_trials+1, n_trials+1])
for i in range(n_iteraciones):
N_1, N_2 = realization(p_1, p_2, n_trials)
proba[N_1, N_2] += 1
proba /= n_iteraciones
return proba
def covarianza(p_1, p_2, n_total):
p = joint_probability(p_1, p_2, n_total, 100000)
    # expected value of N1*N2
E_N1_N2 = 0.0
for i in range(n_total+1):
for j in range(n_total+1):
E_N1_N2 += p[i,j] * i * j
    # expected value of N1
E_N1 = 0.0
for i in range(n_total+1):
p_i = 0.0
for j in range(n_total+1):
p_i += p[i,j]
E_N1 += p_i * i
    # expected value of N2
E_N2 = 0.0
for j in range(n_total+1):
p_j = 0.0
for i in range(n_total+1):
p_j += p[i,j]
E_N2 += p_j * j
return E_N1_N2 - E_N1 * E_N2
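
# Sanity check (added sketch, not part of the original module): for a
# multinomial distribution with n trials, theory gives Cov(N1, N2) = -n * p_1 * p_2,
# so the Monte Carlo estimate above should approach that value.
if __name__ == "__main__":
    p_1, p_2, n_total = 0.2, 0.3, 10
    print("estimated:", covarianza(p_1, p_2, n_total))
    print("analytic :", -n_total * p_1 * p_2)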
|
[
"numpy.random.random",
"numpy.zeros"
] |
[((99, 125), 'numpy.random.random', 'np.random.random', (['n_trials'], {}), '(n_trials)\n', (115, 125), True, 'import numpy as np\n'), ((468, 506), 'numpy.zeros', 'np.zeros', (['[n_trials + 1, n_trials + 1]'], {}), '([n_trials + 1, n_trials + 1])\n', (476, 506), True, 'import numpy as np\n')]
|
import os
import random
import string
import numpy as np
import pandas as pd
from sklearn import preprocessing
from pymilvus_orm.types import DataType
from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper
from common import common_type as ct
from utils.util_log import test_log as log
import threading
import traceback
"""" Methods of processing data """
l2 = lambda x, y: np.linalg.norm(np.array(x) - np.array(y))
def gen_unique_str(str_value=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return "test_" + prefix if str_value is None else str_value + "_" + prefix
def gen_str_by_length(length=8):
return "".join(random.choice(string.ascii_letters + string.digits) for _ in range(length))
def gen_int64_field(name=ct.default_int64_field_name, description=ct.default_desc, is_primary=False, **kwargs):
int64_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.INT64, description=description,
is_primary=is_primary, **kwargs)
return int64_field
def gen_float_field(name=ct.default_float_field_name, is_primary=False, description=ct.default_desc):
float_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT, description=description,
is_primary=is_primary)
return float_field
def gen_float_vec_field(name=ct.default_float_vec_field_name, is_primary=False, dim=ct.default_dim,
description=ct.default_desc):
float_vec_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.FLOAT_VECTOR,
description=description, dim=dim,
is_primary=is_primary)
return float_vec_field
def gen_binary_vec_field(name=ct.default_binary_vec_field_name, is_primary=False, dim=ct.default_dim,
description=ct.default_desc):
binary_vec_field, _ = ApiFieldSchemaWrapper().init_field_schema(name=name, dtype=DataType.BINARY_VECTOR,
description=description, dim=dim,
is_primary=is_primary)
return binary_vec_field
def gen_default_collection_schema(description=ct.default_desc, primary_field=ct.default_int64_field_name, auto_id=False):
fields = [gen_int64_field(), gen_float_field(), gen_float_vec_field()]
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field, auto_id=auto_id)
return schema
def gen_collection_schema(fields, primary_field=None, description=ct.default_desc, auto_id=False):
schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, primary_field=primary_field,
description=description, auto_id=auto_id)
return schema
def gen_default_binary_collection_schema(description=ct.default_desc, primary_field=ct.default_int64_field_name):
fields = [gen_int64_field(), gen_float_field(), gen_binary_vec_field()]
binary_schema, _ = ApiCollectionSchemaWrapper().init_collection_schema(fields=fields, description=description,
primary_field=primary_field)
return binary_schema
def gen_vectors(nb, dim):
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors = preprocessing.normalize(vectors, axis=1, norm='l2')
return vectors.tolist()
def gen_binary_vectors(num, dim):
raw_vectors = []
binary_vectors = []
for _ in range(num):
raw_vector = [random.randint(0, 1) for _ in range(dim)]
raw_vectors.append(raw_vector)
binary_vectors.append(bytes(np.packbits(raw_vector, axis=-1).tolist()))
return raw_vectors, binary_vectors
def gen_default_dataframe_data(nb=ct.default_nb, dim=ct.default_dim, start=0):
int_values = pd.Series(data=[i for i in range(start, start + nb)])
float_values = pd.Series(data=[float(i) for i in range(start, start + nb)], dtype="float32")
float_vec_values = gen_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_float_vec_field_name: float_vec_values
})
return df
def gen_default_binary_dataframe_data(nb=ct.default_nb, dim=ct.default_dim, start=0):
int_values = pd.Series(data=[i for i in range(start, start + nb)])
float_values = pd.Series(data=[float(i) for i in range(start, start + nb)], dtype="float32")
binary_raw_values, binary_vec_values = gen_binary_vectors(nb, dim)
df = pd.DataFrame({
ct.default_int64_field_name: int_values,
ct.default_float_field_name: float_values,
ct.default_binary_vec_field_name: binary_vec_values
})
return df, binary_raw_values
def gen_default_list_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = [int_values, float_values, float_vec_values]
return data
def gen_default_tuple_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [float(i) for i in range(nb)]
float_vec_values = gen_vectors(nb, dim)
data = (int_values, float_values, float_vec_values)
return data
def gen_numpy_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = np.arange(nb, dtype='int64')
float_values = np.arange(nb, dtype='float32')
float_vec_values = gen_vectors(nb, dim)
data = [int_values, float_values, float_vec_values]
return data
def gen_default_binary_list_data(nb=ct.default_nb, dim=ct.default_dim):
int_values = [i for i in range(nb)]
float_values = [np.float32(i) for i in range(nb)]
binary_raw_values, binary_vec_values = gen_binary_vectors(nb, dim)
data = [int_values, float_values, binary_vec_values]
return data, binary_raw_values
def gen_simple_index():
index_params = []
for i in range(len(ct.all_index_types)):
if ct.all_index_types[i] in ct.binary_support:
continue
dic = {"index_type": ct.all_index_types[i], "metric_type": "L2"}
dic.update({"params": ct.default_index_params[i]})
index_params.append(dic)
return index_params
def gen_invalid_field_types():
field_types = [
6,
1.0,
[[]],
{},
(),
"",
"a"
]
return field_types
def gen_all_type_fields():
fields = []
for k, v in DataType.__members__.items():
if v != DataType.UNKNOWN:
field, _ = ApiFieldSchemaWrapper().init_field_schema(name=k.lower(), dtype=v)
fields.append(field)
return fields
def gen_normal_expressions():
expressions = [
"",
"int64 > 0",
"(int64 > 0 && int64 < 400) or (int64 > 500 && int64 < 1000)",
"int64 not in [1, 2, 3]",
"int64 in [1, 2, 3] and float != 2",
"int64 == 0 || int64 == 1 || int64 == 2",
]
return expressions
def jaccard(x, y):
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return 1 - np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum())
def hamming(x, y):
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return np.bitwise_xor(x, y).sum()
def tanimoto(x, y):
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return -np.log2(np.double(np.bitwise_and(x, y).sum()) / np.double(np.bitwise_or(x, y).sum()))
def substructure(x, y):
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(y)
def superstructure(x, y):
x = np.asarray(x, np.bool)
y = np.asarray(y, np.bool)
return 1 - np.double(np.bitwise_and(x, y).sum()) / np.count_nonzero(x)
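
# Quick illustration (added note, not part of the original helpers): these
# binary metrics take boolean arrays, e.g. for x = [1, 0, 1, 1], y = [1, 1, 1, 0]:
#   jaccard(x, y) -> 1 - 2/4 = 0.5   (2 bits in common, 4 bits set in the union)
#   hamming(x, y) -> 2               (2 positions differ)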
def modify_file(file_path_list, is_modify=False, input_content=""):
"""
    file_path_list : file list -> list[<file_path>]
    is_modify : whether the file needs to be reset
    input_content : the content that needs to be inserted into the file
"""
    if not isinstance(file_path_list, list):
        log.error("[modify_file] file is not a list.")
        return
for file_path in file_path_list:
folder_path, file_name = os.path.split(file_path)
if not os.path.isdir(folder_path):
log.debug("[modify_file] folder(%s) is not exist." % folder_path)
os.makedirs(folder_path)
if not os.path.isfile(file_path):
log.error("[modify_file] file(%s) is not exist." % file_path)
else:
if is_modify is True:
log.debug("[modify_file] start modifying file(%s)..." % file_path)
with open(file_path, "r+") as f:
f.seek(0)
f.truncate()
f.write(input_content)
f.close()
log.info("[modify_file] file(%s) modification is complete." % file_path_list)
def index_to_dict(index):
return {
"collection_name": index.collection_name,
"field_name": index.field_name,
# "name": index.name,
"params": index.params
}
def assert_equal_index(index_1, index_2):
return index_to_dict(index_1) == index_to_dict(index_2)
def gen_partitions(collection_w, partition_num=1):
"""
target: create extra partitions except for _default
method: create more than one partitions
expected: return collection and raw data
"""
log.info("gen_partitions: creating partitions")
for i in range(partition_num):
partition_name = "search_partition_" + str(i)
collection_w.create_partition(partition_name=partition_name,
description="search partition")
par = collection_w.partitions
assert len(par) == (partition_num + 1)
log.info("gen_partitions: created partitions %s" % par)
def insert_data(collection_w, nb=3000, is_binary=False):
"""
target: insert non-binary/binary data
method: insert non-binary/binary data into partitions if any
expected: return collection and raw data
"""
par = collection_w.partitions
num = len(par)
vectors = []
binary_raw_vectors = []
log.info("insert_data: inserting data into collection %s (num_entities: %s)"
% (collection_w.name, nb))
for i in range(num):
if is_binary:
default_data, binary_raw_data = gen_default_binary_dataframe_data(nb // num)
binary_raw_vectors.extend(binary_raw_data)
else:
default_data = gen_default_dataframe_data(nb // num)
collection_w.insert(default_data, par[i].name)
vectors.append(default_data)
log.info("insert_data: inserted data into collection %s (num_entities: %s)"
% (collection_w.name, nb))
return collection_w, vectors, binary_raw_vectors
|
[
"numpy.bitwise_xor",
"utils.util_log.test_log.error",
"os.path.isfile",
"numpy.arange",
"base.schema_wrapper.ApiFieldSchemaWrapper",
"numpy.bitwise_or",
"pandas.DataFrame",
"base.schema_wrapper.ApiCollectionSchemaWrapper",
"random.randint",
"utils.util_log.test_log.debug",
"utils.util_log.test_log.info",
"numpy.asarray",
"numpy.packbits",
"random.random",
"sklearn.preprocessing.normalize",
"numpy.count_nonzero",
"os.makedirs",
"os.path.isdir",
"numpy.float32",
"random.choice",
"numpy.array",
"numpy.bitwise_and",
"os.path.split",
"pymilvus_orm.types.DataType.__members__.items"
] |
[((3773, 3824), 'sklearn.preprocessing.normalize', 'preprocessing.normalize', (['vectors'], {'axis': '(1)', 'norm': '"""l2"""'}), "(vectors, axis=1, norm='l2')\n", (3796, 3824), False, 'from sklearn import preprocessing\n'), ((4483, 4641), 'pandas.DataFrame', 'pd.DataFrame', (['{ct.default_int64_field_name: int_values, ct.default_float_field_name:\n float_values, ct.default_float_vec_field_name: float_vec_values}'], {}), '({ct.default_int64_field_name: int_values, ct.\n default_float_field_name: float_values, ct.default_float_vec_field_name:\n float_vec_values})\n', (4495, 4641), True, 'import pandas as pd\n'), ((5013, 5174), 'pandas.DataFrame', 'pd.DataFrame', (['{ct.default_int64_field_name: int_values, ct.default_float_field_name:\n float_values, ct.default_binary_vec_field_name: binary_vec_values}'], {}), '({ct.default_int64_field_name: int_values, ct.\n default_float_field_name: float_values, ct.\n default_binary_vec_field_name: binary_vec_values})\n', (5025, 5174), True, 'import pandas as pd\n'), ((5855, 5883), 'numpy.arange', 'np.arange', (['nb'], {'dtype': '"""int64"""'}), "(nb, dtype='int64')\n", (5864, 5883), True, 'import numpy as np\n'), ((5903, 5933), 'numpy.arange', 'np.arange', (['nb'], {'dtype': '"""float32"""'}), "(nb, dtype='float32')\n", (5912, 5933), True, 'import numpy as np\n'), ((6968, 6996), 'pymilvus_orm.types.DataType.__members__.items', 'DataType.__members__.items', ([], {}), '()\n', (6994, 6996), False, 'from pymilvus_orm.types import DataType\n'), ((7516, 7538), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (7526, 7538), True, 'import numpy as np\n'), ((7547, 7569), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (7557, 7569), True, 'import numpy as np\n'), ((7691, 7713), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (7701, 7713), True, 'import numpy as np\n'), ((7722, 7744), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (7732, 7744), True, 'import numpy as np\n'), ((7813, 7835), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (7823, 7835), True, 'import numpy as np\n'), ((7844, 7866), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (7854, 7866), True, 'import numpy as np\n'), ((7999, 8021), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (8009, 8021), True, 'import numpy as np\n'), ((8030, 8052), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (8040, 8052), True, 'import numpy as np\n'), ((8164, 8186), 'numpy.asarray', 'np.asarray', (['x', 'np.bool'], {}), '(x, np.bool)\n', (8174, 8186), True, 'import numpy as np\n'), ((8195, 8217), 'numpy.asarray', 'np.asarray', (['y', 'np.bool'], {}), '(y, np.bool)\n', (8205, 8217), True, 'import numpy as np\n'), ((9942, 9989), 'utils.util_log.test_log.info', 'log.info', (['"""gen_partitions: creating partitions"""'], {}), "('gen_partitions: creating partitions')\n", (9950, 9989), True, 'from utils.util_log import test_log as log\n'), ((10299, 10354), 'utils.util_log.test_log.info', 'log.info', (["('gen_partitions: created partitions %s' % par)"], {}), "('gen_partitions: created partitions %s' % par)\n", (10307, 10354), True, 'from utils.util_log import test_log as log\n'), ((10684, 10797), 'utils.util_log.test_log.info', 'log.info', (["('insert_data: inserting data into collection %s (num_entities: %s)' % (\n collection_w.name, nb))"], {}), "(\n 'insert_data: inserting data into collection %s (num_entities: %s)' % (\n 
collection_w.name, nb))\n", (10692, 10797), True, 'from utils.util_log import test_log as log\n'), ((11167, 11273), 'utils.util_log.test_log.info', 'log.info', (["('insert_data: inserted data into collection %s (num_entities: %s)' % (\n collection_w.name, nb))"], {}), "('insert_data: inserted data into collection %s (num_entities: %s)' %\n (collection_w.name, nb))\n", (11175, 11273), True, 'from utils.util_log import test_log as log\n'), ((5355, 5368), 'numpy.float32', 'np.float32', (['i'], {}), '(i)\n', (5365, 5368), True, 'import numpy as np\n'), ((6184, 6197), 'numpy.float32', 'np.float32', (['i'], {}), '(i)\n', (6194, 6197), True, 'import numpy as np\n'), ((8594, 8640), 'utils.util_log.test_log.error', 'log.error', (['"""[modify_file] file is not a list."""'], {}), "('[modify_file] file is not a list.')\n", (8603, 8640), True, 'from utils.util_log import test_log as log\n'), ((8712, 8736), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (8725, 8736), False, 'import os\n'), ((418, 429), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (426, 429), True, 'import numpy as np\n'), ((432, 443), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (440, 443), True, 'import numpy as np\n'), ((504, 555), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (517, 555), False, 'import random\n'), ((708, 759), 'random.choice', 'random.choice', (['(string.ascii_letters + string.digits)'], {}), '(string.ascii_letters + string.digits)\n', (721, 759), False, 'import random\n'), ((919, 942), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (940, 942), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((1263, 1286), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (1284, 1286), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((1653, 1676), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (1674, 1676), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((2138, 2161), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (2159, 2161), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((2657, 2685), 'base.schema_wrapper.ApiCollectionSchemaWrapper', 'ApiCollectionSchemaWrapper', ([], {}), '()\n', (2683, 2685), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((2998, 3026), 'base.schema_wrapper.ApiCollectionSchemaWrapper', 'ApiCollectionSchemaWrapper', ([], {}), '()\n', (3024, 3026), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((3437, 3465), 'base.schema_wrapper.ApiCollectionSchemaWrapper', 'ApiCollectionSchemaWrapper', ([], {}), '()\n', (3463, 3465), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((3702, 3717), 'random.random', 'random.random', ([], {}), '()\n', (3715, 3717), False, 'import random\n'), ((3981, 4001), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (3995, 4001), False, 'import random\n'), ((7756, 7776), 'numpy.bitwise_xor', 'np.bitwise_xor', (['x', 'y'], {}), '(x, y)\n', (7770, 7776), True, 'import numpy as np\n'), ((8108, 8127), 'numpy.count_nonzero', 'np.count_nonzero', (['y'], {}), '(y)\n', (8124, 8127), 
True, 'import numpy as np\n'), ((8273, 8292), 'numpy.count_nonzero', 'np.count_nonzero', (['x'], {}), '(x)\n', (8289, 8292), True, 'import numpy as np\n'), ((8752, 8778), 'os.path.isdir', 'os.path.isdir', (['folder_path'], {}), '(folder_path)\n', (8765, 8778), False, 'import os\n'), ((8792, 8857), 'utils.util_log.test_log.debug', 'log.debug', (["('[modify_file] folder(%s) is not exist.' % folder_path)"], {}), "('[modify_file] folder(%s) is not exist.' % folder_path)\n", (8801, 8857), True, 'from utils.util_log import test_log as log\n'), ((8870, 8894), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (8881, 8894), False, 'import os\n'), ((8911, 8936), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (8925, 8936), False, 'import os\n'), ((8950, 9011), 'utils.util_log.test_log.error', 'log.error', (["('[modify_file] file(%s) is not exist.' % file_path)"], {}), "('[modify_file] file(%s) is not exist.' % file_path)\n", (8959, 9011), True, 'from utils.util_log import test_log as log\n'), ((9076, 9142), 'utils.util_log.test_log.debug', 'log.debug', (["('[modify_file] start modifying file(%s)...' % file_path)"], {}), "('[modify_file] start modifying file(%s)...' % file_path)\n", (9085, 9142), True, 'from utils.util_log import test_log as log\n'), ((9344, 9421), 'utils.util_log.test_log.info', 'log.info', (["('[modify_file] file(%s) modification is complete.' % file_path_list)"], {}), "('[modify_file] file(%s) modification is complete.' % file_path_list)\n", (9352, 9421), True, 'from utils.util_log import test_log as log\n'), ((7055, 7078), 'base.schema_wrapper.ApiFieldSchemaWrapper', 'ApiFieldSchemaWrapper', ([], {}), '()\n', (7076, 7078), False, 'from base.schema_wrapper import ApiCollectionSchemaWrapper, ApiFieldSchemaWrapper\n'), ((4098, 4130), 'numpy.packbits', 'np.packbits', (['raw_vector'], {'axis': '(-1)'}), '(raw_vector, axis=-1)\n', (4109, 4130), True, 'import numpy as np\n'), ((7595, 7615), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (7609, 7615), True, 'import numpy as np\n'), ((7635, 7654), 'numpy.bitwise_or', 'np.bitwise_or', (['x', 'y'], {}), '(x, y)\n', (7648, 7654), True, 'import numpy as np\n'), ((8078, 8098), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (8092, 8098), True, 'import numpy as np\n'), ((8243, 8263), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (8257, 8263), True, 'import numpy as np\n'), ((7897, 7917), 'numpy.bitwise_and', 'np.bitwise_and', (['x', 'y'], {}), '(x, y)\n', (7911, 7917), True, 'import numpy as np\n'), ((7937, 7956), 'numpy.bitwise_or', 'np.bitwise_or', (['x', 'y'], {}), '(x, y)\n', (7950, 7956), True, 'import numpy as np\n')]
|
import datetime
import os
from collections import deque
import random
import numpy as np
import tensorflow as tf
import pysc2.agents.myAgent.myAgent_6.config.config as config
from pysc2.agents.myAgent.myAgent_6.net.lenet import Lenet
class DQN():
    def __init__(self, mu, sigma, learning_rate, actiondim, parameterdim, statedim, name):  # initialization
        # Initialize the replay buffer; REPLAY_SIZE defines its maximum length
self.replay_buffer = deque(maxlen=config.REPLAY_SIZE)
        # Neural network parameters
self.mu = mu
self.sigma = sigma
self.learning_rate = learning_rate
self.time_step = 0
self.epsilon = config.INITIAL_EPSILON
        # Number of action dimensions, action-parameter dimensions (default 6), and state dimensions
self.action_dim = actiondim
self.parameterdim = parameterdim
self.state_dim = statedim
        # Network structure initialization
self.name = name
self.net = Lenet(self.mu, self.sigma, self.learning_rate, self.action_dim, self.parameterdim, self.state_dim, self.name)
# Init session
self.session = tf.InteractiveSession()
self.session.run(tf.initialize_all_variables())
self.modelSaver = tf.train.Saver()
self.recordSaver = None
self.recordCount = 0
self.restoreModelMark = True
def restoreModel(self, modelLoadPath):
self.modelSaver.restore(self.session, modelLoadPath + '/' + self.name + '.ckpt')
def saveModel(self, modelSavePath, episode):
if episode % config.MODEL_SAVE_EPISODE == 0:
thisPath = modelSavePath + 'episode_' + str(episode) + '/'
try:
os.makedirs(thisPath)
except OSError:
pass
            self.modelSaver.save(self.session, thisPath + self.name + '.ckpt')
def saveRecord(self, modelSavePath, data):
if self.recordSaver is None:
thisPath = modelSavePath
self.recordSaver = tf.summary.FileWriter(thisPath, self.session.graph)
data_summary = tf.Summary(value=[tf.Summary.Value(tag=self.name + '_' + "loss", simple_value=data)])
self.recordSaver.add_summary(summary=data_summary, global_step=self.recordCount)
self.recordCount += 1
    def perceive(self, state, action, reward, next_state, done):  # perceive and store transition information
one_hot_action = np.zeros(self.action_dim + self.parameterdim, dtype=np.float32)
one_hot_action[int(action[0])] = 1
if self.parameterdim != 0:
one_hot_action[self.action_dim:] = action[1:]
state = np.squeeze(state)
next_state = np.squeeze(next_state)
self.replay_buffer.append([state, one_hot_action, reward, next_state, done])
    def train_Q_network(self, modelSavePath, episode):  # train the network
if len(self.replay_buffer) > config.BATCH_SIZE:
for mark in range(config.LOOP):
minibatch = random.sample(self.replay_buffer, config.BATCH_SIZE)
state_batch = np.array([data[0] for data in minibatch])
action_batch = np.array([data[1] for data in minibatch])
reward_batch = np.array([data[2] for data in minibatch])
next_state_batch = np.array([data[3] for data in minibatch])
# Step 2: calculate y
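                # (Added note, matching the standard DQN target:
                #    y = r                             if the episode terminated,
                #    y = r + GAMMA * max_a' Q(s', a')  otherwise;
                #  the trailing `parameterdim` entries of Q are carried through unchanged.)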
y_batch = np.array([])
Q_value_batch = np.array(self.session.run(self.net.Q_value, {self.net.state_input: next_state_batch}))
for i in range(0, config.BATCH_SIZE):
done = minibatch[i][4]
if done:
temp = np.append(np.array(reward_batch[i]), np.array(Q_value_batch[i][self.action_dim:]))
temp = temp.reshape((1, 1 + self.parameterdim))
y_batch = np.append(y_batch, temp)
else:
temp = np.append(np.array(reward_batch[i] + config.GAMMA * np.max(Q_value_batch[i][0:self.action_dim])),
Q_value_batch[i][self.action_dim:])
temp = temp.reshape((1, 1 + self.parameterdim))
y_batch = np.append(y_batch, temp)
y_batch = np.array(y_batch).reshape(config.BATCH_SIZE, 1 + self.parameterdim)
_, loss = self.session.run([self.net.train_op, self.net.loss],
feed_dict={self.net.y_input: y_batch,
self.net.action_input: action_batch,
self.net.state_input: state_batch})
self.saveRecord(modelSavePath, loss)
self.saveModel(modelSavePath, episode)
    def egreedy_action(self, state):  # output an action with random exploration
Q_value = self.session.run(self.net.Q_value, {self.net.state_input: state})[0]
# self.epsilon -= (config.INITIAL_EPSILON - config.FINAL_EPSILON) / 10000
if np.random.uniform() <= self.epsilon:
random_action = np.random.randint(0, self.action_dim)
random_parameter = np.random.rand(self.parameterdim)
random_action_and_parameter = np.append(random_action, random_parameter).flatten()
return random_action_and_parameter
else:
action = np.argmax(Q_value[0:self.action_dim])
parameter = np.array(Q_value[self.action_dim:(self.action_dim + self.parameterdim)])
action_and_parameter = np.append(action, parameter).flatten()
return action_and_parameter
def action(self, state, modelLoadPath):
if self.restoreModelMark == True and modelLoadPath is not None:
self.restoreModelMark = False
self.restoreModel(modelLoadPath)
print(self.name + 'read!')
Q_value = self.session.run(self.net.Q_value, {self.net.state_input: state})[0]
action = np.argmax(Q_value[0:self.action_dim])
parameter = np.array(Q_value[self.action_dim:(self.action_dim + self.parameterdim)])
action_and_parameter = np.append(action, parameter)
return action_and_parameter
|
[
"numpy.random.uniform",
"os.makedirs",
"tensorflow.train.Saver",
"numpy.argmax",
"numpy.random.rand",
"random.sample",
"numpy.zeros",
"numpy.append",
"pysc2.agents.myAgent.myAgent_6.net.lenet.Lenet",
"tensorflow.summary.FileWriter",
"numpy.array",
"numpy.random.randint",
"tensorflow.initialize_all_variables",
"tensorflow.Summary.Value",
"tensorflow.InteractiveSession",
"numpy.max",
"numpy.squeeze",
"collections.deque"
] |
[((418, 450), 'collections.deque', 'deque', ([], {'maxlen': 'config.REPLAY_SIZE'}), '(maxlen=config.REPLAY_SIZE)\n', (423, 450), False, 'from collections import deque\n'), ((845, 959), 'pysc2.agents.myAgent.myAgent_6.net.lenet.Lenet', 'Lenet', (['self.mu', 'self.sigma', 'self.learning_rate', 'self.action_dim', 'self.parameterdim', 'self.state_dim', 'self.name'], {}), '(self.mu, self.sigma, self.learning_rate, self.action_dim, self.\n parameterdim, self.state_dim, self.name)\n', (850, 959), False, 'from pysc2.agents.myAgent.myAgent_6.net.lenet import Lenet\n'), ((1002, 1025), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {}), '()\n', (1023, 1025), True, 'import tensorflow as tf\n'), ((1109, 1125), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {}), '()\n', (1123, 1125), True, 'import tensorflow as tf\n'), ((2252, 2315), 'numpy.zeros', 'np.zeros', (['(self.action_dim + self.parameterdim)'], {'dtype': 'np.float32'}), '(self.action_dim + self.parameterdim, dtype=np.float32)\n', (2260, 2315), True, 'import numpy as np\n'), ((2468, 2485), 'numpy.squeeze', 'np.squeeze', (['state'], {}), '(state)\n', (2478, 2485), True, 'import numpy as np\n'), ((2507, 2529), 'numpy.squeeze', 'np.squeeze', (['next_state'], {}), '(next_state)\n', (2517, 2529), True, 'import numpy as np\n'), ((5801, 5838), 'numpy.argmax', 'np.argmax', (['Q_value[0:self.action_dim]'], {}), '(Q_value[0:self.action_dim])\n', (5810, 5838), True, 'import numpy as np\n'), ((5859, 5929), 'numpy.array', 'np.array', (['Q_value[self.action_dim:self.action_dim + self.parameterdim]'], {}), '(Q_value[self.action_dim:self.action_dim + self.parameterdim])\n', (5867, 5929), True, 'import numpy as np\n'), ((5963, 5991), 'numpy.append', 'np.append', (['action', 'parameter'], {}), '(action, parameter)\n', (5972, 5991), True, 'import numpy as np\n'), ((1051, 1080), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1078, 1080), True, 'import tensorflow as tf\n'), ((1870, 1921), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['thisPath', 'self.session.graph'], {}), '(thisPath, self.session.graph)\n', (1891, 1921), True, 'import tensorflow as tf\n'), ((4857, 4876), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (4874, 4876), True, 'import numpy as np\n'), ((4922, 4959), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.action_dim'], {}), '(0, self.action_dim)\n', (4939, 4959), True, 'import numpy as np\n'), ((4991, 5024), 'numpy.random.rand', 'np.random.rand', (['self.parameterdim'], {}), '(self.parameterdim)\n', (5005, 5024), True, 'import numpy as np\n'), ((5203, 5240), 'numpy.argmax', 'np.argmax', (['Q_value[0:self.action_dim]'], {}), '(Q_value[0:self.action_dim])\n', (5212, 5240), True, 'import numpy as np\n'), ((5265, 5335), 'numpy.array', 'np.array', (['Q_value[self.action_dim:self.action_dim + self.parameterdim]'], {}), '(Q_value[self.action_dim:self.action_dim + self.parameterdim])\n', (5273, 5335), True, 'import numpy as np\n'), ((1565, 1586), 'os.makedirs', 'os.makedirs', (['thisPath'], {}), '(thisPath)\n', (1576, 1586), False, 'import os\n'), ((2809, 2861), 'random.sample', 'random.sample', (['self.replay_buffer', 'config.BATCH_SIZE'], {}), '(self.replay_buffer, config.BATCH_SIZE)\n', (2822, 2861), False, 'import random\n'), ((2892, 2933), 'numpy.array', 'np.array', (['[data[0] for data in minibatch]'], {}), '([data[0] for data in minibatch])\n', (2900, 2933), True, 'import numpy as np\n'), ((2965, 3006), 'numpy.array', 'np.array', (['[data[1] for 
data in minibatch]'], {}), '([data[1] for data in minibatch])\n', (2973, 3006), True, 'import numpy as np\n'), ((3038, 3079), 'numpy.array', 'np.array', (['[data[2] for data in minibatch]'], {}), '([data[2] for data in minibatch])\n', (3046, 3079), True, 'import numpy as np\n'), ((3115, 3156), 'numpy.array', 'np.array', (['[data[3] for data in minibatch]'], {}), '([data[3] for data in minibatch])\n', (3123, 3156), True, 'import numpy as np\n'), ((3222, 3234), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3230, 3234), True, 'import numpy as np\n'), ((1964, 2029), 'tensorflow.Summary.Value', 'tf.Summary.Value', ([], {'tag': "(self.name + '_' + 'loss')", 'simple_value': 'data'}), "(tag=self.name + '_' + 'loss', simple_value=data)\n", (1980, 2029), True, 'import tensorflow as tf\n'), ((5067, 5109), 'numpy.append', 'np.append', (['random_action', 'random_parameter'], {}), '(random_action, random_parameter)\n', (5076, 5109), True, 'import numpy as np\n'), ((5373, 5401), 'numpy.append', 'np.append', (['action', 'parameter'], {}), '(action, parameter)\n', (5382, 5401), True, 'import numpy as np\n'), ((3701, 3725), 'numpy.append', 'np.append', (['y_batch', 'temp'], {}), '(y_batch, temp)\n', (3710, 3725), True, 'import numpy as np\n'), ((4064, 4088), 'numpy.append', 'np.append', (['y_batch', 'temp'], {}), '(y_batch, temp)\n', (4073, 4088), True, 'import numpy as np\n'), ((4115, 4132), 'numpy.array', 'np.array', (['y_batch'], {}), '(y_batch)\n', (4123, 4132), True, 'import numpy as np\n'), ((3522, 3547), 'numpy.array', 'np.array', (['reward_batch[i]'], {}), '(reward_batch[i])\n', (3530, 3547), True, 'import numpy as np\n'), ((3549, 3593), 'numpy.array', 'np.array', (['Q_value_batch[i][self.action_dim:]'], {}), '(Q_value_batch[i][self.action_dim:])\n', (3557, 3593), True, 'import numpy as np\n'), ((3835, 3878), 'numpy.max', 'np.max', (['Q_value_batch[i][0:self.action_dim]'], {}), '(Q_value_batch[i][0:self.action_dim])\n', (3841, 3878), True, 'import numpy as np\n')]
|
import logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%Y/%m/%d %H:%M:%S',
level=logging.INFO,
)
logger = logging.getLogger("Main")
import os,random
import numpy as np
import torch
from utils_glue import output_modes, processors
from pytorch_pretrained_bert.my_modeling import BertConfig
from pytorch_pretrained_bert import BertTokenizer
from optimization import BERTAdam
import config
from utils import divide_parameters, load_and_cache_examples
from modeling import BertForGLUESimple,BertForGLUESimpleAdaptor
from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler
from tqdm import tqdm
from utils_glue import compute_metrics
from functools import partial
def args_check(args):
if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
logger.warning("Output directory () already exists and is not empty.")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
if not args.do_train and not args.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count() if not args.no_cuda else 0
else:
device = torch.device("cuda", args.local_rank)
n_gpu = 1
torch.distributed.init_process_group(backend='nccl')
logger.info("device %s n_gpu %d distributed training %r", device, n_gpu, bool(args.local_rank != -1))
args.n_gpu = n_gpu
args.device = device
return device, n_gpu
def predict(model,eval_datasets,step,args):
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_output_dir = args.output_dir
results = {}
for eval_task,eval_dataset in zip(eval_task_names, eval_datasets):
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
logger.info("Predicting...")
logger.info("***** Running predictions *****")
logger.info(" task name = %s", eval_task)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.predict_batch_size)
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.predict_batch_size)
model.eval()
pred_logits = []
label_ids = []
for batch in tqdm(eval_dataloader, desc="Evaluating", disable=None):
input_ids, input_mask, segment_ids, labels = batch
input_ids = input_ids.to(args.device)
input_mask = input_mask.to(args.device)
segment_ids = segment_ids.to(args.device)
with torch.no_grad():
logits = model(input_ids, input_mask, segment_ids)
cpu_logits = logits.detach().cpu()
for i in range(len(cpu_logits)):
pred_logits.append(cpu_logits[i].numpy())
label_ids.append(labels[i])
pred_logits = np.array(pred_logits)
label_ids = np.array(label_ids)
if args.output_mode == "classification":
preds = np.argmax(pred_logits, axis=1)
else: # args.output_mode == "regression":
preds = np.squeeze(pred_logits)
result = compute_metrics(eval_task, preds, label_ids)
logger.info(f"task:,{eval_task}")
logger.info(f"result: {result}")
results.update(result)
output_eval_file = os.path.join(eval_output_dir, "eval_results-%s.txt" % eval_task)
with open(output_eval_file, "a") as writer:
logger.info("***** Eval results {} task {} *****".format(step, eval_task))
writer.write("step: %d ****\n " % step)
for key in sorted(results.keys()):
logger.info("%s = %s", key, str(results[key]))
writer.write("%s = %s\n" % (key, str(results[key])))
model.train()
return results
def main():
#parse arguments
config.parse()
args = config.args
for k,v in vars(args).items():
logger.info(f"{k}:{v}")
#set seeds
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed_all(args.random_seed)
np.random.seed(args.random_seed)
random.seed(args.random_seed)
#arguments check
device, n_gpu = args_check(args)
os.makedirs(args.output_dir, exist_ok=True)
forward_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
args.forward_batch_size = forward_batch_size
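    #forward_batch_size is the per-step micro-batch: gradients are accumulated
    #over gradient_accumulation_steps micro-batches to emulate train_batch_size.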
#load bert config
bert_config_T = BertConfig.from_json_file(args.bert_config_file_T)
bert_config_S = BertConfig.from_json_file(args.bert_config_file_S)
assert args.max_seq_length <= bert_config_T.max_position_embeddings
assert args.max_seq_length <= bert_config_S.max_position_embeddings
#Prepare GLUE task
processor = processors[args.task_name]()
args.output_mode = output_modes[args.task_name]
label_list = processor.get_labels()
num_labels = len(label_list)
#read data
train_dataset = None
eval_datasets = None
num_train_steps = None
tokenizer = BertTokenizer(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)
if args.do_train:
train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False)
if args.aux_task_name:
aux_train_dataset = load_and_cache_examples(args, args.aux_task_name, tokenizer, evaluate=False, is_aux=True)
train_dataset = torch.utils.data.ConcatDataset([train_dataset, aux_train_dataset])
num_train_steps = int(len(train_dataset)/args.train_batch_size) * args.num_train_epochs
if args.do_predict:
eval_datasets = []
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
for eval_task in eval_task_names:
eval_datasets.append(load_and_cache_examples(args, eval_task, tokenizer, evaluate=True))
logger.info("Data loaded")
#Build Model and load checkpoint
model_S = BertForGLUESimple(bert_config_S, num_labels=num_labels,args=args)
#Load teacher
if args.tuned_checkpoint_Ts:
model_Ts = [BertForGLUESimple(bert_config_T, num_labels=num_labels,args=args) for i in range(len(args.tuned_checkpoint_Ts))]
for model_T, ckpt_T in zip(model_Ts,args.tuned_checkpoint_Ts):
logger.info("Load state dict %s" % ckpt_T)
state_dict_T = torch.load(ckpt_T, map_location='cpu')
model_T.load_state_dict(state_dict_T)
model_T.eval()
else:
assert args.do_predict is True
#Load student
if args.load_model_type=='bert':
assert args.init_checkpoint_S is not None
state_dict_S = torch.load(args.init_checkpoint_S, map_location='cpu')
state_weight = {k[5:]:v for k,v in state_dict_S.items() if k.startswith('bert.')}
missing_keys,_ = model_S.bert.load_state_dict(state_weight,strict=False)
assert len(missing_keys)==0
elif args.load_model_type=='all':
assert args.tuned_checkpoint_S is not None
state_dict_S = torch.load(args.tuned_checkpoint_S,map_location='cpu')
model_S.load_state_dict(state_dict_S)
else:
logger.info("Model is randomly initialized.")
if args.do_train:
for model_T in model_Ts:
model_T.to(device)
model_S.to(device)
if args.local_rank != -1 or n_gpu > 1:
if args.local_rank != -1:
raise NotImplementedError
elif n_gpu > 1:
if args.do_train:
model_Ts = [torch.nn.DataParallel(model_T) for model_T in model_Ts]
model_S = torch.nn.DataParallel(model_S) #,output_device=n_gpu-1)
if args.do_train:
#parameters
params = list(model_S.named_parameters())
all_trainable_params = divide_parameters(params, lr=args.learning_rate)
logger.info("Length of all_trainable_params: %d", len(all_trainable_params))
optimizer = BERTAdam(all_trainable_params,lr=args.learning_rate,
warmup=args.warmup_proportion,t_total=num_train_steps,schedule=args.schedule,
s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Forward batch size = %d", forward_batch_size)
logger.info(" Num backward steps = %d", num_train_steps)
########### DISTILLATION ###########
train_config = TrainingConfig(
gradient_accumulation_steps = args.gradient_accumulation_steps,
ckpt_frequency = args.ckpt_frequency,
log_dir = args.output_dir,
output_dir = args.output_dir,
device = args.device)
distill_config = DistillationConfig(
temperature = args.temperature,
kd_loss_type = 'ce')
logger.info(f"{train_config}")
logger.info(f"{distill_config}")
adaptor = partial(BertForGLUESimpleAdaptor, no_logits=False, no_mask = False)
distiller = MultiTeacherDistiller(train_config = train_config,
distill_config = distill_config,
model_T = model_Ts, model_S = model_S,
adaptor_T=adaptor,
adaptor_S=adaptor)
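        #A single student is distilled from the soft targets of all loaded
        #teachers; the adaptors translate each model's raw outputs into the
        #fields (e.g. logits, masks) that textbrewer expects.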
if args.local_rank == -1:
train_sampler = RandomSampler(train_dataset)
else:
raise NotImplementedError
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.forward_batch_size,drop_last=True)
callback_func = partial(predict, eval_datasets=eval_datasets, args=args)
with distiller:
distiller.train(optimizer, scheduler=None, dataloader=train_dataloader,
num_epochs=args.num_train_epochs, callback=callback_func)
if not args.do_train and args.do_predict:
res = predict(model_S,eval_datasets,step=0,args=args)
print (res)
if __name__ == "__main__":
main()
|
[
"numpy.random.seed",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"pytorch_pretrained_bert.BertTokenizer",
"textbrewer.MultiTeacherDistiller",
"torch.cuda.device_count",
"config.parse",
"pytorch_pretrained_bert.my_modeling.BertConfig.from_json_file",
"torch.device",
"modeling.BertForGLUESimple",
"utils_glue.compute_metrics",
"os.path.join",
"torch.no_grad",
"torch.utils.data.DataLoader",
"textbrewer.TrainingConfig",
"torch.load",
"os.path.exists",
"utils.load_and_cache_examples",
"random.seed",
"torch.utils.data.SequentialSampler",
"functools.partial",
"tqdm.tqdm",
"torch.utils.data.ConcatDataset",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.squeeze",
"os.listdir",
"torch.distributed.init_process_group",
"os.makedirs",
"logging.basicConfig",
"textbrewer.DistillationConfig",
"torch.utils.data.DistributedSampler",
"torch.cuda.manual_seed_all",
"numpy.array",
"optimization.BERTAdam",
"torch.nn.DataParallel",
"logging.getLogger",
"utils.divide_parameters"
] |
[((15, 157), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(levelname)s - %(name)s - %(message)s"""', 'datefmt': '"""%Y/%m/%d %H:%M:%S"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt=\n '%Y/%m/%d %H:%M:%S', level=logging.INFO)\n", (34, 157), False, 'import logging\n'), ((176, 201), 'logging.getLogger', 'logging.getLogger', (['"""Main"""'], {}), "('Main')\n", (193, 201), False, 'import logging\n'), ((3946, 4010), 'os.path.join', 'os.path.join', (['eval_output_dir', "('eval_results-%s.txt' % eval_task)"], {}), "(eval_output_dir, 'eval_results-%s.txt' % eval_task)\n", (3958, 4010), False, 'import os, random\n'), ((4432, 4446), 'config.parse', 'config.parse', ([], {}), '()\n', (4444, 4446), False, 'import config\n'), ((4556, 4591), 'torch.manual_seed', 'torch.manual_seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4573, 4591), False, 'import torch\n'), ((4596, 4640), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.random_seed'], {}), '(args.random_seed)\n', (4622, 4640), False, 'import torch\n'), ((4645, 4677), 'numpy.random.seed', 'np.random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4659, 4677), True, 'import numpy as np\n'), ((4682, 4711), 'random.seed', 'random.seed', (['args.random_seed'], {}), '(args.random_seed)\n', (4693, 4711), False, 'import os, random\n'), ((4775, 4818), 'os.makedirs', 'os.makedirs', (['args.output_dir'], {'exist_ok': '(True)'}), '(args.output_dir, exist_ok=True)\n', (4786, 4818), False, 'import os, random\n'), ((4998, 5048), 'pytorch_pretrained_bert.my_modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.bert_config_file_T'], {}), '(args.bert_config_file_T)\n', (5023, 5048), False, 'from pytorch_pretrained_bert.my_modeling import BertConfig\n'), ((5069, 5119), 'pytorch_pretrained_bert.my_modeling.BertConfig.from_json_file', 'BertConfig.from_json_file', (['args.bert_config_file_S'], {}), '(args.bert_config_file_S)\n', (5094, 5119), False, 'from pytorch_pretrained_bert.my_modeling import BertConfig\n'), ((5568, 5643), 'pytorch_pretrained_bert.BertTokenizer', 'BertTokenizer', ([], {'vocab_file': 'args.vocab_file', 'do_lower_case': 'args.do_lower_case'}), '(vocab_file=args.vocab_file, do_lower_case=args.do_lower_case)\n', (5581, 5643), False, 'from pytorch_pretrained_bert import BertTokenizer\n'), ((6483, 6549), 'modeling.BertForGLUESimple', 'BertForGLUESimple', (['bert_config_S'], {'num_labels': 'num_labels', 'args': 'args'}), '(bert_config_S, num_labels=num_labels, args=args)\n', (6500, 6549), False, 'from modeling import BertForGLUESimple, BertForGLUESimpleAdaptor\n'), ((895, 926), 'os.path.exists', 'os.path.exists', (['args.output_dir'], {}), '(args.output_dir)\n', (909, 926), False, 'import os, random\n'), ((931, 958), 'os.listdir', 'os.listdir', (['args.output_dir'], {}), '(args.output_dir)\n', (941, 958), False, 'import os, random\n'), ((1625, 1662), 'torch.device', 'torch.device', (['"""cuda"""', 'args.local_rank'], {}), "('cuda', args.local_rank)\n", (1637, 1662), False, 'import torch\n'), ((1689, 1741), 'torch.distributed.init_process_group', 'torch.distributed.init_process_group', ([], {'backend': '"""nccl"""'}), "(backend='nccl')\n", (1725, 1741), False, 'import torch\n'), ((2720, 2807), 'torch.utils.data.DataLoader', 'DataLoader', (['eval_dataset'], {'sampler': 'eval_sampler', 'batch_size': 'args.predict_batch_size'}), '(eval_dataset, sampler=eval_sampler, batch_size=args.\n 
predict_batch_size)\n', (2730, 2807), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((2894, 2948), 'tqdm.tqdm', 'tqdm', (['eval_dataloader'], {'desc': '"""Evaluating"""', 'disable': 'None'}), "(eval_dataloader, desc='Evaluating', disable=None)\n", (2898, 2948), False, 'from tqdm import tqdm\n'), ((3487, 3508), 'numpy.array', 'np.array', (['pred_logits'], {}), '(pred_logits)\n', (3495, 3508), True, 'import numpy as np\n'), ((3531, 3550), 'numpy.array', 'np.array', (['label_ids'], {}), '(label_ids)\n', (3539, 3550), True, 'import numpy as np\n'), ((3763, 3807), 'utils_glue.compute_metrics', 'compute_metrics', (['eval_task', 'preds', 'label_ids'], {}), '(eval_task, preds, label_ids)\n', (3778, 3807), False, 'from utils_glue import compute_metrics\n'), ((5690, 5762), 'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'args.task_name', 'tokenizer'], {'evaluate': '(False)'}), '(args, args.task_name, tokenizer, evaluate=False)\n', (5713, 5762), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((7179, 7233), 'torch.load', 'torch.load', (['args.init_checkpoint_S'], {'map_location': '"""cpu"""'}), "(args.init_checkpoint_S, map_location='cpu')\n", (7189, 7233), False, 'import torch\n'), ((8283, 8331), 'utils.divide_parameters', 'divide_parameters', (['params'], {'lr': 'args.learning_rate'}), '(params, lr=args.learning_rate)\n', (8300, 8331), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((8438, 8640), 'optimization.BERTAdam', 'BERTAdam', (['all_trainable_params'], {'lr': 'args.learning_rate', 'warmup': 'args.warmup_proportion', 't_total': 'num_train_steps', 'schedule': 'args.schedule', 's_opt1': 'args.s_opt1', 's_opt2': 'args.s_opt2', 's_opt3': 'args.s_opt3'}), '(all_trainable_params, lr=args.learning_rate, warmup=args.\n warmup_proportion, t_total=num_train_steps, schedule=args.schedule,\n s_opt1=args.s_opt1, s_opt2=args.s_opt2, s_opt3=args.s_opt3)\n', (8446, 8640), False, 'from optimization import BERTAdam\n'), ((9007, 9201), 'textbrewer.TrainingConfig', 'TrainingConfig', ([], {'gradient_accumulation_steps': 'args.gradient_accumulation_steps', 'ckpt_frequency': 'args.ckpt_frequency', 'log_dir': 'args.output_dir', 'output_dir': 'args.output_dir', 'device': 'args.device'}), '(gradient_accumulation_steps=args.gradient_accumulation_steps,\n ckpt_frequency=args.ckpt_frequency, log_dir=args.output_dir, output_dir\n =args.output_dir, device=args.device)\n', (9021, 9201), False, 'from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller\n'), ((9290, 9357), 'textbrewer.DistillationConfig', 'DistillationConfig', ([], {'temperature': 'args.temperature', 'kd_loss_type': '"""ce"""'}), "(temperature=args.temperature, kd_loss_type='ce')\n", (9308, 9357), False, 'from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller\n'), ((9486, 9551), 'functools.partial', 'partial', (['BertForGLUESimpleAdaptor'], {'no_logits': '(False)', 'no_mask': '(False)'}), '(BertForGLUESimpleAdaptor, no_logits=False, no_mask=False)\n', (9493, 9551), False, 'from functools import partial\n'), ((9576, 9737), 'textbrewer.MultiTeacherDistiller', 'MultiTeacherDistiller', ([], {'train_config': 'train_config', 'distill_config': 'distill_config', 'model_T': 'model_Ts', 'model_S': 'model_S', 'adaptor_T': 'adaptor', 'adaptor_S': 'adaptor'}), '(train_config=train_config, distill_config=\n distill_config, model_T=model_Ts, model_S=model_S, 
adaptor_T=adaptor,\n adaptor_S=adaptor)\n', (9597, 9737), False, 'from textbrewer import DistillationConfig, TrainingConfig, MultiTeacherDistiller\n'), ((10019, 10124), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'sampler': 'train_sampler', 'batch_size': 'args.forward_batch_size', 'drop_last': '(True)'}), '(train_dataset, sampler=train_sampler, batch_size=args.\n forward_batch_size, drop_last=True)\n', (10029, 10124), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((10143, 10199), 'functools.partial', 'partial', (['predict'], {'eval_datasets': 'eval_datasets', 'args': 'args'}), '(predict, eval_datasets=eval_datasets, args=args)\n', (10150, 10199), False, 'from functools import partial\n'), ((1545, 1570), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (1568, 1570), False, 'import torch\n'), ((2276, 2304), 'os.makedirs', 'os.makedirs', (['eval_output_dir'], {}), '(eval_output_dir)\n', (2287, 2304), False, 'import os, random\n'), ((2599, 2630), 'torch.utils.data.SequentialSampler', 'SequentialSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (2616, 2630), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((2661, 2693), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['eval_dataset'], {}), '(eval_dataset)\n', (2679, 2693), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((3621, 3651), 'numpy.argmax', 'np.argmax', (['pred_logits'], {'axis': '(1)'}), '(pred_logits, axis=1)\n', (3630, 3651), True, 'import numpy as np\n'), ((3722, 3745), 'numpy.squeeze', 'np.squeeze', (['pred_logits'], {}), '(pred_logits)\n', (3732, 3745), True, 'import numpy as np\n'), ((5826, 5919), 'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'args.aux_task_name', 'tokenizer'], {'evaluate': '(False)', 'is_aux': '(True)'}), '(args, args.aux_task_name, tokenizer, evaluate=False,\n is_aux=True)\n', (5849, 5919), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((5944, 6010), 'torch.utils.data.ConcatDataset', 'torch.utils.data.ConcatDataset', (['[train_dataset, aux_train_dataset]'], {}), '([train_dataset, aux_train_dataset])\n', (5974, 6010), False, 'import torch\n'), ((6620, 6686), 'modeling.BertForGLUESimple', 'BertForGLUESimple', (['bert_config_T'], {'num_labels': 'num_labels', 'args': 'args'}), '(bert_config_T, num_labels=num_labels, args=args)\n', (6637, 6686), False, 'from modeling import BertForGLUESimple, BertForGLUESimpleAdaptor\n'), ((6886, 6924), 'torch.load', 'torch.load', (['ckpt_T'], {'map_location': '"""cpu"""'}), "(ckpt_T, map_location='cpu')\n", (6896, 6924), False, 'import torch\n'), ((7553, 7608), 'torch.load', 'torch.load', (['args.tuned_checkpoint_S'], {'map_location': '"""cpu"""'}), "(args.tuned_checkpoint_S, map_location='cpu')\n", (7563, 7608), False, 'import torch\n'), ((9911, 9939), 'torch.utils.data.RandomSampler', 'RandomSampler', (['train_dataset'], {}), '(train_dataset)\n', (9924, 9939), False, 'from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler, DistributedSampler\n'), ((2200, 2231), 'os.path.exists', 'os.path.exists', (['eval_output_dir'], {}), '(eval_output_dir)\n', (2214, 2231), False, 'import os, random\n'), ((3186, 3201), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3199, 3201), False, 'import torch\n'), ((6330, 6396), 
'utils.load_and_cache_examples', 'load_and_cache_examples', (['args', 'eval_task', 'tokenizer'], {'evaluate': '(True)'}), '(args, eval_task, tokenizer, evaluate=True)\n', (6353, 6396), False, 'from utils import divide_parameters, load_and_cache_examples\n'), ((8103, 8133), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model_S'], {}), '(model_S)\n', (8124, 8133), False, 'import torch\n'), ((1470, 1495), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1493, 1495), False, 'import torch\n'), ((8025, 8055), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model_T'], {}), '(model_T)\n', (8046, 8055), False, 'import torch\n')]
|
import numpy as np
import random,copy
from scipy.sparse import csr_matrix
import scipy.integrate
from numpy import linalg
import time
import settings
import math
####PAULI OPERATORS####
def sigma_x_operator(basis_vector,indices,pos_sigma=-1):
"""Operator that creates the matrix representation of sigma_x."""
    M=sigma_moins_operator(basis_vector,indices,pos_sigma)
return M+np.transpose(M)
def sigma_moins_operator(basis_vector,indices,pos_sigma):
"""
    Operator that creates the matrix representation of sigma_- (the lowering part;
    sigma_x above is obtained as M + M^T). The argument pos_sigma denotes the
    position of the sigma_-; if pos_sigma=-1, the operation is global.
"""
dim=len(basis_vector)
sigma_x_matrix=np.zeros((dim, dim)) #creation of the output array
for ii in range(dim-1): #Not optimized. We need to implement a 'for basis_vector_loc in basis_vector'
basis_vector_ii=basis_vector[ii]
(n_initial,n_final)=get_indices(basis_vector[ii],indices)
#we look for possible connections in a restricted set, in order to reduce the computation
#time. The function get_indices will return the indices between which to look.
if n_initial<0. or n_final<0.:
continue
for jj in range(n_initial,n_final):
basis_vector_jj=basis_vector[jj]
if pos_sigma>-0.1: #Local sigma_x
loc1=list(copy.copy(basis_vector_ii))
loc1.append(pos_sigma) #we add the index j to the smallest list
if set(loc1) == set(basis_vector_jj):
sigma_x_matrix[ii,jj]=1.
continue
else: #Global sigma_x
if(set(basis_vector_ii).issubset(set(basis_vector_jj))): #here issubset is sufficient because we know that basis_vector_ii and
sigma_x_matrix[ii,jj]=1. #basis_vector_jj only differ by one excitation (thanks to get_indices).
return sigma_x_matrix
def sigma_z_operator(basis_vector,pos=-1):
"""
Operator that creates the matrix representation of sigma_z. As sigma^z is diagonal in the computational basis,
we will only return a vector-type array and later apply element-wise multiplication with the wavefunction
if pos=-1, global operation.
"""
dim=len(basis_vector)
sigma_z_matrix=np.zeros(dim)
#Local operator at position pos
if pos>-0.1:
for jj in range(dim):
if (set([pos]).issubset(set(basis_vector[jj]))):
sigma_z_matrix[jj]=1.
#Global operator, all positions
else:
for jj in range(dim):
leng=len(basis_vector[jj])
sigma_z_matrix[jj]=leng
return sigma_z_matrix
def sigma_z_z_operator(basis_vector,pos_1,pos_2):
"""
Operator that creates the matrix representation of sigma_z(pos_1)sigma_z(pos_2).
As it is diagonal in the computational basis, we will only return a vector-type array and
later apply element-wise multiplication with the wavefunction.
"""
dim=len(basis_vector)
sigma_z_z_matrix=np.zeros(dim)
for jj in range(dim):
if (set([pos_1,pos_2]).issubset(set(basis_vector[jj]))):
sigma_z_z_matrix[jj]=1.
return sigma_z_z_matrix
def get_indices(basis_vector_loc,indices):
"""
This function will return the indices for which the basis vectors are possibly
connected to the input vector by a sigma^x operator. Increasing number of excitations.
"""
    if not len(basis_vector_loc)+2<len(indices):
        return (-1,-1)
    n_initial=indices[len(basis_vector_loc)+1]
    n_final=indices[len(basis_vector_loc)+2]
    return (n_initial,n_final)
###OBSERVABLES ROUTINES
def expectation_value(psi,H_2):
"""Function that computes the expectation value of H_2. """
Hpsi=np.multiply(H_2,psi)
return np.vdot(psi,Hpsi)
def expected_shortfall(H,psi,H_2,seuil):
"""Function that computes the expected shortfall of H_2. """
val=0.
prob=0.
integer=len(psi)-1
while prob<(seuil-0.00001):
prob+=abs(psi[integer])**2
val+=abs(psi[integer])**2*len(H[integer])
integer-=1
return -val/prob
def expectation_value_rho(rho,H_2):
    """Function that computes the expectation value of H_2 on the density matrix."""
    return np.trace(H_2 @ rho)
def expected_shortfall_rho(H,rho,H_2,seuil):
    """Expected shortfall of H_2 on the density matrix.
    NOTE: currently a placeholder that returns the plain expectation value;
    the pure-state implementation is kept below for reference."""
    return np.trace(H_2 @ rho)
#val=0.
#prob=0.
#integer=len(psi)-1
#while prob<(seuil-0.00001):
# prob+=abs(psi[integer])**2
# val+=abs(psi[integer])**2*len(H[integer])
# integer-=1
#return -val/prob
def compute_observable(H,psi,H_2,**kwargs):
"""Function called to evaluate the observable on the wavefunction."""
if settings.type_observable[0]=="energy":
return (expectation_value(psi,H_2)).real
elif settings.type_observable[0]=="cVAR":
if settings.type_observable[1]==0.:
raise ValueError('could not find a positive threshold value for the expected shortfall')
else:
progressive=kwargs.get('var_progressive',False)
if not progressive:
return (expected_shortfall(H,psi,H_2,settings.type_observable[1])).real
else:
seuil_progressive=kwargs.get('seuil_var_progressive',False)
return (expected_shortfall(H,psi,H_2,seuil_progressive)).real
def compute_observable_rho(H,rho,H_detuning,**kwargs):
"""Function called to evaluate the observable on the density matrix."""
H_2=square_mat(H_detuning)
if settings.type_observable[0]=="energy":
return (expectation_value_rho(rho,H_2)).real
elif settings.type_observable[0]=="cVAR":
if settings.type_observable[1]==0.:
raise ValueError('could not find a positive threshold value for the expected shortfall')
else:
            return (expected_shortfall_rho(H,rho,H_2,settings.type_observable[1])).real
####TIME-EVOLUTION ROUTINES#####
def get_derivative(mat_diag,mat_Rabi,**kwargs):
"""Returns function for t-evolution of the wavefunction using scipy.integrate.solve_ivp"""
tunneling=kwargs.get('tunneling','on')
if tunneling=='off':
def H_on_psi_loc(tt,yy):
return -1j*np.multiply(mat_diag,yy)
return H_on_psi_loc
else:
def H_on_psi_loc(tt,yy):
return -1j*np.multiply(mat_diag,yy)-1j*(mat_Rabi @yy)
return H_on_psi_loc
def square_mat(diagonal_matrice):
dim=len(diagonal_matrice)
mat_square=np.zeros((dim, dim),dtype=complex)
for mm in range(dim):
mat_square[mm,mm]=diagonal_matrice[mm]
return mat_square
def get_derivative_density_matrix(mat_diag,mat_Rabi,sigma_moins_array,**kwargs):
"""
Returns function for t-evolution using the numerical integration of the density matrix
\dot{\rho}=-i(H_eff \rho-\rho H_eff^{\dagger})
+\Gamma \sum_j \sigma_j^_ \rho \sigma_j^+
"""
dim=len(mat_diag)
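    #solve_ivp integrates 1d vectors, so rho is passed in flattened to length
    #dim*dim and reshaped back inside the derivative functions below.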
tunneling=kwargs.get('tunneling','on')
if tunneling=='off':
def L_on_rho_loc(tt,yy):
yy=np.reshape(yy, (dim,dim))
H_eff=csr_matrix(square_mat(mat_diag))
deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array)
return np.reshape(deriv, dim*dim)
return L_on_rho_loc
else:
def L_on_rho_loc(tt,yy):
yy=np.reshape(yy, (dim,dim))
H_eff=csr_matrix(mat_Rabi+square_mat(mat_diag))
deriv=-1j*(H_eff @ yy- yy @ (H_eff.conj()).transpose())+settings.Gamma*sum(sig @ yy @ (sig.transpose()) for sig in sigma_moins_array)
return np.reshape(deriv, dim*dim)
return L_on_rho_loc
def evol_scipy(psi,mat_diag,mat_Rabi,tf,k,rn=-1,**kwargs):
"""
Main time-evolution function for the wavefunction.
"""
dissipative=settings.dissipation
indices=kwargs.get('indices',0.)
basis_vector=kwargs.get('basis_vector',0.)
mat_Rabi=csr_matrix(mat_Rabi) #The Rabi matrix is a sparse matrix
H_on_psi=get_derivative(mat_diag,mat_Rabi,**kwargs)
t_span=(0.,tf)
#Coherent time-evolution.
if not dissipative:
sol=scipy.integrate.solve_ivp(H_on_psi, t_span, psi, method='RK45',
t_eval=None, dense_output=False,
events=None, vectorized=False)
values=sol.y
psi=values[ : , -1]
#Dissipative time-evolution. Jumps allowed.
else:
if rn<0:
            rn=random.random() #random number: a jump occurs when norm(psi)**2 < rn.
is_norm_positive=get_test_jump(rn) #Automatic stopping of the time-evolution
is_norm_positive.terminal=True #if the norm of psi gets below rn.
finished=False
while not finished:
sol=scipy.integrate.solve_ivp(H_on_psi, t_span, psi, method='RK45',
t_eval=None, dense_output=False,
events=is_norm_positive, vectorized=False)
values=sol.y
psi=values[ : , -1]
if len(sol.t_events[0])<1: #We reached the final time without jump.
finished=True
else: #There is a jump
(type,tab)=compute_jump_probas(psi,basis_vector,k)
m=get_jump_index(tab,k)
(psi,mat_Rabi)=quantum_jump(type,basis_vector,psi,mat_diag,mat_Rabi,indices,m)
#Update of the Hamiltonian, time-span and random number
H_on_psi=get_derivative(mat_diag,mat_Rabi,**kwargs)
t_span=(sol.t[-1],tf)
rn=random.random()
is_norm_positive=get_test_jump(rn)
is_norm_positive.terminal=True
return (psi,mat_Rabi,rn)
def evol_scipy_rho(rho0,matdiag,mat_Rabi,tf,k,**kwargs):
"""
Main time-evolution function for the density matrix.
"""
indices=kwargs.get('indices',0.)
basis_vector=kwargs.get('basis_vector',0.)
mat_Rabi=csr_matrix(mat_Rabi)
sigma_moins_tab=[]
for jjj in k:
sigma_moins_tab.append(csr_matrix(sigma_moins_operator(basis_vector,indices,jjj)))
L_on_rho=get_derivative_density_matrix(matdiag,mat_Rabi,sigma_moins_tab,**kwargs)
t_span=(0.,tf)
rho0=np.reshape(rho0, len(matdiag)*len(matdiag))
sol=scipy.integrate.solve_ivp(L_on_rho, t_span, rho0, method='RK45',
t_eval=None, dense_output=False, vectorized=False)
values=sol.y
rho=values[ : , -1]
rho=np.reshape(rho, (len(matdiag),len(matdiag)))
return rho
####JUMP routines###
def get_test_jump(rn):
""""Decorated. This function returns the function to evaluate for stopping of t-evol."""
def norm_positive_loc(t,y):
return np.linalg.norm(y)**2-rn
return norm_positive_loc
def get_jump_index(tab,k):
"""This function returns the index of the jump."""
rn2=random.random()
temp=0.
m=0
while temp<rn2:
temp+=tab[m]
m+=1
return k[m-1]
def quantum_jump(type,basis_vector,psi_loc,mat_diagloc,mat_Rabiloc,indices,location_jump):
"""This function computes the effect of a quantum jump, returns the new wf and the Ham."""
if type=="Emission": #Jump by spontaneous emission
(psi_new,indices_to_delete)=jump_elements(basis_vector,psi_loc,indices,location_jump)
rn=random.random()
if rn<settings.branching_ratio: #If one goes to the uncoupled
ido=np.identity(mat_Rabiloc.shape[0]) #ground state, we set the corresponding
for a in indices_to_delete: #matrix elements to zero, so that further
ido[a,a]=0. #re-excitation will not be possible
ido=csr_matrix(ido)
for mm in indices_to_delete:
mat_Rabiloc=ido@mat_Rabiloc@ido
return (psi_new/np.linalg.norm(psi_new),mat_Rabiloc)
else: #Jump by dephasing, here no modification of mat_Rabi
psi_new=dephasing(basis_vector,psi_loc,location_jump)
return (psi_new/np.linalg.norm(psi_new),mat_Rabiloc)
def compute_jump_probas(psi,basis_vector,k):
"""This function computes the probabilities of jumps, and the type \in {Emission,Dephasing}."""
tab=np.zeros(settings.N)
    G=settings.Gamma #spontaneous emission rate
    g=settings.gamma_deph #dephasing rate
rn3=random.random() #Determination of the type of event
if rn3<=G/(g+G):
type="Emission"
else:
type="Dephasing"
p_tot=0.
for mm in k: #we loop over all the possible jump sites.
H_loc=-1j/2.*sigma_z_operator(basis_vector,mm) #Creation of jump operators
Hpsi=np.multiply(H_loc,psi)
tab[mm]=abs(np.vdot(psi,1j*Hpsi)) #Probability of jump mm
p_tot+=abs(np.vdot(psi,1j*Hpsi))
return (type,tab/p_tot)
def jump_elements(basis_vector,psi_loc,indices,location_jump):
"""
This function will return the new wavefunction after a jump at position location_jump on psi_loc
It will also return the indices of the Hamiltonian to set to zero after the jump if there is a
jump towards the uncoupled ground state.
"""
index_to_delete=[]
psi_new=np.zeros_like(psi_loc)
for ii,basis_vector_loc in enumerate(basis_vector):
if (set([location_jump]).issubset(set(basis_vector_loc))): #The jump site is part of the target
continue #vector --> Not concerned
(n_initial,n_final)=get_indices(basis_vector_loc,indices) #Get the indices to look for the parent state
if n_initial<0. or n_final<0.: #Parent state do not exist.
continue
for mm in range(n_initial,n_final):
if set(basis_vector_loc)|set([location_jump])==set(basis_vector[mm]):
psi_new[ii]=psi_loc[mm] #we set the target value to the
index_to_delete.append(mm) #parent value, and add the parent
return (psi_new,index_to_delete) #index to the list for future possible deletion.
def dephasing(basis_vector,psi_loc,location_jump):
"""This function will return the new wavefunction after a dephasing event at position location_jump on psi_loc."""
psi_new=np.zeros_like(psi_loc)
for ii,basis_vector_loc in enumerate(basis_vector):
if (set([location_jump]).issubset(set(basis_vector_loc))): #The jump site is part of the target
continue #vector --> Not concerned
psi_new[ii]=psi_loc[ii] #else, projection.
return psi_new
####RUN routines. Different kinds of evolutions.
def QAOA_single_run_observable(theta,H,psi_l,H_Rabi,H_detuning,H_diss,indices,k,N_max=102,N_min=50,stability_threshold=0.04): #default previously read from settings.stability_threshold
# We can make it a little bit more modular as well.
p=int(len(theta))
val_tab=[]
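    #Average the observable over stochastic quantum trajectories, stopping once
    #the standard error falls below stability_threshold (after at least N_min runs).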
for kk in range(N_max):
psi=psi_l
mat_Rabi=H_Rabi
rn=-1
for pp in range(p):
if pp%2==0:
mat_diag=H_diss
(psi,mat_Rabi,rn)=evol_scipy(psi,mat_diag,mat_Rabi,theta[pp],k,rn,basis_vector=H,
indices=indices)
mat_Rabi=H_Rabi
else:
mat_diag=H_detuning+H_diss
if settings.type_evolution=="mixte":
(psi,mat_Rabi,rn)=evol_scipy(psi,mat_diag,mat_Rabi,theta[pp],k,rn,basis_vector=H,
indices=indices)
mat_Rabi=H_Rabi
else:
(psi,mat_Rabi,rn)=evol_scipy(psi,mat_diag,mat_Rabi,theta[pp],k,rn,basis_vector=H,
indices=indices,tunneling='off')
mat_Rabi=H_Rabi
###We compute the observable only at the end of the calculation
psi=psi/np.linalg.norm(psi)
val_tab.append(compute_observable(H,psi,H_detuning))
##Test if we have gathered enough statistics for the precision threshold that we ask. We also ask a min number of traj
if np.std(val_tab)/np.sqrt(kk+1.)<stability_threshold and kk>N_min:
return np.mean(val_tab)
return np.mean(val_tab)
def QAOA_single_run_observable_density_matrix(theta,H,rho0,H_Rabi,H_detuning,H_diss,indices,k):
# We can make it a little bit more modular as well.
p=int(len(theta))
rho=rho0
mat_Rabi=H_Rabi
for pp in range(p):
if pp%2==0:
mat_diag=H_diss
rho=evol_scipy_rho(rho,mat_diag,mat_Rabi,theta[pp],k,basis_vector=H,
indices=indices)
else:
mat_diag=H_detuning+H_diss
if settings.type_evolution=="mixte":
rho=evol_scipy_rho(rho,mat_diag,mat_Rabi,theta[pp],k,basis_vector=H,
indices=indices)
else:
rho=evol_scipy_rho(rho,mat_diag,mat_Rabi,theta[pp],k,basis_vector=H,
indices=indices,tunneling='off')
###We compute the observable only at the end of the calculation
val_obs=compute_observable_rho(H,rho,H_detuning)
return val_obs
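#Hedged demo (not part of the original module): a minimal N=2 sketch under the
#basis convention assumed above -- each basis state lists its excited sites and
#indices[k] marks the start of the k-excitation sector.
if __name__ == "__main__":
    demo_basis = [[], [0], [1], [0, 1]]
    demo_indices = [0, 1, 3, 4]
    print(sigma_x_operator(demo_basis, demo_indices))              #global sigma_x
    print(sigma_x_operator(demo_basis, demo_indices, pos_sigma=0)) #sigma_x on site 0 only
    print(sigma_z_operator(demo_basis))                            #excitation counts: [0. 1. 1. 2.]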
|
[
"numpy.trace",
"numpy.zeros_like",
"numpy.multiply",
"numpy.std",
"numpy.vdot",
"numpy.zeros",
"numpy.transpose",
"numpy.identity",
"copy.copy",
"random.random",
"scipy.sparse.csr_matrix",
"numpy.mean",
"numpy.reshape",
"numpy.linalg.norm",
"numpy.sqrt"
] |
[((731, 751), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {}), '((dim, dim))\n', (739, 751), True, 'import numpy as np\n'), ((2582, 2595), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (2590, 2595), True, 'import numpy as np\n'), ((3320, 3333), 'numpy.zeros', 'np.zeros', (['dim'], {}), '(dim)\n', (3328, 3333), True, 'import numpy as np\n'), ((4056, 4077), 'numpy.multiply', 'np.multiply', (['H_2', 'psi'], {}), '(H_2, psi)\n', (4067, 4077), True, 'import numpy as np\n'), ((4088, 4106), 'numpy.vdot', 'np.vdot', (['psi', 'Hpsi'], {}), '(psi, Hpsi)\n', (4095, 4106), True, 'import numpy as np\n'), ((4531, 4550), 'numpy.trace', 'np.trace', (['(H_2 @ rho)'], {}), '(H_2 @ rho)\n', (4539, 4550), True, 'import numpy as np\n'), ((4673, 4692), 'numpy.trace', 'np.trace', (['(H_2 @ rho)'], {}), '(H_2 @ rho)\n', (4681, 4692), True, 'import numpy as np\n'), ((6814, 6849), 'numpy.zeros', 'np.zeros', (['(dim, dim)'], {'dtype': 'complex'}), '((dim, dim), dtype=complex)\n', (6822, 6849), True, 'import numpy as np\n'), ((8301, 8321), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat_Rabi'], {}), '(mat_Rabi)\n', (8311, 8321), False, 'from scipy.sparse import csr_matrix\n'), ((10582, 10602), 'scipy.sparse.csr_matrix', 'csr_matrix', (['mat_Rabi'], {}), '(mat_Rabi)\n', (10592, 10602), False, 'from scipy.sparse import csr_matrix\n'), ((11501, 11516), 'random.random', 'random.random', ([], {}), '()\n', (11514, 11516), False, 'import random, copy\n'), ((12961, 12981), 'numpy.zeros', 'np.zeros', (['settings.N'], {}), '(settings.N)\n', (12969, 12981), True, 'import numpy as np\n'), ((13180, 13195), 'random.random', 'random.random', ([], {}), '()\n', (13193, 13195), False, 'import random, copy\n'), ((14155, 14177), 'numpy.zeros_like', 'np.zeros_like', (['psi_loc'], {}), '(psi_loc)\n', (14168, 14177), True, 'import numpy as np\n'), ((15322, 15344), 'numpy.zeros_like', 'np.zeros_like', (['psi_loc'], {}), '(psi_loc)\n', (15335, 15344), True, 'import numpy as np\n'), ((17369, 17385), 'numpy.mean', 'np.mean', (['val_tab'], {}), '(val_tab)\n', (17376, 17385), True, 'import numpy as np\n'), ((394, 409), 'numpy.transpose', 'np.transpose', (['M'], {}), '(M)\n', (406, 409), True, 'import numpy as np\n'), ((11988, 12003), 'random.random', 'random.random', ([], {}), '()\n', (12001, 12003), False, 'import random, copy\n'), ((13604, 13627), 'numpy.multiply', 'np.multiply', (['H_loc', 'psi'], {}), '(H_loc, psi)\n', (13615, 13627), True, 'import numpy as np\n'), ((7372, 7398), 'numpy.reshape', 'np.reshape', (['yy', '(dim, dim)'], {}), '(yy, (dim, dim))\n', (7382, 7398), True, 'import numpy as np\n'), ((7614, 7642), 'numpy.reshape', 'np.reshape', (['deriv', '(dim * dim)'], {}), '(deriv, dim * dim)\n', (7624, 7642), True, 'import numpy as np\n'), ((7728, 7754), 'numpy.reshape', 'np.reshape', (['yy', '(dim, dim)'], {}), '(yy, (dim, dim))\n', (7738, 7754), True, 'import numpy as np\n'), ((7979, 8007), 'numpy.reshape', 'np.reshape', (['deriv', '(dim * dim)'], {}), '(deriv, dim * dim)\n', (7989, 8007), True, 'import numpy as np\n'), ((8881, 8896), 'random.random', 'random.random', ([], {}), '()\n', (8894, 8896), False, 'import random, copy\n'), ((12110, 12143), 'numpy.identity', 'np.identity', (['mat_Rabiloc.shape[0]'], {}), '(mat_Rabiloc.shape[0])\n', (12121, 12143), True, 'import numpy as np\n'), ((12408, 12423), 'scipy.sparse.csr_matrix', 'csr_matrix', (['ido'], {}), '(ido)\n', (12418, 12423), False, 'from scipy.sparse import csr_matrix\n'), ((13647, 13672), 'numpy.vdot', 'np.vdot', (['psi', '(1.0j * Hpsi)'], {}), '(psi, 1.0j * 
Hpsi)\n', (13654, 13672), True, 'import numpy as np\n'), ((13742, 13767), 'numpy.vdot', 'np.vdot', (['psi', '(1.0j * Hpsi)'], {}), '(psi, 1.0j * Hpsi)\n', (13749, 13767), True, 'import numpy as np\n'), ((17038, 17057), 'numpy.linalg.norm', 'np.linalg.norm', (['psi'], {}), '(psi)\n', (17052, 17057), True, 'import numpy as np\n'), ((17341, 17357), 'numpy.mean', 'np.mean', (['val_tab'], {}), '(val_tab)\n', (17348, 17357), True, 'import numpy as np\n'), ((6543, 6568), 'numpy.multiply', 'np.multiply', (['mat_diag', 'yy'], {}), '(mat_diag, yy)\n', (6554, 6568), True, 'import numpy as np\n'), ((10190, 10205), 'random.random', 'random.random', ([], {}), '()\n', (10203, 10205), False, 'import random, copy\n'), ((11357, 11374), 'numpy.linalg.norm', 'np.linalg.norm', (['y'], {}), '(y)\n', (11371, 11374), True, 'import numpy as np\n'), ((12537, 12560), 'numpy.linalg.norm', 'np.linalg.norm', (['psi_new'], {}), '(psi_new)\n', (12551, 12560), True, 'import numpy as np\n'), ((12770, 12793), 'numpy.linalg.norm', 'np.linalg.norm', (['psi_new'], {}), '(psi_new)\n', (12784, 12793), True, 'import numpy as np\n'), ((1518, 1544), 'copy.copy', 'copy.copy', (['basis_vector_ii'], {}), '(basis_vector_ii)\n', (1527, 1544), False, 'import random, copy\n'), ((6662, 6687), 'numpy.multiply', 'np.multiply', (['mat_diag', 'yy'], {}), '(mat_diag, yy)\n', (6673, 6687), True, 'import numpy as np\n'), ((17257, 17272), 'numpy.std', 'np.std', (['val_tab'], {}), '(val_tab)\n', (17263, 17272), True, 'import numpy as np\n'), ((17273, 17290), 'numpy.sqrt', 'np.sqrt', (['(kk + 1.0)'], {}), '(kk + 1.0)\n', (17280, 17290), True, 'import numpy as np\n')]
|
import pytest
try:
from unittest import mock
except ImportError:
import mock
from collections import defaultdict, Counter
import itertools
import numpy as np
from openpathsampling.tests.test_helpers import make_1d_traj
from .serialization_helpers import get_uuid, set_uuid
from .storable_functions import *
_MODULE = "openpathsampling.experimental.simstore.storable_functions"
class MockBackend(object):
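    """Minimal in-memory stand-in for the storage backend: it records how often
    register/load are called so tests can assert on the call counts."""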
def __init__(self):
self.storable_function_tables = defaultdict(dict)
self.called_register = defaultdict(int)
self.called_load = defaultdict(int)
def has_table(self, table_name):
return table_name in self.storable_function_tables
def register_storable_function(self, table_name, result_type):
self.storable_function_tables[table_name] = {}
self.called_register[table_name] += 1
def load_storable_function_results(self, func_uuid, uuids):
table = self.storable_function_tables[func_uuid]
found_uuids = [uuid for uuid in uuids if uuid in table]
for uuid in uuids:
self.called_load[uuid] += 1
return {uuid: table[uuid] for uuid in found_uuids}
def add_storable_function_results(self, table_name, result_dict):
self.storable_function_tables[table_name].update(result_dict)
def test_requires_lists_pre():
assert requires_lists_pre([1]) == [[1]]
assert requires_lists_pre([1,2]) == [[1,2]]
@pytest.mark.parametrize('array_input,expected', [
([[1], [2], [3]], [1, 2, 3]),
([1, 2, 3], [1, 2, 3]),
([[1, 2], [3, 4]], [[1, 2], [3, 4]]),
([[[1, 2]], [[3, 4]]], [[1, 2], [3, 4]]),
])
def test_scalarize_singletons(array_input, expected):
np.testing.assert_array_equal(
scalarize_singletons(np.array(array_input)),
np.array(expected)
)
def test_scalarize_singletons_to_float():
arr = np.array([1.0])
arr.shape = tuple()
scalarized = scalarize_singletons(arr)
assert not isinstance(scalarized, np.ndarray)
assert isinstance(scalarized, float)
def test_wrap_numpy():
for inp in [1, [1, 2]]:
assert isinstance(wrap_numpy(inp), np.ndarray)
class TestStorableFunctionConfig(object):
def setup(self):
self.config = StorableFunctionConfig(processors=[
scalarize_singletons,
wrap_numpy,
requires_lists_pre,
requires_lists_post
])
@staticmethod
def func(values):
return np.array([s.xyz[:,0] for s in values])
def test_register(self):
assert len(self.config.processors) == 4
names = ['scalarize_singletons', 'requires_lists_pre',
'requires_lists_post', 'wrap_numpy']
for key in names:
assert key in self.config.processor_dict
assert self.config.item_preprocessors == []
assert self.config.list_preprocessors == [requires_lists_pre]
assert self.config.item_postprocessors == [scalarize_singletons]
assert self.config.list_postprocessors == [wrap_numpy,
requires_lists_post]
mock_wrap_numpy = Processor(name='wrap_numpy',
stage='item-pre',
func=lambda x: x)
self.config.register(mock_wrap_numpy)
proc_dict_wrap_numpy = self.config.processor_dict['wrap_numpy']
assert len(self.config.processors) == 4
assert proc_dict_wrap_numpy is mock_wrap_numpy
assert proc_dict_wrap_numpy is not wrap_numpy
assert mock_wrap_numpy in self.config.processors
assert wrap_numpy not in self.config.processors
assert self.config.item_preprocessors == [mock_wrap_numpy]
assert self.config.list_preprocessors == [requires_lists_pre]
assert self.config.item_postprocessors == [scalarize_singletons]
assert self.config.list_postprocessors == [requires_lists_post]
@pytest.mark.parametrize('style', ['obj', 'name'])
def test_deregister(self, style):
dereg = {'obj': wrap_numpy, 'name': 'wrap_numpy'}[style]
assert len(self.config.processors) == 4
self.config.deregister(dereg)
assert len(self.config.processors) == 3
assert 'wrap_numpy' not in self.config.processor_dict
assert wrap_numpy not in self.config.processors
def test_deregister_error(self):
with pytest.raises(KeyError):
self.config.deregister('foo')
def test_deregister_no_error(self):
# just run it to ensure it doesn't error out
self.config.deregister('foo', error_if_missing=False)
def test_func(self):
# test of the internally used test func
snap = make_1d_traj([5.0])[0]
assert self.func([snap]) == [[5]]
def test_list_preprocess(self):
snap = make_1d_traj([5.0])[0]
assert self.config.list_preprocess([snap]) == [[snap]]
def test_item_preprocess(self):
snap = make_1d_traj([5.0])[0]
assert self.config.item_preprocess(snap) == snap
def test_item_postprocess(self):
np.testing.assert_array_equal(
self.config.item_postprocess(np.array([[5.0]])),
np.array([5.0])
)
def test_list_postprocess(self):
snap = make_1d_traj([5.0])[0]
values = self.func([snap])
np.testing.assert_array_equal(self.config.list_postprocess(values),
np.array([5.0]))
def test_storable_function_integration(self):
snap = make_1d_traj([5.0])[0]
sf = StorableFunction(self.func, func_config=self.config)
assert sf(snap) == 5.0
np.testing.assert_array_equal(sf([snap]), np.array([5.0]))
class TestStorableFunctionResults(object):
def setup(self):
self.cv = StorableFunction(lambda x: x)
self.cv.__uuid__ = "funcUUID"
self.mapping = {'UUID1': "foo",
'UUID2': "bar"}
self.sfr = StorableFunctionResults(self.cv, "funcUUID")
self.sfr.result_dict = self.mapping
self.sfr.local_uuids = set(self.mapping.keys())
def test_get_results_as_dict_cached(self):
result, missing = self.sfr.get_results_as_dict({'UUID1': "object"})
assert result == {'UUID1': "foo"}
assert missing == {}
def test_get_results_as_dict_missing(self):
result, missing = self.sfr.get_results_as_dict({"UUID3": "object"})
assert result == {}
assert missing == {"UUID3": "object"}
def test_get_results_as_dict_storage(self):
pytest.skip()
pass
def test_update(self):
new_sfr = StorableFunctionResults(self.cv, "funcUUID")
new_sfr.result_dict = {'UUID3': "baz"}
new_sfr.local_uuids = set(['UUID3'])
self.sfr.update(new_sfr)
assert len(self.sfr) == 3
assert "UUID3" in self.sfr.local_uuids
assert self.sfr.result_dict["UUID3"] == "baz"
# TODO: test_cache_results_nonpure_function
# if you try to cache results that don't match the original, you get an
# error
def test_cache_results(self):
self.sfr.cache_results({"UUID3": "baz"})
assert len(self.sfr) == 3
assert "UUID3" in self.sfr.local_uuids
assert self.sfr.result_dict["UUID3"] == "baz"
def test_clear(self):
assert len(self.sfr) != 0
self.sfr.clear()
assert len(self.sfr) == 0
assert self.sfr.result_dict == {}
assert self.sfr.local_uuids == set([])
def test_len(self):
assert len(self.sfr) == 2
def test_to_dict_from_dict_cycle(self):
pass
@mock.patch(_MODULE + '.get_uuid', lambda x: x)
@mock.patch(_MODULE + '.has_uuid', lambda x: isinstance(x, str))
class TestStorableFunction(object):
def setup(self):
def get_expected(uuid):
expected = {'uuid': 'eval', 'uuid1': 'other'}
return expected[uuid]
self.func = StorableFunction(get_expected)
@pytest.mark.parametrize('min_max', [(None, None), (None, 10), (0, 10)])
def test_check_periodic(self, min_max):
period_min, period_max = min_max
n_nones = Counter(min_max)[None]
expected = {2: False, 0: True, 1: 'error'}[n_nones]
check_period = StorableFunction._check_period
if expected == 'error':
with pytest.raises(ValueError, match='period'):
check_period(period_min, period_max)
else:
assert check_period(period_min, period_max) == expected
def test_is_periodic(self):
assert not self.func.is_periodic
func = StorableFunction(lambda s: s.xyz[0][0], period_min=0.0,
period_max=1.0)
assert func.is_periodic
def test_gets_source(self):
pytest.skip()
pass
def test_no_source_warning(self):
pytest.skip()
pass
def test_disk_cache_property(self):
pytest.skip()
pass
@pytest.mark.parametrize('mode', ['no-caching', 'analysis',
'production'])
def test_mode(self, mode):
self.func.mode = mode
assert self.func.mode == mode
if mode == 'no-caching':
assert self.func.local_cache is None
else:
assert self.func.local_cache is not None
def test_bad_mode(self):
with pytest.raises(ValueError):
self.func.mode = 'foo'
@staticmethod
def _set_cache(func, mode, found_in, expected):
if found_in == 'cache':
func.local_cache.cache_results(expected)
elif mode == 'no-caching':
pass
else:
func.local_cache.clear()
@staticmethod
def _set_storage(func, mode, found_in, expected):
if found_in == 'storage':
def get_storage(cv_uuid, uuids):
missing = {uuid: uuids[uuid] for uuid in uuids
if uuid not in expected.keys()}
found = {uuid: uuids[uuid] for uuid in uuids
if uuid in expected.keys()}
return {uuid: expected[uuid] for uuid in found}, missing
else:
def get_storage(cv_uuid, uuids):
return {}, dict(uuids)
storage = mock.MagicMock(get_function_results=get_storage)
func._handlers.add(storage)
@pytest.mark.parametrize('mode, found_in', [
('analysis', 'storage'), ('analysis', 'cache'),
('analysis', 'eval'), ('production', 'cache'),
('production', 'eval'), ('no-caching', 'eval')
])
def test_call(self, mode, found_in):
# mode = 'analysis'
# found_in = 'cache'
# setup, depending on the parametrized parameters
expected = {'uuid': 'eval'}
get_expected = lambda x: expected[x]
func = StorableFunction(get_expected)
func.mode = mode
self._set_cache(func, mode, found_in, expected={'uuid': 'cache'})
self._set_storage(func, mode, found_in, expected={'uuid': 'storage'})
# validation of correct behavior
# NOTE: some of this testing is based on internal behavior, which
# perhaps shouldn't be in the public-facing API
if found_in != 'cache' and mode != 'no-caching':
assert 'uuid' not in func.local_cache.result_dict
assert func('uuid') == found_in
if mode != 'no-caching':
assert func.local_cache.result_dict['uuid'] == found_in
@pytest.mark.parametrize("found_in_1, found_in_2", [
('storage', 'storage'), ('cache', 'cache'), ('eval', 'eval'),
('cache', 'eval')
])
def test_call_multiple(self, found_in_1, found_in_2):
# only test this in analysis
expected_dict = {'uuid': found_in_1, 'other': found_in_2}
expected = {
level: {uuid: expected
for uuid, expected in expected_dict.items()
if expected == level}
for level in ['eval', 'cache', 'storage']
}
get_expected = lambda x: expected['eval'][x]
func = StorableFunction(get_expected)
self._set_cache(func, 'analysis', 'cache',
expected=expected['cache'])
self._set_storage(func, 'analysis', 'storage',
expected=expected['storage'])
assert func(['uuid', 'other']) == [found_in_1, found_in_2]
def test_to_dict_from_dict_cycle(self):
pytest.skip()
pass
def test_full_serialization_cycle(self):
pytest.skip()
pass
@pytest.mark.parametrize('found_in', ['cache', 'storage', 'eval'])
def test_analysis_mode_integration(self, found_in):
pytest.skip()
pass
class TestStorageFunctionHandler(object):
def setup(self):
self.backend = MockBackend()
self.storage = mock.NonCallableMock(backend=self.backend)
self.sf_handler = StorageFunctionHandler(self.storage)
self.func = StorableFunction(lambda x: x.xyz[0][0])
self.f2 = StorableFunction.from_dict(self.func.to_dict())
set_uuid(self.f2, get_uuid(self.func))
self.result_dict = {'snap1': 5.0, 'snap2': 3.0}
@staticmethod
def _make_sfr(func, result_dict):
sfr = StorableFunctionResults(func, get_uuid(func))
sfr.cache_results(result_dict)
return sfr
def test_codec_settings(self):
# TODO: is this actually used?
pytest.skip()
@pytest.mark.parametrize('has_table, with_result',
itertools.product([True, False],
[True, False]))
def test_register_function(self, has_table, with_result):
uuid = get_uuid(self.func)
if has_table:
self.storage.backend.storable_function_tables[uuid] = {}
example = 1.0 if with_result else None
unable_to_register = example is None and not has_table
add_table = not has_table and not unable_to_register
assert not self.func.has_handler
assert len(self.sf_handler.all_functions) == 0
assert self.sf_handler.functions == []
self.sf_handler.register_function(self.func, example)
sf_tables = self.backend.storable_function_tables
if not unable_to_register:
assert uuid in sf_tables
else:
assert uuid not in sf_tables
assert self.func is self.sf_handler.canonical_functions[uuid]
assert self.sf_handler.all_functions[uuid] == [self.func]
if not unable_to_register:
assert self.func.has_handler
assert self.func._handlers == {self.sf_handler}
assert self.sf_handler.functions == [self.func]
# make a copy of the func
assert get_uuid(self.f2) == get_uuid(self.func)
assert self.f2 is not self.func
# internal checks should ensure that you call add_table False here
expected_calls = {True: 1, False: 0}[add_table]
assert self.backend.called_register[uuid] == expected_calls
self.sf_handler.register_function(self.f2, example)
assert self.sf_handler.canonical_functions[uuid] is not self.f2
assert self.sf_handler.canonical_functions[uuid] is self.func
assert self.sf_handler.all_functions[uuid] == [self.func, self.f2]
assert self.backend.called_register[uuid] == expected_calls
assert self.sf_handler.functions == [self.func]
def test_update_cache(self):
self.sf_handler.register_function(self.func)
item1, item2 = self.result_dict.items()
sfr1 = self._make_sfr(self.func, dict([item1]))
sfr2 = self._make_sfr(self.f2, dict([item2]))
assert self.func.local_cache.result_dict == {}
self.sf_handler.update_cache(sfr1)
assert self.func.local_cache.result_dict == {'snap1': 5.0}
# register a new function; models the parallel update
self.sf_handler.update_cache(sfr2)
assert self.func.local_cache.result_dict == self.result_dict
def test_clear_non_canonical(self):
sf_handler = self.sf_handler
uuid = get_uuid(self.func)
sf_handler.register_function(self.func)
sf_handler.register_function(self.f2)
assert sf_handler.canonical_functions[uuid] == self.func
assert sf_handler.all_functions[uuid] == [self.func, self.f2]
sf_handler.clear_non_canonical()
assert sf_handler.canonical_functions[uuid] == self.func
assert sf_handler.all_functions[uuid] == [self.func]
@pytest.mark.parametrize('inputs', [['snap1'], ['snap1', 'snap2']])
def test_get_function_results(self, inputs):
sf_handler = self.sf_handler
sf_handler.register_function(self.func)
uuid = get_uuid(self.func)
registered_values = {uuid: value
for uuid, value in self.result_dict.items()
if uuid in inputs}
self.backend.add_storable_function_results(
table_name=get_uuid(self.func),
result_dict=registered_values
)
uuid_items = {'snap1': "This is snap1",
'snap2': "This is snap2"}
expected_found = {uuid: self.result_dict[uuid] for uuid in inputs}
missing_uuids = [uuid for uuid in uuid_items.keys()
if uuid not in registered_values]
expected_missing = {uuid: uuid_items[uuid]
for uuid in missing_uuids}
found, missing = sf_handler.get_function_results(uuid, uuid_items)
assert found == expected_found
assert missing == expected_missing
|
[
"collections.Counter",
"pytest.skip",
"mock.patch",
"collections.defaultdict",
"mock.NonCallableMock",
"pytest.raises",
"numpy.array",
"itertools.product",
"pytest.mark.parametrize",
"mock.MagicMock",
"openpathsampling.tests.test_helpers.make_1d_traj"
] |
[((1438, 1630), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""array_input,expected"""', '[([[1], [2], [3]], [1, 2, 3]), ([1, 2, 3], [1, 2, 3]), ([[1, 2], [3, 4]], [\n [1, 2], [3, 4]]), ([[[1, 2]], [[3, 4]]], [[1, 2], [3, 4]])]'], {}), "('array_input,expected', [([[1], [2], [3]], [1, 2, 3\n ]), ([1, 2, 3], [1, 2, 3]), ([[1, 2], [3, 4]], [[1, 2], [3, 4]]), ([[[1,\n 2]], [[3, 4]]], [[1, 2], [3, 4]])])\n", (1461, 1630), False, 'import pytest\n'), ((7628, 7674), 'mock.patch', 'mock.patch', (["(_MODULE + '.get_uuid')", '(lambda x: x)'], {}), "(_MODULE + '.get_uuid', lambda x: x)\n", (7638, 7674), False, 'import mock\n'), ((1869, 1884), 'numpy.array', 'np.array', (['[1.0]'], {}), '([1.0])\n', (1877, 1884), True, 'import numpy as np\n'), ((3944, 3993), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""style"""', "['obj', 'name']"], {}), "('style', ['obj', 'name'])\n", (3967, 3993), False, 'import pytest\n'), ((7979, 8050), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""min_max"""', '[(None, None), (None, 10), (0, 10)]'], {}), "('min_max', [(None, None), (None, 10), (0, 10)])\n", (8002, 8050), False, 'import pytest\n'), ((8967, 9040), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode"""', "['no-caching', 'analysis', 'production']"], {}), "('mode', ['no-caching', 'analysis', 'production'])\n", (8990, 9040), False, 'import pytest\n'), ((10361, 10557), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode, found_in"""', "[('analysis', 'storage'), ('analysis', 'cache'), ('analysis', 'eval'), (\n 'production', 'cache'), ('production', 'eval'), ('no-caching', 'eval')]"], {}), "('mode, found_in', [('analysis', 'storage'), (\n 'analysis', 'cache'), ('analysis', 'eval'), ('production', 'cache'), (\n 'production', 'eval'), ('no-caching', 'eval')])\n", (10384, 10557), False, 'import pytest\n'), ((11478, 11614), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""found_in_1, found_in_2"""', "[('storage', 'storage'), ('cache', 'cache'), ('eval', 'eval'), ('cache',\n 'eval')]"], {}), "('found_in_1, found_in_2', [('storage', 'storage'),\n ('cache', 'cache'), ('eval', 'eval'), ('cache', 'eval')])\n", (11501, 11614), False, 'import pytest\n'), ((12568, 12633), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""found_in"""', "['cache', 'storage', 'eval']"], {}), "('found_in', ['cache', 'storage', 'eval'])\n", (12591, 12633), False, 'import pytest\n'), ((16555, 16621), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputs"""', "[['snap1'], ['snap1', 'snap2']]"], {}), "('inputs', [['snap1'], ['snap1', 'snap2']])\n", (16578, 16621), False, 'import pytest\n'), ((482, 499), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (493, 499), False, 'from collections import defaultdict, Counter\n'), ((531, 547), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (542, 547), False, 'from collections import defaultdict, Counter\n'), ((575, 591), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (586, 591), False, 'from collections import defaultdict, Counter\n'), ((1791, 1809), 'numpy.array', 'np.array', (['expected'], {}), '(expected)\n', (1799, 1809), True, 'import numpy as np\n'), ((2461, 2500), 'numpy.array', 'np.array', (['[s.xyz[:, 0] for s in values]'], {}), '([s.xyz[:, 0] for s in values])\n', (2469, 2500), True, 'import numpy as np\n'), ((6565, 6578), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (6576, 6578), False, 'import pytest\n'), ((8784, 8797), 'pytest.skip', 
'pytest.skip', ([], {}), '()\n', (8795, 8797), False, 'import pytest\n'), ((8858, 8871), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (8869, 8871), False, 'import pytest\n'), ((8934, 8947), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (8945, 8947), False, 'import pytest\n'), ((10270, 10318), 'mock.MagicMock', 'mock.MagicMock', ([], {'get_function_results': 'get_storage'}), '(get_function_results=get_storage)\n', (10284, 10318), False, 'import mock\n'), ((12454, 12467), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12465, 12467), False, 'import pytest\n'), ((12535, 12548), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12546, 12548), False, 'import pytest\n'), ((12698, 12711), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (12709, 12711), False, 'import pytest\n'), ((12850, 12892), 'mock.NonCallableMock', 'mock.NonCallableMock', ([], {'backend': 'self.backend'}), '(backend=self.backend)\n', (12870, 12892), False, 'import mock\n'), ((13443, 13456), 'pytest.skip', 'pytest.skip', ([], {}), '()\n', (13454, 13456), False, 'import pytest\n'), ((13542, 13589), 'itertools.product', 'itertools.product', (['[True, False]', '[True, False]'], {}), '([True, False], [True, False])\n', (13559, 13589), False, 'import itertools\n'), ((1759, 1780), 'numpy.array', 'np.array', (['array_input'], {}), '(array_input)\n', (1767, 1780), True, 'import numpy as np\n'), ((4400, 4423), 'pytest.raises', 'pytest.raises', (['KeyError'], {}), '(KeyError)\n', (4413, 4423), False, 'import pytest\n'), ((4712, 4731), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (4724, 4731), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((4829, 4848), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (4841, 4848), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((4967, 4986), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (4979, 4986), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((5197, 5212), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (5205, 5212), True, 'import numpy as np\n'), ((5276, 5295), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (5288, 5295), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((5448, 5463), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (5456, 5463), True, 'import numpy as np\n'), ((5531, 5550), 'openpathsampling.tests.test_helpers.make_1d_traj', 'make_1d_traj', (['[5.0]'], {}), '([5.0])\n', (5543, 5550), False, 'from openpathsampling.tests.test_helpers import make_1d_traj\n'), ((5701, 5716), 'numpy.array', 'np.array', (['[5.0]'], {}), '([5.0])\n', (5709, 5716), True, 'import numpy as np\n'), ((8154, 8170), 'collections.Counter', 'Counter', (['min_max'], {}), '(min_max)\n', (8161, 8170), False, 'from collections import defaultdict, Counter\n'), ((9370, 9395), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (9383, 9395), False, 'import pytest\n'), ((5165, 5182), 'numpy.array', 'np.array', (['[[5.0]]'], {}), '([[5.0]])\n', (5173, 5182), True, 'import numpy as np\n'), ((8340, 8381), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""period"""'}), "(ValueError, match='period')\n", (8353, 8381), False, 'import pytest\n')]
|
# -*- coding: utf-8 -*-
"""
Functions to calculate the color of a multilayer thin film under reflected
light. A perfect mirror will look white, because we imagine seeing the white
light source ("illuminant") reflected in it. A half-reflective mirror will be
gray, a non-reflective surface will be black, etc. See tmm.examples.sample5()
for a few example calculations for how this is used.
For functions that require an illuminant, the most common choice would be to
use colorpy.illuminants.get_illuminant_D65(), which approximates a phase of
natural daylight. See http://en.wikipedia.org/wiki/Illuminant_D65 .
"""
from __future__ import division, print_function, absolute_import
from numpy import arange, array
import numpy as np
from .tmm_core import coh_tmm
try:
import colorpy
import colorpy.illuminants
import colorpy.ciexyz
except ImportError:
print('Warning: Colorpy not detected (or perhaps an error occurred when',
'loading it). Film color calculations (in tmm.color)',
'will not work. Main version is at http://pypi.python.org/pypi/colorpy',
'A Python 3 compatible edit is at https://github.com/fish2000/ColorPy/')
inf = float('inf')
def calc_reflectances(n_fn_list, d_list, th_0, pol='s', spectral_range='narrow'):
"""
Calculate the reflection spectrum of a thin-film stack.
n_fn_list[m] should be a function that inputs wavelength in nm and
outputs refractive index of the m'th layer. In other words,
n_fn_list[2](456) == 1.53 + 0.4j means that layer #2 has a refractive index
of 1.53 + 0.4j at 456nm. These functions could be defined with
scipy.interpolate.interp1d() for example.
pol, d_list and th_0 are defined as in tmm.coh_tmm ... but d_list
MUST be in units of nanometers
spectral_range can be 'full' if all the functions in n_fn_list can take
wavelength arguments between 360-830nm; or 'narrow' if some or all require
arguments only in the range 400-700nm. The wavelengths outside the
'narrow' range make only a tiny difference to the color, because they are
almost invisible to the eye. If spectral_range is 'narrow', then the n(400)
values are used for 360-400nm and n(700) for 700-830nm.
Returns a 2-column array where the first column is wavelength in nm
(360,361,362,...,830) and the second column is reflectivity (from 0
to 1, where 1 is a perfect mirror). This range is chosen to be
consistent with colorpy.illuminants. See colorpy.ciexyz.start_wl_nm etc.
"""
lam_vac_list = arange(360, 831)
num_layers = len(n_fn_list)
def extend_spectral_range(n_fn):
"""
Starting with a narrow-spectrum refractive index function
n_fn(wavelength), create then return the corresponding full-spectrum
refractive index function
"""
def extended_n_fn(lam):
if lam < 400:
return n_fn(400)
elif lam > 700:
return n_fn(700)
else:
return n_fn(lam)
return extended_n_fn
if spectral_range == 'narrow':
n_fn_list = [extend_spectral_range(n_fn) for n_fn in n_fn_list]
final_answer = []
for lam_vac in lam_vac_list:
n_list = [n_fn_list[i](lam_vac) for i in range(num_layers)]
R = coh_tmm(pol, n_list, d_list, th_0, lam_vac)['R']
final_answer.append([lam_vac,R])
final_answer = array(final_answer)
return final_answer
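# Hedged sketch (not part of the original API; the wavelength/index table
# below is made up for illustration): the docstring above suggests building
# each n_fn_list entry from tabulated dispersion data with
# scipy.interpolate.interp1d:
#
#     from scipy.interpolate import interp1d
#     wl_nm = [400, 500, 600, 700]
#     n_vals = [1.48, 1.46, 1.45, 1.44]
#     n_fn = interp1d(wl_nm, n_vals)    # n_fn(456) -> index at 456nm
#
# With spectral_range='narrow', extend_spectral_range() clamps queries
# outside 400-700nm, so a table covering only that range suffices.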
def calc_spectrum(reflectances, illuminant):
"""
    * reflectances is the output of calc_reflectances()
    * illuminant is a 2D numpy array, with one row for each wavelength,
with the first column holding the wavelength in nm, and the
second column the intensity. This is the form returned by the
functions in colorpy.illuminants. It is normally assumed that
illuminant is normalized so that Y=1.
"""
    #Both colorpy.illuminants and calc_reflectances should go from
#colorpy.ciexyz.start_wl_nm etc, so they should have matching
#wavelength specifications
if not np.all(reflectances[:,0] == illuminant[:,0]):
        raise ValueError('Wavelength range is inconsistent...Both should be 360,361,...,830.\n'
                + 'reflectances[0]=' + str(reflectances[0]) + ', reflectances[-1]=' + str(reflectances[-1])
                + '\nilluminant[0]=' + str(illuminant[0]) + ', illuminant[-1]=' + str(illuminant[-1]))
final_answer = []
for i,lam in enumerate(reflectances[:,0]):
final_answer.append([lam, reflectances[i,1] * illuminant[i,1]])
return array(final_answer)
def calc_color(spectrum, scale=None, show_warnings=True):
"""
Calculate the color in various representations.
spectrum is the output of calc_spectrum.
scale is the scaling method. Possibilities are:
    * scale=None means don't scale. This is usually what you want, because
the illuminant should be pre-scaled in an appropriate way.
(Specifically, it's scaled to get Y=1 for a perfect reflector.)
* scale='Y1' means that the intensity is increased or decreased in
order to set Y (the luminance) to 1. So you can get white but not gray,
you can get orange but not brown, etc.
* scale=0.789 multiplies X,Y,Z by 0.789. Any number > 0 is OK.
Returns a dictionary with rgb, irgb, xy, xyY, and XYZ. Definitions:
* xy, xyY and XYZ are defined as in
http://en.wikipedia.org/wiki/CIE_1931_color_space
* rgb is the linear (i.e., proportional to intensity, not
gamma-corrected) version of sRGB.
* irgb is ready-to-display sRGB, i.e. it is clipped to the range 0-1,
and gamma-corrected, and rounded to three integers in the range 0-255.
(sRGB is the standard RGB used in modern displays and printers.)
"""
assert (scale is None or scale == 'Y1'
or (type(scale) is float and scale > 0))
XYZ = colorpy.ciexyz.xyz_from_spectrum(spectrum)
assert min(XYZ) >= 0
if scale == 'Y1' or type(scale) is float:
factor = (1.0 / XYZ[1] if scale == 'Y1' else scale)
XYZ[0] *= factor
XYZ[1] *= factor
XYZ[2] *= factor
X,Y,Z = XYZ
if show_warnings:
if Y > 1:
print('Warning: Oversaturated color! XYZ = ', XYZ)
xy = [X / (X + Y + Z), Y / (X + Y + Z)]
xyY = [xy[0], xy[1], Y]
rgb = colorpy.colormodels.rgb_from_xyz(XYZ)
irgb = colorpy.colormodels.irgb_from_rgb(rgb)
return {'xy':xy, 'xyY':xyY, 'XYZ':XYZ, 'rgb':rgb, 'irgb':irgb}
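# Hedged usage sketch (illustrative only; the constant layer indices are
# placeholders, not real dispersion data). It chains the three functions
# above to get the reflected color of a thin film under D65 daylight.
def _example_color_pipeline():
    """Color of a 300nm film (n=1.46) on a thick substrate (n=1.52),
    viewed from air at normal incidence. Returns the calc_color() dict."""
    n_fn_list = [lambda wl: 1.0,    # semi-infinite ambient (air)
                 lambda wl: 1.46,   # film; non-dispersive placeholder
                 lambda wl: 1.52]   # semi-infinite substrate
    d_list = [inf, 300, inf]        # thicknesses in nm
    reflectances = calc_reflectances(n_fn_list, d_list, 0)
    illuminant = colorpy.illuminants.get_illuminant_D65()
    spectrum = calc_spectrum(reflectances, illuminant)
    return calc_color(spectrum)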
def plot_reflectances(reflectances, filename='temp_plot.png', title='Reflectance', ylabel='Fraction reflected'):
"""
Makes nice colored plot of reflectances. reflectances is the output of
calc_reflectances(...)
"""
colorpy.plots.spectrum_plot(reflectances, title, filename, ylabel=ylabel)
def plot_spectrum(spectrum, filename='temp_plot.png', title='Reflected light under illumination', ylabel='Intensity (a.u.)'):
"""
Makes nice colored plot of the reflected color spectrum you see under a
certain illuminant. spectrum is the output of
calc_spectrum(...)
"""
colorpy.plots.spectrum_plot(spectrum, title, filename, ylabel=ylabel)
|
[
"colorpy.ciexyz.xyz_from_spectrum",
"numpy.all",
"colorpy.plots.spectrum_plot",
"numpy.array",
"numpy.arange",
"colorpy.colormodels.irgb_from_rgb",
"colorpy.colormodels.rgb_from_xyz"
] |
[((2541, 2557), 'numpy.arange', 'arange', (['(360)', '(831)'], {}), '(360, 831)\n', (2547, 2557), False, 'from numpy import arange, array\n'), ((3416, 3435), 'numpy.array', 'array', (['final_answer'], {}), '(final_answer)\n', (3421, 3435), False, 'from numpy import arange, array\n'), ((4564, 4583), 'numpy.array', 'array', (['final_answer'], {}), '(final_answer)\n', (4569, 4583), False, 'from numpy import arange, array\n'), ((5883, 5925), 'colorpy.ciexyz.xyz_from_spectrum', 'colorpy.ciexyz.xyz_from_spectrum', (['spectrum'], {}), '(spectrum)\n', (5915, 5925), False, 'import colorpy\n'), ((6333, 6370), 'colorpy.colormodels.rgb_from_xyz', 'colorpy.colormodels.rgb_from_xyz', (['XYZ'], {}), '(XYZ)\n', (6365, 6370), False, 'import colorpy\n'), ((6382, 6420), 'colorpy.colormodels.irgb_from_rgb', 'colorpy.colormodels.irgb_from_rgb', (['rgb'], {}), '(rgb)\n', (6415, 6420), False, 'import colorpy\n'), ((6724, 6797), 'colorpy.plots.spectrum_plot', 'colorpy.plots.spectrum_plot', (['reflectances', 'title', 'filename'], {'ylabel': 'ylabel'}), '(reflectances, title, filename, ylabel=ylabel)\n', (6751, 6797), False, 'import colorpy\n'), ((7094, 7163), 'colorpy.plots.spectrum_plot', 'colorpy.plots.spectrum_plot', (['spectrum', 'title', 'filename'], {'ylabel': 'ylabel'}), '(spectrum, title, filename, ylabel=ylabel)\n', (7121, 7163), False, 'import colorpy\n'), ((4072, 4118), 'numpy.all', 'np.all', (['(reflectances[:, 0] == illuminant[:, 0])'], {}), '(reflectances[:, 0] == illuminant[:, 0])\n', (4078, 4118), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
import sys
import os
import numpy as np
from BaseDriver import LabberDriver, Error
sys.path.append('C:\\Program Files (x86)\\Keysight\\SD1\\Libraries\\Python')
import keysightSD1
class Driver(LabberDriver):
"""Keysigh PXI HVI trigger"""
def performOpen(self, options={}):
"""Perform the operation of opening the instrument connection"""
# timeout
self.timeout_ms = int(1000 * self.dComCfg['Timeout'])
# get PXI chassis
self.chassis = int(self.comCfg.address)
# auto-scan chassis address
n_unit = keysightSD1.SD_Module.moduleCount()
all_chassis = [
keysightSD1.SD_Module.getChassisByIndex(n) for n in range(n_unit)]
# check if user-given chassis is available
if n_unit > 0 and self.chassis not in all_chassis:
# if all units are in the same chassis, override given PXI chassis
if np.all(np.array(all_chassis) == all_chassis[0]):
self.chassis = all_chassis[0]
# number of slots in chassis
self.n_slot = 18
# supported AWGs and Digitizers
self.AWGS = ['M3201', 'M3202', 'M3300', 'M3302']
self.DIGS = ['M3100', 'M3102']
# keep track of current PXI configuration
# 0: None, 1: AWG, 2: Digitizer
self.units = [0] * self.n_slot
self.old_trig_period = -1.0
self.old_dig_delay = -1.0
# Create HVI object
self.HVI = keysightSD1.SD_HVI()
def performClose(self, bError=False, options={}):
"""Perform the close instrument connection operation"""
# do not check for error if close was called with an error
try:
# close instrument
self.HVI.stop()
self.HVI.close()
except Exception:
# never return error here
pass
def performSetValue(self, quant, value, sweepRate=0.0, options={}):
"""Perform the Set Value instrument operation. This function should
return the actual value set by the instrument"""
# continue depending on quantity
if quant.name == 'Auto-detect':
# auto-detect units
if value:
self.auto_detect()
elif quant.name == 'Scan':
# when scanning, just run auto-detect
self.auto_detect()
else:
# just set the quantity value, config will be set at final call
quant.setValue(value)
# only update configuration at final call
if self.isFinalCall(options):
self.configure_hvi()
return value
def configure_hvi(self):
"""Configure and start/stop HVI depending on UI settings"""
# get units
units = self.get_pxi_config_from_ui()
n_awg = len([x for x in units if x == 1])
n_dig = len([x for x in units if x == 2])
# if no units in use, just stop
if (n_awg + n_dig) == 0:
self.HVI.stop()
return
# check if unit configuration changed, if so reload HVI
if units != self.units:
# stop current HVI, may not even be running
self.HVI.stop()
self.HVI.close()
self.units = units
# we need at least one AWG
if n_awg == 0:
raise Error('This driver requires at least one AWG.')
# currently only support 2 digitizers
if n_dig > 2:
raise Error('This driver only supports up to two digitizers.')
# get HVI name and open
hvi_name = 'InternalTrigger_%d_%d.HVI' % (n_awg, n_dig)
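            # e.g. two AWGs and one digitizer resolve to
            # 'InternalTrigger_2_1.HVI' inside the HVI_Delay folder below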
dir_path = os.path.dirname(os.path.realpath(__file__))
self.HVI.open(os.path.join(dir_path, 'HVI_Delay', hvi_name))
# assign units, run twice to ignore errors before all units are set
for m in range(2):
awg_number = 0
dig_number = 0
for n, unit in enumerate(units):
# if unit in use, assign to module
if unit == 0:
continue
elif unit == 1:
# AWG
module_name = 'Module %d' % awg_number
awg_number += 1
elif unit == 2:
# digitizer
module_name = 'DAQ %d' % dig_number
dig_number += 1
r = self.HVI.assignHardwareWithUserNameAndSlot(
module_name, self.chassis, n + 1)
# only check for errors after second run
if m > 0:
self.check_keysight_error(r)
# clear old trig period to force update
self.old_trig_period = 0.0
# only update trig period if necessary, takes time to re-compile
if (self.getValue('Trig period') != self.old_trig_period or
self.getValue('Digitizer delay') != self.old_dig_delay):
self.old_trig_period = self.getValue('Trig period')
self.old_dig_delay = self.getValue('Digitizer delay')
# update trig period, include 460 ns delay in HVI
wait = round(self.getValue('Trig period') / 10E-9) - 46
digi_wait = round(self.getValue('Digitizer delay') / 10E-9)
# special case if only one module: add 240 ns extra delay
if (n_awg + n_dig) == 1:
wait += 24
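            # Worked example (HVI runs on the 10 ns clock implied by the
            # /10E-9 above): a 100 us trig period gives round(1e-4 / 1e-8) =
            # 10000 cycles; minus the 46-cycle (460 ns) overhead, wait = 9954
            # (or 9978 with the single-module +24 adjustment just above).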
# r = self.HVI.writeIntegerConstantWithIndex(0, 'Wait time', wait)
r = self.HVI.writeIntegerConstantWithUserName(
'Module 0', 'Wait time', wait)
self.check_keysight_error(r)
self.log('Number of modules', self.HVI.getNumberOfModules())
for n in range(n_dig):
r = self.HVI.writeIntegerConstantWithUserName(
'DAQ %d' % n, 'Digi wait', digi_wait)
self.check_keysight_error(r)
# need to recompile after setting wait time, not sure why
self.check_keysight_error(self.HVI.compile())
# try to load a few times, sometimes hangs on first try
n_try = 5
while True:
try:
self.check_keysight_error(self.HVI.load())
break
except Exception:
n_try -= 1
if n_try <= 0:
raise
# start or stop the HVI, depending on output state
if self.getValue('Output'):
self.check_keysight_error(self.HVI.start())
else:
self.HVI.stop()
def check_keysight_error(self, code):
"""Check and raise error"""
if code >= 0:
return
# get error message
raise Error(keysightSD1.SD_Error.getErrorMessage(code))
def auto_detect(self):
"""Auto-detect units"""
# start by clearing old config
for n in range(self.n_slot):
self.setValue('Slot %d' % (n + 1), 0)
# loop through all units, make sure chassis match
n_unit = keysightSD1.SD_Module.moduleCount()
for n in range(n_unit):
chassis = keysightSD1.SD_Module.getChassisByIndex(n)
slot = keysightSD1.SD_Module.getSlotByIndex(n)
# if chassis match, check unit type
if chassis == self.chassis:
model = keysightSD1.SD_Module.getProductNameByIndex(n)
if model[:5] in self.AWGS:
self.setValue('Slot %d' % slot, 'AWG')
elif model[:5] in self.DIGS:
self.setValue('Slot %d' % slot, 'Digitizer')
def get_pxi_config_from_ui(self):
"""Get PXI config from user interface"""
units = []
for n in range(self.n_slot):
units.append(self.getValueIndex('Slot %d' % (n + 1)))
return units
if __name__ == '__main__':
pass
|
[
"sys.path.append",
"BaseDriver.Error",
"keysightSD1.SD_Module.getProductNameByIndex",
"keysightSD1.SD_Module.getSlotByIndex",
"os.path.realpath",
"keysightSD1.SD_Module.moduleCount",
"keysightSD1.SD_HVI",
"keysightSD1.SD_Module.getChassisByIndex",
"numpy.array",
"os.path.join",
"keysightSD1.SD_Error.getErrorMessage"
] |
[((105, 181), 'sys.path.append', 'sys.path.append', (['"""C:\\\\Program Files (x86)\\\\Keysight\\\\SD1\\\\Libraries\\\\Python"""'], {}), "('C:\\\\Program Files (x86)\\\\Keysight\\\\SD1\\\\Libraries\\\\Python')\n", (120, 181), False, 'import sys\n'), ((585, 620), 'keysightSD1.SD_Module.moduleCount', 'keysightSD1.SD_Module.moduleCount', ([], {}), '()\n', (618, 620), False, 'import keysightSD1\n'), ((1469, 1489), 'keysightSD1.SD_HVI', 'keysightSD1.SD_HVI', ([], {}), '()\n', (1487, 1489), False, 'import keysightSD1\n'), ((7161, 7196), 'keysightSD1.SD_Module.moduleCount', 'keysightSD1.SD_Module.moduleCount', ([], {}), '()\n', (7194, 7196), False, 'import keysightSD1\n'), ((657, 699), 'keysightSD1.SD_Module.getChassisByIndex', 'keysightSD1.SD_Module.getChassisByIndex', (['n'], {}), '(n)\n', (696, 699), False, 'import keysightSD1\n'), ((6855, 6897), 'keysightSD1.SD_Error.getErrorMessage', 'keysightSD1.SD_Error.getErrorMessage', (['code'], {}), '(code)\n', (6891, 6897), False, 'import keysightSD1\n'), ((7251, 7293), 'keysightSD1.SD_Module.getChassisByIndex', 'keysightSD1.SD_Module.getChassisByIndex', (['n'], {}), '(n)\n', (7290, 7293), False, 'import keysightSD1\n'), ((7313, 7352), 'keysightSD1.SD_Module.getSlotByIndex', 'keysightSD1.SD_Module.getSlotByIndex', (['n'], {}), '(n)\n', (7349, 7352), False, 'import keysightSD1\n'), ((3333, 3380), 'BaseDriver.Error', 'Error', (['"""This driver requires at least one AWG."""'], {}), "('This driver requires at least one AWG.')\n", (3338, 3380), False, 'from BaseDriver import LabberDriver, Error\n'), ((3479, 3535), 'BaseDriver.Error', 'Error', (['"""This driver only supports up to two digitizers."""'], {}), "('This driver only supports up to two digitizers.')\n", (3484, 3535), False, 'from BaseDriver import LabberDriver, Error\n'), ((3680, 3706), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3696, 3706), False, 'import os\n'), ((3734, 3779), 'os.path.join', 'os.path.join', (['dir_path', '"""HVI_Delay"""', 'hvi_name'], {}), "(dir_path, 'HVI_Delay', hvi_name)\n", (3746, 3779), False, 'import os\n'), ((7465, 7511), 'keysightSD1.SD_Module.getProductNameByIndex', 'keysightSD1.SD_Module.getProductNameByIndex', (['n'], {}), '(n)\n', (7508, 7511), False, 'import keysightSD1\n'), ((935, 956), 'numpy.array', 'np.array', (['all_chassis'], {}), '(all_chassis)\n', (943, 956), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 20:23:01 2020
@author: wantysal
"""
# Standard library imports
import numpy as np
# Mosqito functions import
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing import _spectrum_smoothing
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH import _LTH
from mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band import _critical_band
def _screening_for_tones(freqs, spec_db, method, low_freq, high_freq):
"""
Screening function to find the tonal candidates in a spectrum
    The 'smoothed' method is the one described by <NAME> and <NAME> in:
Automating prominent tone evaluations and accounting for time-varying
conditions, Sound Quality Symposium, SQS 2008, Detroit, 2008.
The 'not-smoothed' method is the one used by Aures and Terhardt
    The criterion on tonal width comes from Wade Bray in 'Methods for automating
prominent tone evaluation and for considering variations with time or other
reference quantities'
Parameters
----------
freqs : numpy.array
frequency axis (n blocks x frequency axis)
spec_db : numpy.array
spectrum in dB (n block x spectrum)
method : string
        the method chosen to find the tones, 'smoothed' or 'not-smoothed'
low_freq : float
lowest frequency of interest
high_freq : float
highest frequency of interest
Returns
-------
tones : list
list of index corresponding to the potential tonal components
"""
###############################################################################
# Detection of the tonal candidates according to their level
# Creation of the smoothed spectrum
smooth_spec = _spectrum_smoothing(freqs, spec_db.T, 24, low_freq, high_freq, freqs).T
n = spec_db.shape[0]
    if len(spec_db.shape) > 1:
        m = spec_db.shape[1]
        stop = np.arange(1, n + 1) * m - 1
else:
m = spec_db.shape[0]
n = 1
stop = [m]
smooth_spec = smooth_spec.ravel()
spec_db = spec_db.ravel()
freqs = freqs.ravel()
if method == "smoothed":
# Criteria 1 : the level of the spectral line is higher than the level of
# the two neighboring lines
maxima = (np.diff(np.sign(np.diff(spec_db))) < 0).nonzero()[0] + 1
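        # Worked example: spec_db = [0, 3, 1, 4, 2] gives sign(diff) =
        # [1, -1, 1, -1]; the negative steps of that sequence mark the
        # local maxima, here at indices 1 and 3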
# Criteria 2 : the level of the spectral line exceeds the corresponding lines
# of the 1/24 octave smoothed spectrum by at least 6 dB
indexx = np.where(spec_db[maxima] > smooth_spec[maxima] + 6)[0]
# Criteria 3 : the level of the spectral line exceeds the threshold of hearing
threshold = _LTH(freqs)
audible = np.where(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)[0]
index = np.arange(0, len(spec_db))[maxima][indexx][audible]
if method == "not-smoothed":
# Criteria 1 : the level of the spectral line is higher than the level of
# the two neighboring lines
maxima = (
np.diff(np.sign(np.diff(spec_db[3 : len(spec_db) - 3]))) < 0
).nonzero()[
0
] + 1 # local max
# Criteria 2 : the level of the spectral line is at least 7 dB higher than its
# +/- 2,3 neighbors
indexx = np.where(
(spec_db[maxima] > (spec_db[maxima + 2] + 7))
& (spec_db[maxima] > (spec_db[maxima - 2] + 7))
& (spec_db[maxima] > (spec_db[maxima + 3] + 7))
& (spec_db[maxima] > (spec_db[maxima - 3] + 7))
)[0]
# Criteria 3 : the level of the spectral line exceeds the threshold of hearing
threshold = _LTH(freqs)
audible = np.where(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)[0]
index = np.arange(0, len(spec_db))[maxima][indexx][audible]
###############################################################################
# Check of the tones width : a candidate is discarded if its width is greater
# than half the critical bandwidth
if n == 1:
tones = []
else:
        tones = [[] for i in range(n)]
# Each candidate is evaluated
while len(index) > 0:
# Index of the candidate
peak_index = index[0]
for i in range(n):
            if (peak_index < stop[i]) & (peak_index > (stop[i] - m)):
block = i
# Lower and higher limits of the tone width
low_limit = peak_index
high_limit = peak_index
# Screen the right points of the peak
temp = peak_index + 1
# As long as the level decreases or remains above the smoothed spectrum,
while (spec_db[temp] > smooth_spec[temp] + 6) and (temp + 1 < (block+1)*m):
# if a highest spectral line is found, it becomes the candidate
if spec_db[temp] > spec_db[peak_index]:
peak_index = temp
high_limit += 1
temp += 1
# Screen the left points of the peak
temp = peak_index - 1
        # As long as the level remains above the smoothed spectrum,
        while (spec_db[temp] > smooth_spec[temp] + 6) and (temp + 1 > block * m):
# if a highest spectral line is found, it becomes the candidate
if spec_db[temp] > spec_db[peak_index]:
peak_index = temp
low_limit -= 1
temp -= 1
# Critical bandwidth
f1, f2 = _critical_band(freqs[peak_index])
cb_width = f2 - f1
# Tonal width
t_width = freqs[high_limit] - freqs[low_limit]
if t_width < cb_width:
            if n > 1:
                tones[block] = np.append(tones[block], peak_index - block * m)
            else:
                tones = np.append(tones, peak_index)
# All the candidates already screened are deleted from the list
sup = np.where(index <= high_limit)[0]
index = np.delete(index, sup)
tones = np.asarray(tones, dtype=object)
return tones
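# Hedged usage sketch (illustrative; 'spectrum' is an assumed complex FFT
# array and the band limits are examples, none of it part of this module):
#
#     import numpy as np
#     freqs = np.arange(0, 24000, 2.0)               # 2 Hz frequency axis
#     spec_db = 20 * np.log10(np.abs(spectrum) + 1e-12)
#     tones = _screening_for_tones(freqs, spec_db, "smoothed", 90, 11200)
#
# 'tones' then holds the indices of the spectral lines that passed the
# level, audibility and half-critical-bandwidth criteria described above.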
|
[
"mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band._critical_band",
"mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH._LTH",
"numpy.asarray",
"numpy.append",
"numpy.where",
"numpy.arange",
"numpy.diff",
"mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing._spectrum_smoothing",
"numpy.delete"
] |
[((5967, 5998), 'numpy.asarray', 'np.asarray', (['tones'], {'dtype': 'object'}), '(tones, dtype=object)\n', (5977, 5998), True, 'import numpy as np\n'), ((1765, 1834), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing._spectrum_smoothing', '_spectrum_smoothing', (['freqs', 'spec_db.T', '(24)', 'low_freq', 'high_freq', 'freqs'], {}), '(freqs, spec_db.T, 24, low_freq, high_freq, freqs)\n', (1784, 1834), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._spectrum_smoothing import _spectrum_smoothing\n'), ((2711, 2722), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH._LTH', '_LTH', (['freqs'], {}), '(freqs)\n', (2715, 2722), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH import _LTH\n'), ((3688, 3699), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH._LTH', '_LTH', (['freqs'], {}), '(freqs)\n', (3692, 3699), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._LTH import _LTH\n'), ((5452, 5485), 'mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band._critical_band', '_critical_band', (['freqs[peak_index]'], {}), '(freqs[peak_index])\n', (5466, 5485), False, 'from mosqito.sq_metrics.tonality.tone_to_noise_ecma._critical_band import _critical_band\n'), ((5928, 5949), 'numpy.delete', 'np.delete', (['index', 'sup'], {}), '(index, sup)\n', (5937, 5949), True, 'import numpy as np\n'), ((2548, 2599), 'numpy.where', 'np.where', (['(spec_db[maxima] > smooth_spec[maxima] + 6)'], {}), '(spec_db[maxima] > smooth_spec[maxima] + 6)\n', (2556, 2599), True, 'import numpy as np\n'), ((2741, 2807), 'numpy.where', 'np.where', (['(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)'], {}), '(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)\n', (2749, 2807), True, 'import numpy as np\n'), ((3319, 3519), 'numpy.where', 'np.where', (['((spec_db[maxima] > spec_db[maxima + 2] + 7) & (spec_db[maxima] > spec_db[\n maxima - 2] + 7) & (spec_db[maxima] > spec_db[maxima + 3] + 7) & (\n spec_db[maxima] > spec_db[maxima - 3] + 7))'], {}), '((spec_db[maxima] > spec_db[maxima + 2] + 7) & (spec_db[maxima] > \n spec_db[maxima - 2] + 7) & (spec_db[maxima] > spec_db[maxima + 3] + 7) &\n (spec_db[maxima] > spec_db[maxima - 3] + 7))\n', (3327, 3519), True, 'import numpy as np\n'), ((3718, 3784), 'numpy.where', 'np.where', (['(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)'], {}), '(spec_db[maxima][indexx] > threshold[maxima][indexx] + 10)\n', (3726, 3784), True, 'import numpy as np\n'), ((5879, 5908), 'numpy.where', 'np.where', (['(index <= high_limit)'], {}), '(index <= high_limit)\n', (5887, 5908), True, 'import numpy as np\n'), ((1946, 1965), 'numpy.arange', 'np.arange', (['(1)', '(n + 1)'], {}), '(1, n + 1)\n', (1955, 1965), True, 'import numpy as np\n'), ((5674, 5721), 'numpy.append', 'np.append', (['tones[block]', '(peak_index - block * m)'], {}), '(tones[block], peak_index - block * m)\n', (5683, 5721), True, 'import numpy as np\n'), ((5762, 5790), 'numpy.append', 'np.append', (['tones', 'peak_index'], {}), '(tones, peak_index)\n', (5771, 5790), True, 'import numpy as np\n'), ((2339, 2355), 'numpy.diff', 'np.diff', (['spec_db'], {}), '(spec_db)\n', (2346, 2355), True, 'import numpy as np\n')]
|
"""Variable is a one-dimensional discrete and continuous real variable class.
<NAME>, July 2005
"""
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
# PyDSTool imports
from .utils import *
from .common import *
from .common import (
_num_types,
_num_equivtype,
_float_types,
_real_types,
_int_types,
_seq_types,
_num_type2name,
_num_name2type,
_num_name2equivtypes,
_all_float,
_all_int,
_all_complex,
_num_maxmin,
)
from .errors import *
from .Points import *
from .Interval import *
from .FuncSpec import ImpFuncSpec
from numpy import (
Inf,
NaN,
isfinite,
sometrue,
alltrue,
any,
all,
array,
float64,
int32,
ndarray,
asarray,
)
import copy
import types, math, random
import six
__all__ = [
"Variable",
"HybridVariable",
"OutputFn",
"isinputcts",
"isinputdiscrete",
"isoutputcts",
"isoutputdiscrete",
"iscontinuous",
"isdiscrete",
"numeric_to_vars",
"pointset_to_vars",
]
# ------------------------------------------------------------------
class VarDiagnostics(Diagnostics):
def getWarnings(self):
if self.warnings:
output = "Warnings:"
for (i, d) in self.warnings:
if d is None:
output += "Independent variable value %s was out of " % i + "bounds"
else:
output += (
"Dependent variable value was out of "
+ "bounds at independent variable value %s" % i
)
else:
output = ""
return output
def pointset_to_vars(pts, discrete=True):
"""Utility to convert Pointset to a dictionary of Variables.
If discrete option set to False (default is True) then the
Variables will be linearly interpolated within their domain.
Any labels in the pointset will be preserved in the Variables
in case of their re-extraction using the getDataPoints method.
"""
coordnames = pts.coordnames
vals = pts.coordarray
all_types_float = pts.coordtype == float
if isparameterized(pts):
indepvar = pts.indepvararray
indepvarname = pts.indepvarname
if discrete:
indepvartype = int
else:
indepvartype = float
indepdomain = Interval(
pts.indepvarname,
indepvartype,
extent(pts.indepvararray),
abseps=pts._abseps,
)
else:
indepvar = None
indepvarname = None
indepdomain = None
return numeric_to_vars(
vals,
coordnames,
indepvar,
indepvarname,
indepdomain,
all_types_float,
discrete,
pts._abseps,
pts.labels,
)
def numeric_to_vars(
vals,
coordnames,
indepvar=None,
indepvarname="t",
indepdomain=None,
all_types_float=True,
discrete=True,
abseps=None,
labels=None,
):
"""Utility to convert numeric types to a dictionary of Variables.
    If discrete option set to False (default is True) then the
Variables will be linearly interpolated within their domain.
"""
if isinstance(coordnames, str):
coordnames = [coordnames]
if isinstance(vals, _num_types):
vals = [[vals]]
vars = {}
if indepvar is None:
for i, c in enumerate(coordnames):
if all_types_float:
vartype = float
else:
vartype = array(vals[i]).dtype.type
if discrete:
vars[c] = Variable(
outputdata=Pointset(
{"coordnames": c, "coordarray": vals[i], "coordtype": vartype}
),
name=c,
abseps=abseps,
labels=labels,
)
else:
raise AssertionError(
"Cannot use continuously defined "
"option without an independent variable"
)
return vars
else:
if isinstance(indepvar, _num_types):
indepvartype = type(indepvar)
indepvar = [indepvar]
else:
indepvartype = asarray(indepvar).dtype.type
if indepdomain is None:
indepdomain = indepvarname
else:
if isinstance(indepdomain, Interval):
assert indepvarname == indepdomain.name, "Indep varname mismatch"
else:
if discrete:
var_type = int
else:
var_type = float
indepdomain = Interval(indepvarname, var_type, indepdomain)
for i, c in enumerate(coordnames):
if all_types_float:
vartype = float
else:
vartype = array(vals[i]).dtype.type
if discrete:
vars[c] = Variable(
outputdata=Pointset(
{
"coordnames": c,
"coordarray": vals[i],
"coordtype": vartype,
"indepvarname": indepvarname,
"indepvararray": indepvar,
"indepvartype": indepvartype,
}
),
indepdomain=indepdomain,
name=c,
abseps=abseps,
labels=labels,
)
else:
dom_int = Interval(c, vartype, extent(vals[i]), abseps=abseps)
vars[c] = Variable(
outputdata=interp1d(indepvar, vals[i]),
indepdomain=indepdomain,
depdomain=dom_int,
name=c,
abseps=abseps,
labels=labels,
)
return vars
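# Hedged usage sketch (illustrative values, not part of PyDSTool itself):
# wrapping plain arrays as a continuously interpolated Variable.
#
#     ts = [0.0, 0.5, 1.0]
#     xs = [0.0, 1.0, 0.0]
#     vs = numeric_to_vars([xs], 'x', indepvar=ts, indepvarname='t',
#                          discrete=False)
#     vs['x'](0.25)   # -> 0.5, linearly interpolated between mesh points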
class Variable(object):
"""One-dimensional discrete and continuous real variable class.
"""
def __init__(
self,
outputdata=None,
indepdomain=None,
depdomain=None,
name="noname",
abseps=None,
labels=None,
):
# funcreg stores function data for dynamically created methods
# to allow a Variable to be copied using pickling
self._funcreg = {}
if isinstance(name, str):
# !!! name is probably redundant
self.name = name
else:
raise TypeError("name argument must be a string")
# defaults for empty 'placeholder' Variables used by ODEsystem
if outputdata is None or isinstance(outputdata, (Pointset, interp1d)):
if indepdomain is None:
indepdomain = "t"
if depdomain is None:
depdomain = "x"
# set some initial values so that can test if what changed
# after calling setOutput()
self._vectorizable = True
self.defined = False
self.indepdomain = None
self.indepvartype = None
self.indepvarname = None
self.depdomain = None
self.coordtype = None
self.coordname = None
self._refvars = None # for use with ExplicitFnGen
# Ranges covered by the current trajectory held (if known)
self.trajirange = None
self.trajdrange = None
# independent variable domain
self.setIndepdomain(indepdomain, abseps)
# used internally, especially for "input" variables
self._internal_t_offset = 0
# output function
self.setOutput(outputdata, abseps)
# dependent variable domain
self.setDepdomain(depdomain, abseps)
assert self.coordname != self.indepvarname, (
"Independent variable " "name and coordinate name must be different"
)
self.diagnostics = VarDiagnostics()
# labels is for internal use in case Variable data is from a Pointset
# that uses labels. This preserves them for getDataPoints method to
# restore them.
self.labels = labels
def is_continuous_valued(self):
return isoutputcts(self)
def is_discrete_valued(self):
return not isoutputcts(self)
# Auxiliary functions for user-defined code to call
def _auxfn_globalindepvar(self, parsinps, t):
return self.globalt0 + t
def _auxfn_initcond(self, parsinps, varname):
return self.initialconditions[varname]
def _auxfn_heav(self, parsinps, x):
if x > 0:
return 1
else:
return 0
def _auxfn_if(self, parsinps, c, e1, e2):
if c:
return e1
else:
return e2
def _auxfn_getindex(self, parsinps, varname):
return self._var_namemap[varname]
def addMethods(self, funcspec):
"""Add dynamically-created methods to Veriable object"""
# Add the auxiliary function specs to this Variable's namespace
for auxfnname in funcspec.auxfns:
fninfo = funcspec.auxfns[auxfnname]
if not hasattr(Variable, fninfo[1]):
# user-defined auxiliary functions
# (built-ins are provided explicitly)
try:
six.exec_(fninfo[0], globals())
except:
print("Error in supplied auxiliary function code")
raise
self._funcreg[fninfo[1]] = ("Variable", fninfo[0])
setattr(Variable, fninfo[1], eval(fninfo[1]))
# Add the spec function to this Variable's namespace
fninfo_spec = funcspec.spec
if not hasattr(Variable, fninfo_spec[1]):
try:
six.exec_(fninfo_spec[0], globals())
except:
print("Error in supplied functional specification code")
raise
self._funcreg[fninfo_spec[1]] = ("Variable", fninfo_spec[0])
setattr(Variable, fninfo_spec[1], eval(fninfo_spec[1]))
# Add the auxiliary spec function (if present) to this Var's namespace
if funcspec.auxspec:
fninfo_auxspec = funcspec.auxspec
if not hasattr(Variable, fninfo_auxspec[1]):
try:
six.exec_(fninfo_auxspec[0], globals())
except:
print("Error in supplied auxiliary variable code")
raise
self._funcreg[fninfo_auxspec[1]] = ("Variable", fninfo_auxspec[0])
setattr(Variable, fninfo_auxspec[1], eval(fninfo_auxspec[1]))
# For implicit functions
if isinstance(funcspec, ImpFuncSpec):
impfn_name = funcspec.algparams["impfn_name"]
if funcspec.algparams["jac"]:
jac_str = "fprime=funcspec.algparams['jac'],"
else:
jac_str = ""
# Wrap spec fn like this as it has been set up as a
# method, but want to call as regular function
# *** ALSO *** spec fn has signature (ds, t, x, p)
# but implicit function solvers expect
# (x, t, p), so have to switch 1st and 2nd args here
# after 'ds' filled with None
if len(funcspec.vars) == 1:
# dimension == 1, so have to make list output from spec
# into a scalar
# Also, scalar a1 needs to be put into list form for
# acceptance as x in spec fn
specfn_str = (
"lambda a1, a2, a3: " + fninfo_spec[1] + "(None, a2, [a1], a3)[0]"
)
else:
# for dimension > 1 a1 will already be an array / list
specfn_str = (
"lambda a1, a2, a3: " + fninfo_spec[1] + "(None, a2, a1, a3)"
)
this_scope = globals() # WE CHANGE GLOBALS()
this_scope.update(
{
"funcspec": locals()["funcspec"],
"fninfo_spec": locals()["fninfo_spec"],
}
)
impfn_str = (
impfn_name
+ " = makeImplicitFunc("
+ specfn_str
+ ","
+ jac_str
+ """x0=funcspec.algparams['x0'],
extrafargs=(funcspec.algparams['pars'],),
xtolval=funcspec.algparams['atol'],
maxnumiter=funcspec.algparams['maxnumiter'],
solmethod=funcspec.algparams['solvemethod'],
standalone=False)"""
)
try:
six.exec_(impfn_str, this_scope)
except:
print("Error in supplied implicit function code")
raise
# record special reference to the implicit fn,
# as its a method of Variable (for delete method).
self._funcreg["_impfn"] = (impfn_name, impfn_str)
# In previous versions setattr was to self, not the Variable class
setattr(Variable, impfn_name, eval(impfn_name))
# clean up globals() afterwards
del this_scope["funcspec"]
del this_scope["fninfo_spec"]
def getDataPoints(self):
"""Reveal underlying mesh and values at mesh points, provided
Variable is based on a mesh (otherwise None is returned).
The returned Pointset will be time-shifted according to the
Variable's current _internal_t_offset attribute.
Any pointset labels present when the variable was created will
be restored.
"""
if isinstance(self.output, VarCaller):
return Pointset(
indepvarname=self.indepvarname,
indepvararray=self.output.pts.indepvararray + self._internal_t_offset,
coordnames=[self.coordname],
coordarray=self.output.pts.coordarray[0],
labels=self.labels,
)
elif hasattr(self.output, "datapoints"):
datapoints = self.output.datapoints
return Pointset(
indepvarname=self.indepvarname,
indepvararray=datapoints[0] + self._internal_t_offset,
coordnames=[self.coordname],
coordarray=datapoints[1],
labels=self.labels,
)
else:
return None
def underlyingMesh(self):
"""Reveal underlying mesh as arrays, rather than Pointset
as returned by getDataPoints method. If no underlying mesh is
present, None is returned."""
try:
# works if .output is an interpclass instance
mesh = self.output.datapoints
except AttributeError:
try:
# works if .output is a VarCaller instance (with underlying Pointset)
pts = self.output.pts
mesh = array([pts.indepvararray, pts.coordarray[0]])
except AttributeError:
mesh = None
return mesh
def truncate_to_idx(self, idx):
mesh = self.underlyingMesh()
if mesh is None:
raise RuntimeError(
"Cannot truncate a Variable without an underlying mesh by index"
)
try:
new_t_end = mesh[0][idx]
except IndexError:
raise ValueError("Truncation index %d out of range" % idx)
except TypeError:
raise TypeError("Index must be an integer")
if isinstance(self.indepdomain, Interval):
self.indepdomain.set([self.indepdomain[0], new_t_end])
else:
# ndarray type
self.indepdomain = self.indepdomain[0:idx]
# adjust depdomain for array type of dep domain
# (nothing to change for Interval type)
if isinstance(self.depdomain, ndarray):
self.depdomain = self.depdomain[0:idx]
# adjust trajirange and trajdrange
self._setRanges(self.depdomain._abseps)
def _setRanges(self, abseps=None):
# set trajirange and trajdrange for the two types of Variable output method
# that these are associated with (see method setOutput)
try:
output = self.output
except AttributeError:
# output not set or not a compatible type for trajirange and trajdrange
return
if isinstance(output, VarCaller):
self.trajirange = Interval(
"traj_indep_bd",
self.indepvartype,
extent(output.pts.indepvararray),
abseps=abseps,
)
self.trajdrange = Interval(
"traj_dep_bd",
self.coordtype,
extent(output.pts.coordarray[0]),
abseps=abseps,
)
elif isinstance(output, (OutputFn, interpclass) + six.class_types):
if hasattr(output, "types"):
deptype = output.types[0]
indeptype = output.types[1]
else:
# default
deptype = indeptype = float
if isinstance(output.datapoints[0], Interval):
assert compareNumTypes(
output.types[0], output.datapoints[0].type
), "Inconsistent type with Interval bounds"
self.trajirange = output.datapoints[0]
else:
self.trajirange = Interval(
"traj_indep_bd",
indeptype,
extent(output.datapoints[0]),
abseps=abseps,
)
if isinstance(output.datapoints[1], Interval):
assert compareNumTypes(
output.types[1], output.datapoints[1].type
), "Inconsistent type with Interval bounds"
self.trajdrange = output.datapoints[1]
else:
self.trajdrange = Interval(
"traj_dep_bd", deptype, extent(output.datapoints[1]), abseps=abseps
)
def setOutput(
self,
outputdata,
funcspec=None,
globalt0=0,
var_namemap=None,
ics=None,
refvars=None,
abseps=None,
):
"""Dynamically create 'output' method of Variable"""
self.globalt0 = globalt0
if type(outputdata) in [
types.FunctionType,
types.BuiltinFunctionType,
types.MethodType,
]:
# Variable generated from function, given in closed form
self.output = outputdata
assert ics is None, "Invalid option for this type of output"
if outputdata != noneFn:
self.defined = True
elif isinstance(outputdata, tuple):
# For ExplicitFnGen or ImplicitFnGen types, whose functional forms
# may need to access these at call time.
assert len(outputdata) == 2, "Incorrect size of outputdata tuple"
if funcspec is not None:
self.addMethods(funcspec)
self._var_namemap = var_namemap
self._funcreg["funcspec"] = (None, funcspec)
else:
raise ValueError("funcspec missing in setOutput")
# Add the specific mapping functions for Ex/ImplicitFnGen objects
try:
six.exec_(outputdata[1], globals())
except:
print("Internal Error in _mapspecfn code")
raise
has_op = hasattr(self, "output")
# have to define this function in here because use of lambda
# won't allow me to pickle the Variable object
if not has_op or (has_op and self.output is noneFn):
def wrap_output(arg):
return eval(outputdata[0])(self, arg)
setattr(self, "output", wrap_output)
self._funcreg["outputdata"] = (None, outputdata)
t0 = self.indepdomain[0]
if ics is None and not isinstance(funcspec, ImpFuncSpec):
try:
self.initialconditions = {self.coordname: self.output(t0)}
except ValueError:
self.initialconditions = {self.coordname: NaN}
except TypeError:
print("Debugging info: self.output = %s" % self.output)
raise
else:
self.initialconditions = ics
self._vectorizable = False
self._refvars = refvars
self.defined = True
elif isinstance(outputdata, (OutputFn, interpclass) + six.class_types):
# Variable generated by callable object that generates values over
# mesh points that it holds, e.g. by interpolation
# (InstanceType and TypeType are for backwards compatibility, e.g.
# for old SciPy interpolate code that uses Classic Classes)
assert ics is None, "Invalid option for this type of output"
assert "__call__" in dir(outputdata), "Must provide callable object"
self.output = outputdata
if hasattr(outputdata, "datapoints"):
self._setRanges(abseps)
self.defined = True
elif isinstance(outputdata, Pointset):
# Variable generated from a pointset (without interpolation)
assert ics is None, "Invalid option for this type of output"
assert isparameterized(outputdata), (
"Must only pass parameterized" " pointsets"
)
if outputdata.dimension == 1:
self.coordname = copy.copy(outputdata.coordnames[0])
self.indepvarname = outputdata.indepvarname
self.output = VarCaller(outputdata)
self.coordtype = outputdata.coordtype
self.indepvartype = outputdata.indepvartype
if self.indepdomain is not None:
for v in outputdata[self.indepvarname]:
                        if v not in self.indepdomain:
raise ValueError(
"New Pointset data violates "
"independent variable domain already specified"
)
if self.depdomain is not None:
for v in outputdata[self.coordname]:
                        if v not in self.depdomain:
raise ValueError(
"New Pointset data violates "
"dependent variable domain already specified"
)
self._setRanges(abseps)
self.defined = True
else:
raise ValueError("Pointset data must be 1D to create a " "Variable")
elif outputdata is None:
# placeholder for an unknown output type
assert ics is None, "Invalid option when outputdata argument is None"
self.output = noneFn
self.defined = False
else:
raise TypeError("Invalid type for data argument: " + str(type(outputdata)))
def setIndepdomain(self, indepdomain, abseps=None):
if isinstance(indepdomain, str):
self.indepvarname = indepdomain
if self.indepdomain is not None:
# If indepdomain already set and indepvarname is none then
# name won't get put in place unless we force it here
self.indepvarname = indepdomain
self.indepdomain.name = indepdomain
else:
self.indepdomain = Interval(
self.indepvarname, float, [-Inf, Inf], abseps=abseps
)
self.indepvartype = float
else:
if isinstance(indepdomain, Interval):
if self.trajirange:
if indepdomain.contains(self.trajirange) is notcontained:
raise ValueError(
"Cannot set independent variable"
" domain inside current trajectory's"
" range"
)
self.indepdomain = indepdomain
self.indepvarname = indepdomain.name
self.indepvartype = _num_name2type[indepdomain.typestr]
elif isinstance(indepdomain, dict):
# enumerated discrete domains
assert len(indepdomain) == 1, (
"Independent variable " "dictionary must have only 1 entry"
)
d = list(indepdomain.values())[0]
assert all(isfinite(d)), "Independent variable values must be" " finite"
if self.trajirange:
assert self.trajirange[0] in d
assert self.trajirange[1] in d
self.indepvarname = list(indepdomain.keys())[0]
if isinstance(d, (list, tuple)):
if self.coordtype is not None:
self.indepdomain = array(d, self.coordtype)
else:
self.indepdomain = array(d)
elif isinstance(d, ndarray):
da = array(d)
if (
self.indepvartype is not None
and self.indepvartype != da.dtype.type
):
raise TypeError(
"Mismatch between type of indepdomain "
"argument and Pointset data"
)
else:
self.indepdomain = da
else:
raise TypeError("Invalid type for independent " "variable domain")
# assert this after self.indepdomain has been made an array
# because isincreasing is most efficient on already-created
# arrays
assert isincreasing(
self.indepdomain
), "Independent variable values must be increasing"
self.indepvartype = self.indepdomain.dtype.type
else:
print("Independent variable argument domain was: %r" % indepdomain)
raise TypeError("Invalid type for independent variable " "domain")
def setDepdomain(self, depdomain, abseps=None):
if isinstance(depdomain, str):
self.coordname = depdomain
if self.depdomain is None:
if self.coordtype is None:
self.depdomain = Interval(
self.coordname, float, [-Inf, Inf], abseps=abseps
)
self.coordtype = float
else:
self.depdomain = Interval(
self.coordname,
self.coordtype,
_num_maxmin[self.coordtype],
abseps=abseps,
)
else:
# If interp functions supplied then don't have a name for
# Interval yet, so update it.
if isinstance(self.output, interpclass) and isinstance(
self.depdomain, Interval
):
self.depdomain.name = depdomain
else:
assert isinstance(self.output, Pointset)
self.diagnostics.warnings.append(
(
self.depdomain.name,
"Dependent variable already named. "
"Ignoring user-supplied name.",
)
)
else:
if isinstance(depdomain, Interval):
if self.trajdrange:
if depdomain.contains(self.trajdrange) is notcontained:
raise ValueError(
"Cannot set dependent variable "
"domain inside current trajectory's "
"range"
)
self.depdomain = depdomain
self.coordname = depdomain.name
if self.coordtype is None:
self.coordtype = depdomain.type
elif self.coordtype == depdomain.type:
pass
else:
raise TypeError(
"Mismatch between type of depdomain "
"argument and Pointset coord data"
)
elif isinstance(depdomain, dict):
assert (
len(depdomain) == 1
), "Depend variables dictionary must have only 1 entry"
d = list(depdomain.values())[0]
if self.trajdrange:
assert self.trajdrange[0] in d
assert self.trajdrange[1] in d
## Assume d is in increasing order
assert all(isfinite(d)), "Values must be finite"
self.coordname = list(depdomain.keys())[0]
if isinstance(d, (list, tuple)):
if self.coordtype is not None:
self.depdomain = array(d, self.coordtype)
else:
self.depdomain = array(d)
elif isinstance(d, ndarray):
da = array(d)
if self.coordtype is not None and self.coordtype != da.dtype.type:
raise TypeError(
"Mismatch between type of depdomain "
"argument and Pointset coord data"
)
else:
self.depdomain = da
else:
raise TypeError("Invalid type for dependent variable " "domain")
self.coordtype = self.depdomain.dtype.type
else:
print("Dependent variable domain argument was: %r" % depdomain)
raise TypeError("Invalid type for dependent variable domain")
if isinstance(self.output, Pointset):
assert (
self.coordname == self.output.coordnames[0]
), "Mismatch between Pointset coord name and declared name"
assert self.indepvarname == self.output.indepvarname, (
"Mismatch between Pointset independent variable name "
"and declared name"
)
def __call__(self, indepvar, checklevel=0):
# Set actual time by subtracting internal offset. Especially for use by
# "input" variables that are based on inherently time-shifted
# arrays of values, with nothing to do with the globalt0 of hybrid
# trajectories.
indepvar = asarray(indepvar) - self._internal_t_offset
if checklevel == 0:
# level 0 -- no depvar bounds checking at all
# (no need to check for indepvar as list case, which output
# should know how to handle)
try:
if not self._vectorizable and isinstance(indepvar, _seq_types):
return [self.output(ival) for ival in indepvar]
else:
return self.output(indepvar)
except (OverflowError, ValueError):
self.diagnostics.errors.append(
(indepvar, self.name + ": Overflow error in output")
)
raise
except PyDSTool_BoundsError:
self.diagnostics.errors.append(
(indepvar, self.name + ": Bounds error in output")
)
raise
elif checklevel in [1, 2]:
if self.trajirange is None:
idep = self.indepdomain
else:
# use known bounds on indep variable imposed by self.output
idep = self.trajirange
indepvar_ok = True
# level 1 -- ignore uncertain cases (treat as contained)
# level 2 -- warn on uncertain (treat as contained)
if isinstance(indepvar, _seq_types):
vectorizable = self._vectorizable
for d in indepvar:
# use 'in' so that this is compatible with
# interval, array and index indeps
try:
contresult = d in idep
except PyDSTool_UncertainValueError:
contresult = True
# adjust for rounding error so that interpolator
# does not barf on out-of-range values
if d < idep[0]:
try:
# list
dix = indepvar.index(d)
except AttributeError:
# array
dix = indepvar.tolist().index(d)
indepvar[dix] = idep[0]
elif d > idep[1]:
try:
# list
dix = indepvar.index(d)
except AttributeError:
# array
dix = indepvar.tolist().index(d)
indepvar[dix] = idep[1]
if checklevel == 2:
self.diagnostics.warnings.append((d, None))
if not contresult:
indepvar_ok = False
break
else:
vectorizable = True
try:
indepvar_ok = indepvar in idep
except PyDSTool_UncertainValueError as errinfo:
# adjust for rounding error so that interpolator
# does not barf on out-of-range values
if indepvar < idep[0]:
indepvar = idep[0]
elif indepvar > idep[1]:
indepvar = idep[1]
if checklevel == 2:
self.diagnostics.warnings.append((indepvar, None))
# continue to get dependent variable value, unless indep
# value was not OK
if not indepvar_ok:
## print "*** Debug info for variable: ", self.name
## print "Interval rounding tolerance was", idep._abseps
if checklevel == 2:
self.diagnostics.errors.append(
(indepvar, self.name + " : " + self.indepdomain._infostr(1))
)
if vectorizable:
raise ValueError(
"Independent variable value(s) " "out of range in Variable call"
)
else:
raise ValueError(
"Independent variable value " + str(indepvar) + " out of "
"range in Variable call"
)
try:
if vectorizable:
depvar = self.output(indepvar)
else:
depvar = [self.output(ival) for ival in indepvar]
depvar_ok = True
except PyDSTool_BoundsError as errinfo:
depvar_ok = False
# Now check that all computed values were in depdomain
if depvar_ok:
# no need to use self.trajdrange instead of
# self.depdomain because we trust that self.output
# generated the output within its own bounds!
if isinstance(depvar, (_seq_types, Pointset)):
if isinstance(depvar, Pointset):
dv = depvar.toarray()
else:
dv = depvar
for d in dv:
# use 'in' so that this is compatible with
# interval, array and index indeps
try:
contresult = d in self.depdomain
except PyDSTool_UncertainValueError as errinfo:
contresult = True
if checklevel == 2:
# find which indepvar was the cause of
# the uncertain value
try:
# list
depix = dv.index(d)
except AttributeError:
# array
depix = dv.tolist().index(d)
self.diagnostics.warnings.append(
(indepvar[depix], errinfo.value)
)
if not isfinite(d):
# DEBUG
# print dv
# print self.output, "\n"
raise PyDSTool_BoundsError(
"Return value was not finite/defined (%s)" % str(d)
)
if not contresult:
depvar_ok = False
break
elif depvar is None:
# DEBUG
# print "*** Debug info for variable: ", self.name
# print "Independent variable domain: ", self.indepdomain._infostr(1)
# print "Dependent variable domain: ", self.depdomain._infostr(1)
raise ValueError(
"Cannot compute a return value for "
"independent variable value " + str(indepvar)
)
else:
if isinstance(depvar, Point):
dv = depvar[0]
else:
dv = depvar
try:
depvar_ok = dv in self.depdomain
except PyDSTool_UncertainValueError as errinfo:
if checklevel == 2:
self.diagnostics.warnings.append((indepvar, errinfo.varval))
if not isfinite(dv):
# DEBUG
# print dv
# print self.output, "\n"
raise PyDSTool_BoundsError(
"Return value was not finite/defined (%s)" % str(dv)
)
# return value if depvar in bounds
if depvar_ok:
return dv
else:
# DEBUG
# print "Variable '%s' -"%self.name, "dependent var domain: ", \
# self.depdomain._infostr(1)
# self.diagnostics.showWarnings()
if vectorizable:
# DEBUG
# print self.output(indepvar), "\n"
raise PyDSTool_BoundsError(
"Computed value(s) %f outside" % dv
+ " validity range in Variable call"
)
else:
raise PyDSTool_BoundsError(
"Computed value %f outside" % dv
+ " validity range in Variable call"
)
else:
# level 3 -- exception will be raised for uncertain case
indepvar_ok = False
try:
# don't trap uncertain case exception from
# Interval.__contains__
if isinstance(indepvar, _seq_types):
vectorizable = self._vectorizable
indepvar_ok = all([i in self.indepdomain for i in indepvar])
else:
vectorizable = True
indepvar_ok = indepvar in self.indepdomain
except TypeError as e:
raise TypeError(
"Something messed up with the Variable " "initialization: " + str(e)
)
else:
if not indepvar_ok:
raise ValueError(
"Independent variable "
+ str(indepvar)
+ " out of range in Variable call"
)
# Don't need 'if indepvar_ok' because exception would have
# been raised.
# For this checklevel, don't trap uncertain case exception from
# Interval.__contains__
try:
if vectorizable:
depvar = self.output(indepvar)
depvar_ok = depvar in self.depdomain
else:
depvar = [self.output(ival) for ival in indepvar]
depvar_ok = all([d in self.depdomain for d in depvar])
except PyDSTool_BoundsError as e:
raise ValueError(
"Cannot compute a return value for "
"this independent variable value: " + str(e)
)
except PyDSTool_TypeError:
if not self.defined:
print("Variable '%s' not fully defined." % self.name)
return None
else:
raise
else:
if depvar_ok:
return depvar
else:
if vectorizable:
raise PyDSTool_BoundsError(
"Computed value(s) "
"outside validity range in Variable call"
)
else:
raise PyDSTool_BoundsError(
"Computed value "
+ str(depvar)
+ "outside validity range in Variable call"
)
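    # Hedged usage note (illustrative): checklevel trades speed for safety
    # when evaluating a Variable v at an independent variable value:
    #   v(0.5)                 # level 0: no bounds checking (fastest)
    #   v(0.5, checklevel=1)   # treat uncertain boundary cases as contained
    #   v(0.5, checklevel=2)   # as level 1, but record warnings
    #   v(0.5, checklevel=3)   # raise on out-of-range or uncertain values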
def __repr__(self):
return self._infostr(verbose=0)
__str__ = __repr__
def _infostr(self, verbose=1):
if verbose == 0:
return "Variable " + self.coordname + "(" + self.indepvarname + ")"
else:
try:
if isinputcts(self):
ipstr = "continuous"
else:
ipstr = "discrete"
except ValueError:
ipstr = "not defined"
outputStr = (
"Variable:\n Independent variable '"
+ self.indepvarname
+ "' ["
+ ipstr
+ "]\n"
)
try:
if isoutputcts(self):
opstr = "continuous"
else:
opstr = "discrete"
except ValueError:
opstr = "not defined"
outputStr += " defined in domain " + str(self.indepdomain)
if verbose == 2:
if self.trajirange is None:
outputStr += "\n ranges not known for this trajectory"
else:
outputStr += "\n trajectory ranges " + str(self.trajirange)
outputStr += (
"\nDependent variable '"
+ self.coordname
+ "' ["
+ opstr
+ "]\n defined in domain "
)
if not isinstance(self.depdomain, Interval):
outputStr += _num_type2name[self.coordtype] + ": "
outputStr += str(self.depdomain)
if verbose == 2:
if self.trajdrange is None:
outputStr += "\n ranges not known for this trajectory"
else:
outputStr += "\n trajectory ranges " + str(self.trajdrange)
return outputStr
def info(self, verboselevel=1):
print(self._infostr(verboselevel))
def __copy__(self):
pickledself = pickle.dumps(self)
return pickle.loads(pickledself)
def __deepcopy__(self, memo=None, _nil=[]):
pickledself = pickle.dumps(self)
return pickle.loads(pickledself)
def __getstate__(self):
d = copy.copy(self.__dict__)
# remove reference to Cfunc types by converting to strings
d["indepvartype"] = _num_type2name[self.indepvartype]
d["coordtype"] = _num_type2name[self.coordtype]
if "funcspec" in self._funcreg:
# then self is Imp/ExplicitFnGen and 'output' could not
# be put in _funcreg because it relies on wrap_output
# function that's not in the global namespace (so pickle fails
# to find it)
del d["output"]
for fname, finfo in self._funcreg.items():
if finfo[0] == "self":
try:
del d[fname]
except KeyError:
pass
# else it's a Variable class method which won't get pickled
# anyway, and will be restored to any class not in possession
# of it if this object is unpickled
return d
def __setstate__(self, state):
self.__dict__.update(state)
# print self.name, "- setstate: self.depdomain = ", self.depdomain.get()
# reinstate Cfunc types
self.indepvartype = _num_name2type[self.indepvartype]
self.coordtype = _num_name2type[self.coordtype]
# reinstate dynamic methods / functions
for fname, finfo in self._funcreg.items():
if finfo[0] == "self" and not hasattr(eval(finfo[0]), fname):
# avoids special entry for 'outputdata'
setattr(eval(finfo[0]), fname, finfo[1])
if "funcspec" in self._funcreg:
# Add the specific mapping functions for Ex/ImplicitFnGen objects
funcspec = self._funcreg["funcspec"][1]
outputdata = self._funcreg["outputdata"][1]
if hasattr(self, "_var_namemap"):
var_namemap = self._var_namemap
else:
var_namemap = None
if hasattr(self, "initialconditions"):
ics = copy.copy(self.initialconditions)
else:
ics = None
if hasattr(self, "_refvars"):
if self._refvars is not None and self._refvars != []:
refvars = [copy.copy(v) for v in self._refvars]
else:
refvars = None
else:
refvars = None
# if refvars in dictionary then just leave them there!
self.setOutput(
outputdata, funcspec, self.globalt0, var_namemap, ics, refvars
)
def __del__(self):
# delete object-specific class methods etc. before deleting
# to avoid crowding namespace
## if hasattr(self, 'output'):
## del self.output
for fname, finfo in self._funcreg.items():
# Treat special cases first
if finfo[0] is None:
# don't want to eval(None) below
continue
elif fname == "_impfn":
exec_str = "del Variable." + finfo[0]
try:
exec(exec_str)
except AttributeError:
                    # Unclear why the name appears multiple times, leading to
                    # multiple attempts to delete it (which of course fail
                    # after the first successful attempt)
pass
elif fname is "funcspec":
# doesn't refer to any dynamically-created methods
# so ignore
pass
elif fname is "outputdata":
# doesn't refer to any dynamically-created methods
# so ignore
pass
elif hasattr(eval(finfo[0]), fname):
exec_str = "del " + finfo[0] + "." + fname
try:
exec(exec_str)
except RuntimeError:
                    # sometimes get these when objects are improperly deleted
                    # and new objects with the same name are created
pass
if hasattr(self, "_refvars"):
if self._refvars is not None and self._refvars != []:
for v in self._refvars:
v.__del__()
class HybridVariable(Variable):
"""Mimics part of the API of a non-hybrid variable.
This is a somewhat ugly hack as it's implemented by using a whole
HybridTrajectory object to extract individual variable values,
    rather than extracting a sequence of Variable objects from a
    HybridTrajectory and stitching them back together as a single entity."""
def __init__(self, hybridtraj, coordname, indepdomain, abseps=None):
# store reference to the hybrid trajectory
self._ht = hybridtraj
self.name = "Hybrid variable " + coordname
self.outputdata = None # not used
self.defined = True
self.indepvarname = "t"
self.indepdomain = indepdomain
self.indepvartype = float
self.coordname = coordname
self.depdomain = Interval(self.coordname, float, [-Inf, Inf], abseps=abseps)
self.coordtype = float
self.trajirange = None
self.trajdrange = None
self.diagnostics = Diagnostics()
# important that this isn't a Pointset for Variable.py's
# isinputcts, isoutputcts, etc.
self.output = None
def __call__(self, indepvar, checklevel=0):
return self._ht(indepvar, self.coordname, checklevel=checklevel)
def getDataPoints(self):
"""Returns a Pointset of independent and dependent variable values,
        provided the variable is based on a mesh (otherwise None is returned).
"""
return self._ht.sample([self.coordname])
def underlyingMesh(self):
"""Reveal underlying mesh as arrays, rather than Pointset as returned
by getDataPoints method."""
vs = self._ht.sample([self.coordname])
return array([vs.indepvararray, vs.coordarray[0]])
def __repr__(self):
return "Hybrid variable " + self.coordname
__str__ = __repr__
def info(self, verboselevel=1):
return "Hybrid variable " + self.coordname
# overrides from Variable class
def __getstate__(self):
return copy.copy(self.__dict__)
def __setstate__(self, state):
self.__dict__.update(state)
def __del__(self):
# must override Variable.__del__
pass
class OutputFn(object):
"""One-dimensional function wrapper."""
def __init__(self, fn, datapoints=None, numtypes=(float64, float64), abseps=None):
assert isinstance(fn, types.FunctionType) or isinstance(
fn, types.BuiltinFunctionType
), "fn argument must be a regular Python function"
self.fn = fn
# datapoints can be exhaustive list of known values for fn or
# a Interval range for continuous-valued functions
if datapoints is None:
datapoints = (
Interval("indepvardom", numtypes[0], [-Inf, Inf], abseps=abseps),
Interval("depvardom", numtypes[1], [-Inf, Inf], abseps=abseps),
)
try:
self.datapoints = (datapoints[0], datapoints[1])
except TypeError:
raise TypeError(
"datapoints argument must be a 2-tuple or list " "of 2-tuples or lists"
)
try:
self.types = (numtypes[0], numtypes[1])
except TypeError:
raise TypeError(
"numtypes argument must be a 2-tuple or list " "of 2-tuples or lists"
)
def __call__(self, arg):
if isinstance(arg, _seq_types):
try:
return self.fn(arg)
            except Exception:
                # fn may not accept a sequence directly; fall back to
                # elementwise evaluation below
return array([self.fn(v) for v in arg])
else:
return self.fn(arg)
def __getstate__(self):
d = copy.copy(self.__dict__)
# remove reference to Cfunc types by converting to strings
d["types"] = (_num_type2name[self.types[0]], _num_type2name[self.types[1]])
return d
def __setstate__(self, state):
self.__dict__.update(state)
# reinstate Cfunc types
self.types = (_num_name2type[self.types[0]], _num_name2type[self.types[1]])
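# A minimal usage sketch for OutputFn (an illustrative addition, not part of
# the original module; it assumes only the imports already at the top of this
# file):
#
#     from math import sin
#     f = OutputFn(sin)    # default domains are unbounded Intervals
#     f(0.0)               # scalar call -> 0.0
#     f([0.0, 1.0])        # sequence call falls back to elementwise
#                          # evaluation and returns array([0., 0.8414...])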
# ---------------------------------------------------------------------
def isinputcts(obj):
if isinstance(obj, Variable):
if obj.defined:
if compareNumTypes(obj.indepvartype, float64):
return isinstance(obj.indepdomain, Interval) and not isinstance(
obj.output, Pointset
)
elif compareNumTypes(obj.indepvartype, int32):
return False
else:
raise TypeError("Unsupported independent variable type for Variable")
else:
raise ValueError("Variable is not fully defined")
else:
# provide support for e.g. Trajectories. Cannot use Trajectory class
# name explicitly here because will run into an infinite import loop
# between Variable and Trajectory!
if compareNumTypes(obj.indepvartype, float64):
return isinstance(obj.indepdomain, Interval)
def isinputdiscrete(var):
return not isinputcts(var)
##def isinputdiscrete(var):
## if compareNumTypes(var.indepvartype, float64):
## return type(var.indepdomain) == ndarray or \
## isinstance(var.output, Pointset)
## elif compareNumTypes(var.indepvartype, int32):
## return True
## else:
## raise TypeError("Unsupported independent variable type for Variable")
def isoutputcts(var):
assert isinstance(var, Variable), "Argument must be a Variable"
if var.defined:
if compareNumTypes(var.coordtype, float64):
return isinstance(var.depdomain, Interval) and not isinstance(
var.output, Pointset
)
elif compareNumTypes(var.coordtype, int32):
return False
else:
raise TypeError("Unsupported dependent variable type for Variable")
else:
raise ValueError("Variable is not fully defined")
def isoutputdiscrete(obj):
return not isoutputcts(obj)
def iscontinuous(var):
"""Determine if variable is continuously defined on its input and
output domains."""
assert isinstance(var, Variable), "Argument must be a Variable"
return isinputcts(var) and isoutputcts(var)
def isdiscrete(var):
"""Determine if variable is discretely defined on its input and
output domains."""
return not (isinputcts(var) and isoutputcts(var))
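# Illustrative note (an addition, not from the original module): these
# predicates compose -- iscontinuous(v) is exactly isinputcts(v) and
# isoutputcts(v) -- so a Variable whose output is backed by a Pointset is
# classified as discrete even when both of its domains are float-typed
# Intervals.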
|
[
"six.exec_",
"numpy.asarray",
"copy.copy",
"numpy.isfinite",
"numpy.array",
"numpy.all"
] |
[((45847, 45871), 'copy.copy', 'copy.copy', (['self.__dict__'], {}), '(self.__dict__)\n', (45856, 45871), False, 'import copy\n'), ((51884, 51927), 'numpy.array', 'array', (['[vs.indepvararray, vs.coordarray[0]]'], {}), '([vs.indepvararray, vs.coordarray[0]])\n', (51889, 51927), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((52210, 52234), 'copy.copy', 'copy.copy', (['self.__dict__'], {}), '(self.__dict__)\n', (52219, 52234), False, 'import copy\n'), ((53878, 53902), 'copy.copy', 'copy.copy', (['self.__dict__'], {}), '(self.__dict__)\n', (53887, 53902), False, 'import copy\n'), ((31964, 31981), 'numpy.asarray', 'asarray', (['indepvar'], {}), '(indepvar)\n', (31971, 31981), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((13229, 13261), 'six.exec_', 'six.exec_', (['impfn_str', 'this_scope'], {}), '(impfn_str, this_scope)\n', (13238, 13261), False, 'import six\n'), ((47845, 47878), 'copy.copy', 'copy.copy', (['self.initialconditions'], {}), '(self.initialconditions)\n', (47854, 47878), False, 'import copy\n'), ((4522, 4539), 'numpy.asarray', 'asarray', (['indepvar'], {}), '(indepvar)\n', (4529, 4539), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((15564, 15609), 'numpy.array', 'array', (['[pts.indepvararray, pts.coordarray[0]]'], {}), '([pts.indepvararray, pts.coordarray[0]])\n', (15569, 15609), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((3765, 3779), 'numpy.array', 'array', (['vals[i]'], {}), '(vals[i])\n', (3770, 3779), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((5152, 5166), 'numpy.array', 'array', (['vals[i]'], {}), '(vals[i])\n', (5157, 5166), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((25594, 25605), 'numpy.isfinite', 'isfinite', (['d'], {}), '(d)\n', (25602, 25605), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((30078, 30089), 'numpy.isfinite', 'isfinite', (['d'], {}), '(d)\n', (30086, 30089), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((41281, 41329), 'numpy.all', 'all', (['[(i in self.indepdomain) for i in indepvar]'], {}), '([(i in self.indepdomain) for i in indepvar])\n', (41284, 41329), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((42406, 42450), 'numpy.all', 'all', (['[(d in self.depdomain) for d in depvar]'], {}), '([(d in self.depdomain) for d in depvar])\n', (42409, 42450), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((48072, 48084), 'copy.copy', 'copy.copy', (['v'], {}), '(v)\n', (48081, 48084), False, 'import copy\n'), ((22478, 22513), 'copy.copy', 'copy.copy', (['outputdata.coordnames[0]'], {}), '(outputdata.coordnames[0])\n', (22487, 22513), False, 'import copy\n'), ((26008, 26032), 'numpy.array', 'array', (['d', 'self.coordtype'], {}), '(d, self.coordtype)\n', (26013, 26032), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((26104, 26112), 'numpy.array', 
'array', (['d'], {}), '(d)\n', (26109, 26112), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((26185, 26193), 'numpy.array', 'array', (['d'], {}), '(d)\n', (26190, 26193), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((30320, 30344), 'numpy.array', 'array', (['d', 'self.coordtype'], {}), '(d, self.coordtype)\n', (30325, 30344), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((30414, 30422), 'numpy.array', 'array', (['d'], {}), '(d)\n', (30419, 30422), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((30495, 30503), 'numpy.array', 'array', (['d'], {}), '(d)\n', (30500, 30503), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((38294, 38305), 'numpy.isfinite', 'isfinite', (['d'], {}), '(d)\n', (38302, 38305), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n'), ((39744, 39756), 'numpy.isfinite', 'isfinite', (['dv'], {}), '(dv)\n', (39752, 39756), False, 'from numpy import Inf, NaN, isfinite, sometrue, alltrue, any, all, array, float64, int32, ndarray, asarray\n')]
|
import unittest
import numpy
import pytest
import dpnp as cupy
from tests.third_party.cupy import testing
# from cupy.core import _accelerator
@testing.gpu
class TestSearch(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.argmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_argmax_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.argmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmax(axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return a.argmax()
@testing.for_all_dtypes(no_complex=True)
def test_argmax_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmax()
@testing.for_all_dtypes(no_complex=True)
def test_argmax_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmax(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmax_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return a.argmax(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_argmin_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.argmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_external_argmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.argmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return a.argmin(axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_tie(self, xp, dtype):
a = xp.array([0, 1, 2, 3, 0, 5], dtype)
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
def test_argmin_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
return a.argmin()
@testing.for_all_dtypes(no_complex=True)
def test_argmin_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
a.argmin(axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_argmin_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return a.argmin(axis=1)
# The (currently disabled) class below compares CUB results against NumPy's
# TODO(leofang): test axis after support is added
# @testing.parameterize(*testing.product({
# 'shape': [(10,), (10, 20), (10, 20, 30), (10, 20, 30, 40)],
# 'order': ('C', 'F'),
# }))
# @testing.gpu
# @unittest.skipUnless(cupy.cuda.cub.available, 'The CUB routine is not enabled')
# class TestCubReduction(unittest.TestCase):
# def setUp(self):
# self.old_accelerators = _accelerator.get_routine_accelerators()
# _accelerator.set_routine_accelerators(['cub'])
# def tearDown(self):
# _accelerator.set_routine_accelerators(self.old_accelerators)
# @testing.for_dtypes('bhilBHILefdFD')
# @testing.numpy_cupy_allclose(rtol=1E-5)
# def test_cub_argmin(self, xp, dtype):
# a = testing.shaped_random(self.shape, xp, dtype)
# if self.order == 'C':
# a = xp.ascontiguousarray(a)
# else:
# a = xp.asfortranarray(a)
# if xp is numpy:
# return a.argmin()
# # xp is cupy, first ensure we really use CUB
# ret = cupy.empty(()) # Cython checks return type, need to fool it
# func = 'cupy.core._routines_statistics.cub.device_reduce'
# with testing.AssertFunctionIsCalled(func, return_value=ret):
# a.argmin()
# # ...then perform the actual computation
# return a.argmin()
# @testing.for_dtypes('bhilBHILefdFD')
# @testing.numpy_cupy_allclose(rtol=1E-5)
# def test_cub_argmax(self, xp, dtype):
# a = testing.shaped_random(self.shape, xp, dtype)
# if self.order == 'C':
# a = xp.ascontiguousarray(a)
# else:
# a = xp.asfortranarray(a)
# if xp is numpy:
# return a.argmax()
# # xp is cupy, first ensure we really use CUB
# ret = cupy.empty(()) # Cython checks return type, need to fool it
# func = 'cupy.core._routines_statistics.cub.device_reduce'
# with testing.AssertFunctionIsCalled(func, return_value=ret):
# a.argmax()
# # ...then perform the actual computation
# return a.argmax()
@testing.gpu
@testing.parameterize(*testing.product({
'func': ['argmin', 'argmax'],
'is_module': [True, False],
'shape': [(3, 4), ()],
}))
class TestArgMinMaxDtype(unittest.TestCase):
@testing.for_dtypes(
dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.int64],
name='result_dtype')
@testing.for_all_dtypes(name='in_dtype')
def test_argminmax_dtype(self, in_dtype, result_dtype):
a = testing.shaped_random(self.shape, cupy, in_dtype)
if self.is_module:
func = getattr(cupy, self.func)
y = func(a, dtype=result_dtype)
else:
func = getattr(a, self.func)
y = func(dtype=result_dtype)
assert y.shape == ()
assert y.dtype == result_dtype
@testing.parameterize(
{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
{'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)},
{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)},
{'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)},
)
@testing.gpu
class TestWhereTwoArrays(unittest.TestCase):
@testing.for_all_dtypes_combination(
names=['cond_type', 'x_type', 'y_type'])
@testing.numpy_cupy_allclose()
def test_where_two_arrays(self, xp, cond_type, x_type, y_type):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
        # Almost all values in a matrix produced by `shaped_random` are
        # nonzero. To make the condition array sparse, multiply it by the
        # boolean mask `m`.
cond = testing.shaped_random(self.cond_shape, xp, cond_type) * m
x = testing.shaped_random(self.x_shape, xp, x_type, seed=0)
y = testing.shaped_random(self.y_shape, xp, y_type, seed=1)
return xp.where(cond, x, y)
@testing.parameterize(
{'cond_shape': (2, 3, 4)},
{'cond_shape': (4,)},
{'cond_shape': (2, 3, 4)},
{'cond_shape': (3, 4)},
)
@testing.gpu
class TestWhereCond(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_where_cond(self, xp, dtype):
m = testing.shaped_random(self.cond_shape, xp, xp.bool_)
cond = testing.shaped_random(self.cond_shape, xp, dtype) * m
return xp.where(cond)
@testing.gpu
class TestWhereError(unittest.TestCase):
def test_one_argument(self):
for xp in (numpy, cupy):
cond = testing.shaped_random((3, 4), xp, dtype=xp.bool_)
x = testing.shaped_random((2, 3, 4), xp, xp.int32)
with pytest.raises(ValueError):
xp.where(cond, x)
@testing.parameterize(
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestNonzero(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_nonzero(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.nonzero(array)
@testing.parameterize(
{'array': numpy.array(0)},
{'array': numpy.array(1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.17.0')
class TestNonzeroZeroDimension(unittest.TestCase):
@testing.for_all_dtypes()
def test_nonzero(self, dtype):
for xp in (numpy, cupy):
array = xp.array(self.array, dtype=dtype)
with pytest.raises(DeprecationWarning):
xp.nonzero(array)
@testing.parameterize(
{'array': numpy.array(0)},
{'array': numpy.array(1)},
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestFlatNonzero(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_flatnonzero(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.flatnonzero(array)
@testing.parameterize(
{'array': numpy.empty((0,))},
{'array': numpy.empty((0, 2))},
{'array': numpy.empty((0, 2, 0))},
)
@testing.gpu
class TestArgwhere(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_argwhere(self, xp, dtype):
array = xp.array(self.array, dtype=dtype)
return xp.argwhere(array)
# DPNP_BUG
# dpnp/backend.pyx:86: in dpnp.backend.dpnp_array
# raise TypeError(f"Intel NumPy array(): Unsupported non-sequence obj={type(obj)}")
# E TypeError: Intel NumPy array(): Unsupported non-sequence obj=<class 'int'>
# @testing.parameterize(
# {'array': cupy.array(1)},
# )
# @testing.gpu
# class TestArgwhereZeroDimension(unittest.TestCase):
# def test_argwhere(self):
# with testing.assert_warns(DeprecationWarning):
# return cupy.nonzero(self.array)
@testing.gpu
class TestNanArgMin(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan2(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan3(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan4(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmin_nan5(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmin(a, axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmin_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
xp.nanargmin(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmin_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
return xp.nanargmin(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmin_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return xp.nanargmin(a, axis=1)
@testing.gpu
class TestNanArgMax(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_all(self, xp, dtype):
a = testing.shaped_random((2, 3), xp, dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan(self, xp, dtype):
a = xp.array([float('nan'), -1, 1], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan2(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan3(self, xp, dtype):
a = xp.array([float('nan'), float('nan'), -1, 1, 1.0, -2.0], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan4(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan')],
dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(accept_error=ValueError)
def test_nanargmax_nan5(self, xp, dtype):
a = xp.array([-1, 1, 1.0, -2.0, float('nan'), float('nan'), -1, 1],
dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis_large(self, xp, dtype):
a = testing.shaped_random((3, 1000), xp, dtype)
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis0(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis1(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=1)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_axis2(self, xp, dtype):
a = testing.shaped_random((2, 3, 4), xp, dtype)
return xp.nanargmax(a, axis=2)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_tie(self, xp, dtype):
a = xp.array([0, 5, 2, 3, 4, 5], dtype)
return xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmax_zero_size(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
xp.nanargmax(a)
@testing.for_all_dtypes(no_complex=True)
def test_nanargmax_zero_size_axis0(self, dtype):
for xp in (numpy, cupy):
a = testing.shaped_random((0, 1), xp, dtype)
with pytest.raises(ValueError):
return xp.nanargmax(a, axis=0)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose()
def test_nanargmax_zero_size_axis1(self, xp, dtype):
a = testing.shaped_random((0, 1), xp, dtype)
return xp.nanargmax(a, axis=1)
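# Note (added for clarity): testing.product below expands the parameter grid
# into its cross-product, so every (bins, side, shape) combination becomes a
# separate parameterized test case of TestSearchSorted.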
@testing.gpu
@testing.parameterize(*testing.product(
{'bins': [
[],
[0, 1, 2, 4, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[0.0, 1.0, 2.5, 4.0, 10.0],
[-1.0, 1.0, 2.5, 4.0, 20.0],
[1.5, 2.5, 4.0, 6.0],
[float('-inf'), 1.5, 2.5, 4.0, 6.0],
[1.5, 2.5, 4.0, 6.0, float('inf')],
[float('-inf'), 1.5, 2.5, 4.0, 6.0, float('inf')],
[0.0, 1.0, 1.0, 4.0, 4.0, 10.0],
[0.0, 1.0, 1.0, 4.0, 4.0, 4.0, 4.0, 10.0],
],
'side': ['left', 'right'],
'shape': [(), (10,), (6, 3, 3)]})
)
class TestSearchSorted(unittest.TestCase):
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_searchsorted(self, xp, dtype):
x = testing.shaped_arange(self.shape, xp, dtype)
bins = xp.array(self.bins)
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.gpu
@testing.parameterize(
{'side': 'left'},
{'side': 'right'})
class TestSearchSortedNanInf(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_searchsorted_nanbins(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
bins = xp.array([0, 1, 2, 4, 10, float('nan')])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_nan(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('nan')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
# DPNP_BUG
# Segmentation fault on access to negative index # x[-1] = float('nan') #######
# @testing.numpy_cupy_array_equal()
# def test_searchsorted_nan_last(self, xp):
# x = testing.shaped_arange((10,), xp, xp.float64)
# x[-1] = float('nan')
# bins = xp.array([0, 1, 2, 4, float('nan')])
# y = xp.searchsorted(bins, x, side=self.side)
# return y,
# @testing.numpy_cupy_array_equal()
# def test_searchsorted_nan_last_repeat(self, xp):
# x = testing.shaped_arange((10,), xp, xp.float64)
# x[-1] = float('nan')
# bins = xp.array([0, 1, 2, float('nan'), float('nan')])
# y = xp.searchsorted(bins, x, side=self.side)
# return y,
# @testing.numpy_cupy_array_equal()
# def test_searchsorted_all_nans(self, xp):
# x = testing.shaped_arange((10,), xp, xp.float64)
# x[-1] = float('nan')
# bins = xp.array([float('nan'), float('nan'), float('nan'),
# float('nan'), float('nan')])
# y = xp.searchsorted(bins, x, side=self.side)
# return y,
###############################################################################
@testing.numpy_cupy_array_equal()
def test_searchsorted_inf(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('inf')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.numpy_cupy_array_equal()
def test_searchsorted_minf(self, xp):
x = testing.shaped_arange((10,), xp, xp.float64)
x[5] = float('-inf')
bins = xp.array([0, 1, 2, 4, 10])
y = xp.searchsorted(bins, x, side=self.side)
return y,
@testing.gpu
class TestSearchSortedInvalid(unittest.TestCase):
    # Can't test unordered bins, because NumPy's behavior for
    # searchsorted is undefined in that case
def test_searchsorted_ndbins(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((10,), xp, xp.float64)
bins = xp.array([[10, 4], [2, 1], [7, 8]])
with pytest.raises(ValueError):
xp.searchsorted(bins, x)
@testing.gpu
class TestSearchSortedWithSorter(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_sorter(self, xp):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([3, 2, 1, 4, 0])
y = xp.searchsorted(bins, x, sorter=sorter)
return y,
def test_invalid_sorter(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([0])
with pytest.raises(ValueError):
xp.searchsorted(bins, x, sorter=sorter)
def test_nonint_sorter(self):
for xp in (numpy, cupy):
x = testing.shaped_arange((12,), xp, xp.float64)
bins = xp.array([10, 4, 2, 1, 8])
sorter = xp.array([], dtype=xp.float64)
with pytest.raises(TypeError):
xp.searchsorted(bins, x, sorter=sorter)
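# Conventional standalone entry point (an assumed addition, not necessarily
# present in the upstream file): lets the module be run directly with
# `python <path-to-this-file>` instead of through pytest.
if __name__ == '__main__':
    unittest.main()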
|
[
"tests.third_party.cupy.testing.product",
"tests.third_party.cupy.testing.for_all_dtypes",
"tests.third_party.cupy.testing.parameterize",
"tests.third_party.cupy.testing.for_all_dtypes_combination",
"numpy.empty",
"tests.third_party.cupy.testing.with_requires",
"pytest.raises",
"tests.third_party.cupy.testing.shaped_random",
"numpy.array",
"tests.third_party.cupy.testing.shaped_arange",
"tests.third_party.cupy.testing.for_dtypes",
"tests.third_party.cupy.testing.numpy_cupy_allclose",
"tests.third_party.cupy.testing.numpy_cupy_array_equal"
] |
[((8409, 8713), 'tests.third_party.cupy.testing.parameterize', 'testing.parameterize', (["{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)}", "{'cond_shape': (4,), 'x_shape': (2, 3, 4), 'y_shape': (2, 3, 4)}", "{'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4), 'y_shape': (3, 4)}", "{'cond_shape': (3, 4), 'x_shape': (2, 3, 4), 'y_shape': (4,)}"], {}), "({'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4),\n 'y_shape': (2, 3, 4)}, {'cond_shape': (4,), 'x_shape': (2, 3, 4),\n 'y_shape': (2, 3, 4)}, {'cond_shape': (2, 3, 4), 'x_shape': (2, 3, 4),\n 'y_shape': (3, 4)}, {'cond_shape': (3, 4), 'x_shape': (2, 3, 4),\n 'y_shape': (4,)})\n", (8429, 8713), False, 'from tests.third_party.cupy import testing\n'), ((9415, 9540), 'tests.third_party.cupy.testing.parameterize', 'testing.parameterize', (["{'cond_shape': (2, 3, 4)}", "{'cond_shape': (4,)}", "{'cond_shape': (2, 3, 4)}", "{'cond_shape': (3, 4)}"], {}), "({'cond_shape': (2, 3, 4)}, {'cond_shape': (4,)}, {\n 'cond_shape': (2, 3, 4)}, {'cond_shape': (3, 4)})\n", (9435, 9540), False, 'from tests.third_party.cupy import testing\n'), ((10697, 10735), 'tests.third_party.cupy.testing.with_requires', 'testing.with_requires', (['"""numpy>=1.17.0"""'], {}), "('numpy>=1.17.0')\n", (10718, 10735), False, 'from tests.third_party.cupy import testing\n'), ((20142, 20199), 'tests.third_party.cupy.testing.parameterize', 'testing.parameterize', (["{'side': 'left'}", "{'side': 'right'}"], {}), "({'side': 'left'}, {'side': 'right'})\n", (20162, 20199), False, 'from tests.third_party.cupy import testing\n'), ((203, 242), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (225, 242), False, 'from tests.third_party.cupy import testing\n'), ((248, 277), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (275, 277), False, 'from tests.third_party.cupy import testing\n'), ((405, 444), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (427, 444), False, 'from tests.third_party.cupy import testing\n'), ((450, 479), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (477, 479), False, 'from tests.third_party.cupy import testing\n'), ((618, 657), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (640, 657), False, 'from tests.third_party.cupy import testing\n'), ((663, 715), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (690, 715), False, 'from tests.third_party.cupy import testing\n'), ((841, 880), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (863, 880), False, 'from tests.third_party.cupy import testing\n'), ((886, 915), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (913, 915), False, 'from tests.third_party.cupy import testing\n'), ((1059, 1098), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (1081, 1098), False, 'from tests.third_party.cupy import testing\n'), ((1104, 1133), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (1131, 1133), 
False, 'from tests.third_party.cupy import testing\n'), ((1290, 1329), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (1312, 1329), False, 'from tests.third_party.cupy import testing\n'), ((1335, 1364), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (1362, 1364), False, 'from tests.third_party.cupy import testing\n'), ((1503, 1542), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (1525, 1542), False, 'from tests.third_party.cupy import testing\n'), ((1548, 1577), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (1575, 1577), False, 'from tests.third_party.cupy import testing\n'), ((1716, 1755), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (1738, 1755), False, 'from tests.third_party.cupy import testing\n'), ((1761, 1790), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (1788, 1790), False, 'from tests.third_party.cupy import testing\n'), ((1929, 1968), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (1951, 1968), False, 'from tests.third_party.cupy import testing\n'), ((1974, 2003), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (2001, 2003), False, 'from tests.third_party.cupy import testing\n'), ((2126, 2165), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (2148, 2165), False, 'from tests.third_party.cupy import testing\n'), ((2377, 2416), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (2399, 2416), False, 'from tests.third_party.cupy import testing\n'), ((2640, 2679), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (2662, 2679), False, 'from tests.third_party.cupy import testing\n'), ((2685, 2714), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (2712, 2714), False, 'from tests.third_party.cupy import testing\n'), ((2860, 2899), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (2882, 2899), False, 'from tests.third_party.cupy import testing\n'), ((2905, 2934), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (2932, 2934), False, 'from tests.third_party.cupy import testing\n'), ((3062, 3101), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (3084, 3101), False, 'from tests.third_party.cupy import testing\n'), ((3107, 3159), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (3134, 3159), False, 'from tests.third_party.cupy import testing\n'), ((3285, 3324), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (3307, 3324), False, 'from tests.third_party.cupy 
import testing\n'), ((3330, 3359), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (3357, 3359), False, 'from tests.third_party.cupy import testing\n'), ((3498, 3537), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (3520, 3537), False, 'from tests.third_party.cupy import testing\n'), ((3543, 3572), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (3570, 3572), False, 'from tests.third_party.cupy import testing\n'), ((3716, 3755), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (3738, 3755), False, 'from tests.third_party.cupy import testing\n'), ((3761, 3790), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (3788, 3790), False, 'from tests.third_party.cupy import testing\n'), ((3947, 3986), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (3969, 3986), False, 'from tests.third_party.cupy import testing\n'), ((3992, 4021), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (4019, 4021), False, 'from tests.third_party.cupy import testing\n'), ((4160, 4199), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (4182, 4199), False, 'from tests.third_party.cupy import testing\n'), ((4205, 4234), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (4232, 4234), False, 'from tests.third_party.cupy import testing\n'), ((4373, 4412), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (4395, 4412), False, 'from tests.third_party.cupy import testing\n'), ((4418, 4447), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (4445, 4447), False, 'from tests.third_party.cupy import testing\n'), ((4586, 4625), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (4608, 4625), False, 'from tests.third_party.cupy import testing\n'), ((4631, 4660), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (4658, 4660), False, 'from tests.third_party.cupy import testing\n'), ((4783, 4822), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (4805, 4822), False, 'from tests.third_party.cupy import testing\n'), ((5041, 5080), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (5063, 5080), False, 'from tests.third_party.cupy import testing\n'), ((5304, 5343), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (5326, 5343), False, 'from tests.third_party.cupy import testing\n'), ((5349, 5378), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (5376, 5378), False, 'from tests.third_party.cupy import testing\n'), ((7843, 7947), 'tests.third_party.cupy.testing.for_dtypes', 'testing.for_dtypes', ([], 
{'dtypes': '[numpy.int8, numpy.int16, numpy.int32, numpy.int64]', 'name': '"""result_dtype"""'}), "(dtypes=[numpy.int8, numpy.int16, numpy.int32, numpy.\n int64], name='result_dtype')\n", (7861, 7947), False, 'from tests.third_party.cupy import testing\n'), ((7965, 8004), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'name': '"""in_dtype"""'}), "(name='in_dtype')\n", (7987, 8004), False, 'from tests.third_party.cupy import testing\n'), ((8781, 8856), 'tests.third_party.cupy.testing.for_all_dtypes_combination', 'testing.for_all_dtypes_combination', ([], {'names': "['cond_type', 'x_type', 'y_type']"}), "(names=['cond_type', 'x_type', 'y_type'])\n", (8815, 8856), False, 'from tests.third_party.cupy import testing\n'), ((8871, 8900), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (8898, 8900), False, 'from tests.third_party.cupy import testing\n'), ((9614, 9638), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (9636, 9638), False, 'from tests.third_party.cupy import testing\n'), ((9644, 9676), 'tests.third_party.cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {}), '()\n', (9674, 9676), False, 'from tests.third_party.cupy import testing\n'), ((10409, 10433), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10431, 10433), False, 'from tests.third_party.cupy import testing\n'), ((10439, 10471), 'tests.third_party.cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {}), '()\n', (10469, 10471), False, 'from tests.third_party.cupy import testing\n'), ((10793, 10817), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (10815, 10817), False, 'from tests.third_party.cupy import testing\n'), ((11285, 11309), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (11307, 11309), False, 'from tests.third_party.cupy import testing\n'), ((11315, 11347), 'tests.third_party.cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {}), '()\n', (11345, 11347), False, 'from tests.third_party.cupy import testing\n'), ((11672, 11696), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {}), '()\n', (11694, 11696), False, 'from tests.third_party.cupy import testing\n'), ((11702, 11734), 'tests.third_party.cupy.testing.numpy_cupy_array_equal', 'testing.numpy_cupy_array_equal', ([], {}), '()\n', (11732, 11734), False, 'from tests.third_party.cupy import testing\n'), ((12412, 12451), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (12434, 12451), False, 'from tests.third_party.cupy import testing\n'), ((12457, 12486), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (12484, 12486), False, 'from tests.third_party.cupy import testing\n'), ((12622, 12661), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (12644, 12661), False, 'from tests.third_party.cupy import testing\n'), ((12667, 12719), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (12694, 12719), False, 'from tests.third_party.cupy import testing\n'), ((12853, 12892), 
'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (12875, 12892), False, 'from tests.third_party.cupy import testing\n'), ((12898, 12950), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (12925, 12950), False, 'from tests.third_party.cupy import testing\n'), ((13099, 13138), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (13121, 13138), False, 'from tests.third_party.cupy import testing\n'), ((13144, 13196), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (13171, 13196), False, 'from tests.third_party.cupy import testing\n'), ((13356, 13395), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (13378, 13395), False, 'from tests.third_party.cupy import testing\n'), ((13401, 13453), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (13428, 13453), False, 'from tests.third_party.cupy import testing\n'), ((13634, 13673), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (13656, 13673), False, 'from tests.third_party.cupy import testing\n'), ((13679, 13731), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {'accept_error': 'ValueError'}), '(accept_error=ValueError)\n', (13706, 13731), False, 'from tests.third_party.cupy import testing\n'), ((13919, 13958), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (13941, 13958), False, 'from tests.third_party.cupy import testing\n'), ((13964, 13993), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (13991, 13993), False, 'from tests.third_party.cupy import testing\n'), ((14147, 14186), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (14169, 14186), False, 'from tests.third_party.cupy import testing\n'), ((14192, 14221), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (14219, 14221), False, 'from tests.third_party.cupy import testing\n'), ((14370, 14409), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (14392, 14409), False, 'from tests.third_party.cupy import testing\n'), ((14415, 14444), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (14442, 14444), False, 'from tests.third_party.cupy import testing\n'), ((14593, 14632), 'tests.third_party.cupy.testing.for_all_dtypes', 'testing.for_all_dtypes', ([], {'no_complex': '(True)'}), '(no_complex=True)\n', (14615, 14632), False, 'from tests.third_party.cupy import testing\n'), ((14638, 14667), 'tests.third_party.cupy.testing.numpy_cupy_allclose', 'testing.numpy_cupy_allclose', ([], {}), '()\n', (14665, 14667), False, 'from tests.third_party.cupy import testing\n'), ((14816, 14855), 'tests.third_party.cupy.testing.for_all_dtypes', 
"""Provides an easy way of generating several geometric objects.
CONTAINS
--------
vtkArrowSource
vtkCylinderSource
vtkSphereSource
vtkPlaneSource
vtkLineSource
vtkCubeSource
vtkConeSource
vtkDiskSource
vtkRegularPolygonSource
vtkPyramid
vtkPlatonicSolidSource
vtkSuperquadricSource
as well as some pure-python helpers.
"""
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import check_valid_vector
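# Name -> unit-normal lookup; other pyvista helpers resolve normals given by
# axis name through this table (e.g. NORMALS['-z'] == [0, 0, -1]).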
NORMALS = {
'x': [1, 0, 0],
'y': [0, 1, 0],
'z': [0, 0, 1],
'-x': [-1, 0, 0],
'-y': [0, -1, 0],
'-z': [0, 0, -1],
}
def translate(surf, center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0)):
"""Translate and orient a mesh to a new center and direction.
By default, the input mesh is considered centered at the origin
and facing in the x direction.
"""
normx = np.array(direction)/np.linalg.norm(direction)
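    # Cross against a slightly perturbed +y axis so the result stays non-zero
    # even when ``direction`` is (anti)parallel to the y axis.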
normz = np.cross(normx, [0, 1.0, 0.0000001])
normz /= np.linalg.norm(normz)
normy = np.cross(normz, normx)
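    # Homogeneous transform whose rotation columns map the x/y/z basis vectors
    # onto the new orthonormal frame.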
trans = np.zeros((4, 4))
trans[:3, 0] = normx
trans[:3, 1] = normy
trans[:3, 2] = normz
trans[3, 3] = 1
surf.transform(trans)
if not np.allclose(center, [0., 0., 0.]):
surf.points += np.array(center)
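# Usage sketch (illustrative only): the sources below build meshes facing +x
# at the origin and then call, e.g.,
#   translate(surf, center=(1.0, 2.0, 3.0), direction=(0.0, 0.0, 1.0))
# to move and orient them in place.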
def Cylinder(center=(0.0, 0.0, 0.0), direction=(1.0, 0.0, 0.0),
radius=0.5, height=1.0, resolution=100, capping=True):
"""Create the surface of a cylinder.
See also :func:`pyvista.CylinderStructured`.
Parameters
----------
center : sequence, optional
Location of the centroid in ``[x, y, z]``.
direction : sequence, optional
Direction cylinder points to in ``[x, y, z]``.
radius : float, optional
Radius of the cylinder.
height : float, optional
Height of the cylinder.
resolution : int, optional
Number of points on the circular face of the cylinder.
capping : bool, optional
Cap cylinder ends with polygons. Default ``True``.
Returns
-------
pyvista.PolyData
Cylinder surface.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> cylinder = pyvista.Cylinder(center=[1, 2, 3], direction=[1, 1, 1],
... radius=1, height=2)
>>> cylinder.plot(show_edges=True, line_width=5, cpos='xy')
"""
cylinderSource = _vtk.vtkCylinderSource()
cylinderSource.SetRadius(radius)
cylinderSource.SetHeight(height)
cylinderSource.SetCapping(capping)
cylinderSource.SetResolution(resolution)
cylinderSource.Update()
surf = pyvista.wrap(cylinderSource.GetOutput())
surf.rotate_z(-90, inplace=True)
translate(surf, center, direction)
return surf
def CylinderStructured(radius=0.5, height=1.0,
center=(0.,0.,0.), direction=(1.,0.,0.),
theta_resolution=32, z_resolution=10):
"""Create a cylinder mesh as a :class:`pyvista.StructuredGrid`.
The end caps are left open. This can create a surface mesh if a single
value for the ``radius`` is given or a 3D mesh if multiple radii are given
as a list/array in the ``radius`` argument.
Parameters
----------
radius : float, sequence, optional
Radius of the cylinder. If a sequence, then describes the
radial coordinates of the cells as a range of values as
specified by the ``radius``.
height : float, optional
Height of the cylinder along its Z-axis.
center : sequence
Location of the centroid in ``[x, y, z]``.
direction : sequence
Direction cylinder Z-axis in ``[x, y, z]``.
theta_resolution : int, optional
Number of points on the circular face of the cylinder.
Ignored if ``radius`` is an iterable.
z_resolution : int, optional
Number of points along the height (Z-axis) of the cylinder.
Returns
-------
pyvista.StructuredGrid
Structured cylinder.
Examples
--------
Default structured cylinder
>>> import pyvista
>>> mesh = pyvista.CylinderStructured()
>>> mesh.plot(show_edges=True)
Structured cylinder with an inner radius of 1, outer of 2, with 5
segments.
>>> import numpy as np
>>> mesh = pyvista.CylinderStructured(radius=np.linspace(1, 2, 5))
>>> mesh.plot(show_edges=True)
"""
# Define grid in polar coordinates
r = np.array([radius]).ravel()
nr = len(r)
theta = np.linspace(0, 2*np.pi, num=theta_resolution)
radius_matrix, theta_matrix = np.meshgrid(r, theta)
# Transform to cartesian space
X = radius_matrix * np.cos(theta_matrix)
Y = radius_matrix * np.sin(theta_matrix)
# Make all the nodes in the grid
xx = np.array([X] * z_resolution).ravel()
yy = np.array([Y] * z_resolution).ravel()
dz = height / (z_resolution - 1)
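    # One column per z-layer: fill with the layer spacing, scale by the layer
    # index, then ravel in Fortran order to match the repeated XY rings above.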
    zz = np.full((X.size, z_resolution), dz)
zz *= np.arange(z_resolution)
zz = zz.ravel(order='f')
# Create the grid
grid = pyvista.StructuredGrid()
grid.points = np.c_[xx, yy, zz]
grid.dimensions = [nr, theta_resolution, z_resolution]
# Orient properly in user direction
vx = np.array([0., 0., 1.])
    if not np.allclose(vx, direction):
        direction = np.asarray(direction, dtype=float)
        direction /= np.linalg.norm(direction)
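        # Gram-Schmidt: orthonormalize the +z seed against ``direction`` and
        # complete a right-handed frame, so the grid's z axis maps onto it.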
vx -= vx.dot(direction) * direction
vx /= np.linalg.norm(vx)
vy = np.cross(direction, vx)
rmtx = np.array([vx, vy, direction])
grid.points = grid.points.dot(rmtx)
# Translate to given center
grid.points -= np.array(grid.center)
grid.points += np.array(center)
return grid
def Arrow(start=(0., 0., 0.), direction=(1., 0., 0.), tip_length=0.25,
tip_radius=0.1, tip_resolution=20, shaft_radius=0.05,
shaft_resolution=20, scale=None):
"""Create an arrow.
Parameters
----------
start : iterable, optional
Start location in ``[x, y, z]``.
direction : iterable, optional
Direction the arrow points to in ``[x, y, z]``.
tip_length : float, optional
Length of the tip.
tip_radius : float, optional
Radius of the tip.
tip_resolution : int, optional
Number of faces around the tip.
shaft_radius : float, optional
Radius of the shaft.
shaft_resolution : int, optional
Number of faces around the shaft.
scale : float or str, optional
Scale factor of the entire object, default is ``None``
(i.e. scale of 1). ``'auto'`` scales to length of direction
array.
Returns
-------
pyvista.PolyData
Arrow mesh.
Examples
--------
Plot a default arrow.
>>> import pyvista
>>> mesh = pyvista.Arrow()
>>> mesh.plot(show_edges=True)
"""
# Create arrow object
arrow = _vtk.vtkArrowSource()
arrow.SetTipLength(tip_length)
arrow.SetTipRadius(tip_radius)
arrow.SetTipResolution(tip_resolution)
arrow.SetShaftRadius(shaft_radius)
arrow.SetShaftResolution(shaft_resolution)
arrow.Update()
surf = pyvista.wrap(arrow.GetOutput())
if scale == 'auto':
scale = float(np.linalg.norm(direction))
    if isinstance(scale, (float, int)):
surf.points *= scale
elif scale is not None:
raise TypeError("Scale must be either float, int or 'auto'.")
translate(surf, start, direction)
return surf
def Sphere(radius=0.5, center=(0, 0, 0), direction=(0, 0, 1), theta_resolution=30,
phi_resolution=30, start_theta=0, end_theta=360, start_phi=0, end_phi=180):
"""Create a vtk Sphere.
Parameters
----------
radius : float, optional
Sphere radius.
center : np.ndarray or list, optional
Center in ``[x, y, z]``.
direction : list or tuple or np.ndarray, optional
Direction the top of the sphere points to in ``[x, y, z]``.
theta_resolution : int , optional
Set the number of points in the longitude direction (ranging
from ``start_theta`` to ``end_theta``).
phi_resolution : int, optional
Set the number of points in the latitude direction (ranging from
``start_phi`` to ``end_phi``).
start_theta : float, optional
Starting longitude angle.
end_theta : float, optional
Ending longitude angle.
start_phi : float, optional
Starting latitude angle.
end_phi : float, optional
Ending latitude angle.
Returns
-------
pyvista.PolyData
Sphere mesh.
Examples
--------
Create a sphere using default parameters.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.plot(show_edges=True)
Create a quarter sphere by setting ``end_theta``.
>>> sphere = pyvista.Sphere(end_theta=90)
>>> out = sphere.plot(show_edges=True)
"""
sphere = _vtk.vtkSphereSource()
sphere.SetRadius(radius)
sphere.SetThetaResolution(theta_resolution)
sphere.SetPhiResolution(phi_resolution)
sphere.SetStartTheta(start_theta)
sphere.SetEndTheta(end_theta)
sphere.SetStartPhi(start_phi)
sphere.SetEndPhi(end_phi)
sphere.Update()
surf = pyvista.wrap(sphere.GetOutput())
surf.rotate_y(-90, inplace=True)
translate(surf, center, direction)
return surf
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
i_resolution=10, j_resolution=10):
"""Create a plane.
Parameters
----------
center : list or tuple or np.ndarray
Location of the centroid in ``[x, y, z]``.
direction : list or tuple or np.ndarray
Direction of the plane's normal in ``[x, y, z]``.
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
pyvista.PolyData
Plane mesh.
Examples
--------
Create a default plane.
>>> import pyvista
>>> mesh = pyvista.Plane()
>>> mesh.point_data.clear()
>>> mesh.plot(show_edges=True)
"""
planeSource = _vtk.vtkPlaneSource()
planeSource.SetXResolution(i_resolution)
planeSource.SetYResolution(j_resolution)
planeSource.Update()
surf = pyvista.wrap(planeSource.GetOutput())
surf.points[:, 0] *= i_size
surf.points[:, 1] *= j_size
surf.rotate_y(-90, inplace=True)
translate(surf, center, direction)
return surf
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):
"""Create a line.
Parameters
----------
pointa : np.ndarray or list, optional
Location in ``[x, y, z]``.
pointb : np.ndarray or list, optional
Location in ``[x, y, z]``.
resolution : int, optional
Number of pieces to divide line into.
Returns
-------
pyvista.PolyData
Line mesh.
Examples
--------
Create a line between ``(0, 0, 0)`` and ``(0, 0, 1)``.
>>> import pyvista
>>> mesh = pyvista.Line((0, 0, 0), (0, 0, 1))
>>> mesh.plot(color='k', line_width=10)
"""
if resolution <= 0:
raise ValueError('Resolution must be positive')
if np.array(pointa).size != 3:
raise TypeError('Point A must be a length three tuple of floats.')
if np.array(pointb).size != 3:
raise TypeError('Point B must be a length three tuple of floats.')
src = _vtk.vtkLineSource()
src.SetPoint1(*pointa)
src.SetPoint2(*pointb)
src.SetResolution(resolution)
src.Update()
line = pyvista.wrap(src.GetOutput())
# Compute distance of every point along line
    distance = np.sqrt(np.sum((line.points - np.array(pointa))**2, axis=1))
line['Distance'] = distance
return line
def Tube(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1, radius=1.0, n_sides=15):
"""Create a tube.
Parameters
----------
pointa : np.ndarray or list, optional
Location in ``[x, y, z]``.
pointb : np.ndarray or list, optional
Location in ``[x, y, z]``.
resolution : int, optional
Number of pieces to divide tube into.
radius : float, optional
Minimum tube radius (minimum because the tube radius may vary).
n_sides : int, optional
Number of sides for the tube.
Returns
-------
pyvista.PolyData
Tube mesh.
Examples
--------
Create a tube between ``(0, 0, 0)`` and ``(0, 0, 1)``.
>>> import pyvista
>>> mesh = pyvista.Tube((0, 0, 0), (0, 0, 1))
>>> mesh.plot()
"""
if resolution <= 0:
raise ValueError('Resolution must be positive.')
if np.array(pointa).size != 3:
raise TypeError('Point A must be a length three tuple of floats.')
if np.array(pointb).size != 3:
raise TypeError('Point B must be a length three tuple of floats.')
line_src = _vtk.vtkLineSource()
line_src.SetPoint1(*pointa)
line_src.SetPoint2(*pointb)
line_src.SetResolution(resolution)
line_src.Update()
if n_sides < 3:
raise ValueError('Number of sides `n_sides` must be >= 3')
tube_filter = _vtk.vtkTubeFilter()
tube_filter.SetInputConnection(line_src.GetOutputPort())
tube_filter.SetRadius(radius)
tube_filter.SetNumberOfSides(n_sides)
tube_filter.Update()
return pyvista.wrap(tube_filter.GetOutput())
def Cube(center=(0.0, 0.0, 0.0), x_length=1.0, y_length=1.0,
z_length=1.0, bounds=None, clean=True):
"""Create a cube.
It's possible to specify either the center and side lengths or
just the bounds of the cube. If ``bounds`` are given, all other
arguments are ignored.
.. versionchanged:: 0.33.0
The cube is created using ``vtk.vtkCubeSource``. For
compatibility with :func:`pyvista.PlatonicSolid`, face indices
are also added as cell data. For full compatibility with
:func:`PlatonicSolid() <pyvista.PlatonicSolid>`, one has to
use ``x_length = y_length = z_length = 2 * radius / 3**0.5``.
The cube points are also cleaned by default now, leaving only
the 8 corners and a watertight (manifold) mesh.
Parameters
----------
center : sequence, optional
Center in ``[x, y, z]``.
x_length : float, optional
Length of the cube in the x-direction.
y_length : float, optional
Length of the cube in the y-direction.
z_length : float, optional
Length of the cube in the z-direction.
bounds : sequence, optional
Specify the bounding box of the cube. If given, all other size
arguments are ignored. ``(xMin, xMax, yMin, yMax, zMin, zMax)``.
clean : bool, optional
Whether to clean the raw points of the mesh, making the cube
manifold. Note that this will degrade the texture coordinates
that come with the mesh, so if you plan to map a texture on
the cube, consider setting this to ``False``.
.. versionadded:: 0.33.0
Returns
-------
pyvista.PolyData
Mesh of the cube.
Examples
--------
Create a default cube.
>>> import pyvista
>>> mesh = pyvista.Cube()
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkCubeSource()
if bounds is not None:
if np.array(bounds).size != 6:
raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
src.SetBounds(bounds)
else:
src.SetCenter(center)
src.SetXLength(x_length)
src.SetYLength(y_length)
src.SetZLength(z_length)
src.Update()
cube = pyvista.wrap(src.GetOutput())
# add face index data for compatibility with PlatonicSolid
# but make it inactive for backwards compatibility
    cube.cell_data.set_array([1, 4, 0, 3, 5, 2], 'FaceIndex')
# clean duplicate points
if clean:
cube.clean(inplace=True)
return cube
def Box(bounds=(-1., 1., -1., 1., -1., 1.), level=0, quads=True):
"""Create a box with solid faces for the given bounds.
Parameters
----------
bounds : iterable, optional
Specify the bounding box of the cube.
``(xMin, xMax, yMin, yMax, zMin, zMax)``.
level : int, optional
Level of subdivision of the faces.
quads : bool, optional
Flag to tell the source to generate either a quad or two
triangle for a set of four points. Default ``True``.
Returns
-------
pyvista.PolyData
Mesh of the box.
Examples
--------
Create a box with subdivision ``level=2``.
>>> import pyvista
>>> mesh = pyvista.Box(level=2)
>>> mesh.plot(show_edges=True)
"""
if np.array(bounds).size != 6:
raise TypeError('Bounds must be given as length 6 tuple: (xMin, xMax, yMin, yMax, zMin, zMax)')
src = _vtk.vtkTessellatedBoxSource()
src.SetLevel(level)
if quads:
src.QuadsOn()
else:
src.QuadsOff()
src.SetBounds(bounds)
src.Update()
return pyvista.wrap(src.GetOutput())
def Cone(center=(0., 0., 0.), direction=(1., 0., 0.), height=1.0, radius=None,
capping=True, angle=None, resolution=6):
"""Create a cone.
Parameters
----------
center : iterable, optional
Center in ``[x, y, z]``. Axis of the cone passes through this
point.
direction : iterable, optional
Direction vector in ``[x, y, z]``. Orientation vector of the
cone.
height : float, optional
Height along the cone in its specified direction.
radius : float, optional
Base radius of the cone.
capping : bool, optional
Enable or disable the capping the base of the cone with a
polygon.
angle : float, optional
The angle in degrees between the axis of the cone and a
generatrix.
resolution : int, optional
Number of facets used to represent the cone.
Returns
-------
pyvista.PolyData
Cone mesh.
Examples
--------
Create a default Cone.
>>> import pyvista
>>> mesh = pyvista.Cone()
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkConeSource()
src.SetCapping(capping)
src.SetDirection(direction)
src.SetCenter(center)
src.SetHeight(height)
    if angle and radius:
        raise ValueError("Both radius and angle specified. They are mutually exclusive.")
    elif angle:
        src.SetAngle(angle)
    elif radius:
        src.SetRadius(radius)
    else:
        src.SetRadius(0.5)
src.SetResolution(resolution)
src.Update()
return pyvista.wrap(src.GetOutput())
def Polygon(center=(0., 0., 0.), radius=1, normal=(0, 0, 1), n_sides=6):
"""Create a polygon.
Parameters
----------
center : iterable, optional
Center in ``[x, y, z]``. Central axis of the polygon passes
through this point.
radius : float, optional
The radius of the polygon.
normal : iterable, optional
Direction vector in ``[x, y, z]``. Orientation vector of the polygon.
n_sides : int, optional
Number of sides of the polygon.
Returns
-------
pyvista.PolyData
Mesh of the polygon.
Examples
--------
Create an 8 sided polygon.
>>> import pyvista
>>> mesh = pyvista.Polygon(n_sides=8)
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkRegularPolygonSource()
src.SetCenter(center)
src.SetNumberOfSides(n_sides)
src.SetRadius(radius)
src.SetNormal(normal)
src.Update()
return pyvista.wrap(src.GetOutput())
def Disc(center=(0., 0., 0.), inner=0.25, outer=0.5, normal=(0, 0, 1), r_res=1,
c_res=6):
"""Create a polygonal disk with a hole in the center.
The disk has zero height. The user can specify the inner and outer
radius of the disk, and the radial and circumferential resolution
of the polygonal representation.
Parameters
----------
center : iterable
Center in ``[x, y, z]``. Middle of the axis of the disc.
inner : float, optional
The inner radius.
outer : float, optional
The outer radius.
normal : iterable
Direction vector in ``[x, y, z]``. Orientation vector of the disc.
r_res : int, optional
Number of points in radial direction.
c_res : int, optional
Number of points in circumferential direction.
Returns
-------
pyvista.PolyData
Disk mesh.
Examples
--------
Create a disc with 50 points in the circumferential direction.
>>> import pyvista
>>> mesh = pyvista.Disc(c_res=50)
>>> mesh.plot(show_edges=True, line_width=5)
"""
src = _vtk.vtkDiskSource()
src.SetInnerRadius(inner)
src.SetOuterRadius(outer)
src.SetRadialResolution(r_res)
src.SetCircumferentialResolution(c_res)
src.Update()
normal = np.array(normal)
center = np.array(center)
surf = pyvista.wrap(src.GetOutput())
surf.rotate_y(90, inplace=True)
translate(surf, center, normal)
return surf
def Text3D(string, depth=0.5):
"""Create 3D text from a string.
Parameters
----------
string : str
String to generate 3D text from.
depth : float, optional
Depth of the text. Defaults to ``0.5``.
Returns
-------
pyvista.PolyData
3D text mesh.
Examples
--------
>>> import pyvista
>>> text_mesh = pyvista.Text3D('PyVista')
>>> text_mesh.plot(cpos='xy')
"""
vec_text = _vtk.vtkVectorText()
vec_text.SetText(string)
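    # Extrude the flat text outline along +z by ``depth``, then triangulate the
    # result into a clean surface mesh.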
extrude = _vtk.vtkLinearExtrusionFilter()
extrude.SetInputConnection(vec_text.GetOutputPort())
extrude.SetExtrusionTypeToNormalExtrusion()
extrude.SetVector(0, 0, 1)
extrude.SetScaleFactor(depth)
tri_filter = _vtk.vtkTriangleFilter()
tri_filter.SetInputConnection(extrude.GetOutputPort())
tri_filter.Update()
return pyvista.wrap(tri_filter.GetOutput())
def Wavelet(extent=(-10, 10, -10, 10, -10, 10), center=(0, 0, 0), maximum=255,
x_freq=60, y_freq=30, z_freq=40, x_mag=10, y_mag=18, z_mag=5,
std=0.5, subsample_rate=1):
"""Create a wavelet.
Produces images with pixel values determined by
``Maximum*Gaussian*x_mag*sin(x_freq*x)*sin(y_freq*y)*cos(z_freq*z)``
Values are float scalars on point data with name ``"RTData"``.
Parameters
----------
extent : sequence, optional
Set/Get the extent of the whole output image. Default
``(-10, 10, -10, 10, -10, 10)``.
center : list, optional
Center of the wavelet.
maximum : float, optional
Maximum of the wavelet function.
x_freq : float, optional
Natural frequency in the x direction.
y_freq : float, optional
Natural frequency in the y direction.
z_freq : float, optional
Natural frequency in the z direction.
x_mag : float, optional
Magnitude in the x direction.
y_mag : float, optional
Magnitude in the y direction.
z_mag : float, optional
Magnitude in the z direction.
std : float, optional
Standard deviation.
subsample_rate : int, optional
The sub-sample rate.
Returns
-------
pyvista.PolyData
Wavelet mesh.
Examples
--------
>>> import pyvista
>>> wavelet = pyvista.Wavelet(extent=(0, 50, 0, 50, 0, 10), x_freq=20,
... y_freq=10, z_freq=1, x_mag=100, y_mag=100,
... z_mag=1000)
>>> wavelet.plot(show_scalar_bar=False)
Extract lower valued cells of the wavelet and create a surface from it.
>>> thresh = wavelet.threshold(800).extract_surface()
>>> thresh.plot(show_scalar_bar=False)
Smooth it to create "waves"
>>> waves = thresh.smooth(n_iter=100, relaxation_factor=0.1)
>>> waves.plot(color='white', smooth_shading=True, show_edges=True)
"""
wavelet_source = _vtk.vtkRTAnalyticSource()
wavelet_source.SetWholeExtent(*extent)
wavelet_source.SetCenter(center)
wavelet_source.SetMaximum(maximum)
wavelet_source.SetXFreq(x_freq)
wavelet_source.SetYFreq(y_freq)
wavelet_source.SetZFreq(z_freq)
wavelet_source.SetXMag(x_mag)
wavelet_source.SetYMag(y_mag)
wavelet_source.SetZMag(z_mag)
wavelet_source.SetStandardDeviation(std)
wavelet_source.SetSubsampleRate(subsample_rate)
wavelet_source.Update()
return pyvista.wrap(wavelet_source.GetOutput())
def CircularArc(pointa, pointb, center, resolution=100, negative=False):
"""Create a circular arc defined by two endpoints and a center.
The number of segments composing the polyline is controlled by
setting the object resolution.
Parameters
----------
pointa : sequence
Position of the first end point.
pointb : sequence
Position of the other end point.
center : sequence
Center of the circle that defines the arc.
resolution : int, optional
The number of segments of the polyline that draws the arc.
Resolution of 1 will just create a line.
negative : bool, optional
By default the arc spans the shortest angular sector between
``pointa`` and ``pointb``.
By setting this to ``True``, the longest angular sector is
used instead (i.e. the negative coterminal angle to the
shortest one).
Returns
-------
pyvista.PolyData
Circular arc mesh.
Examples
--------
Create a quarter arc centered at the origin in the xy plane.
>>> import pyvista
>>> arc = pyvista.CircularArc([-1, 0, 0], [0, 1, 0], [0, 0, 0])
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(arc, color='k', line_width=10)
>>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
>>> _ = pl.view_xy()
>>> pl.show()
"""
check_valid_vector(pointa, 'pointa')
check_valid_vector(pointb, 'pointb')
check_valid_vector(center, 'center')
if not np.isclose(
np.linalg.norm(np.array(pointa) - np.array(center)),
np.linalg.norm(np.array(pointb) - np.array(center)),
):
raise ValueError("pointa and pointb are not equidistant from center")
# fix half-arc bug: if a half arc travels directly through the
# center point, it becomes a line
pointb = list(pointb)
pointb[0] -= 1E-10
pointb[1] -= 1E-10
arc = _vtk.vtkArcSource()
arc.SetPoint1(*pointa)
arc.SetPoint2(*pointb)
arc.SetCenter(*center)
arc.SetResolution(resolution)
arc.SetNegative(negative)
arc.Update()
angle = np.deg2rad(arc.GetAngle())
arc = pyvista.wrap(arc.GetOutput())
# Compute distance of every point along circular arc
center = np.array(center).ravel()
radius = np.sqrt(np.sum((arc.points[0]-center)**2, axis=0))
    angles = np.linspace(0.0, 1.0, resolution + 1) * angle
arc['Distance'] = radius * angles
return arc
def CircularArcFromNormal(center, resolution=100, normal=None,
polar=None, angle=None):
"""Create a circular arc defined by normal to the plane of the arc, and an angle.
The number of segments composing the polyline is controlled by
setting the object resolution.
Parameters
----------
center : sequence
Center of the circle that defines the arc.
resolution : int, optional
The number of segments of the polyline that draws the arc.
Resolution of 1 will just create a line.
normal : sequence, optional
The normal vector to the plane of the arc. By default it
points in the positive Z direction.
polar : sequence, optional
Starting point of the arc in polar coordinates. By default it
is the unit vector in the positive x direction.
angle : float, optional
Arc length (in degrees) beginning at the polar vector. The
direction is counterclockwise. By default it is 90.
Returns
-------
pyvista.PolyData
Circular arc mesh.
Examples
--------
Quarter arc centered at the origin in the xy plane.
>>> import pyvista
>>> normal = [0, 0, 1]
>>> polar = [-1, 0, 0]
>>> arc = pyvista.CircularArcFromNormal([0, 0, 0], normal=normal, polar=polar)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(arc, color='k', line_width=10)
>>> _ = pl.show_bounds(location='all', font_size=30, use_2d=True)
>>> _ = pl.view_xy()
>>> pl.show()
"""
check_valid_vector(center, 'center')
if normal is None:
normal = [0, 0, 1]
if polar is None:
polar = [1, 0, 0]
if angle is None:
angle = 90.0
arc = _vtk.vtkArcSource()
arc.SetCenter(*center)
arc.SetResolution(resolution)
arc.UseNormalAndAngleOn()
check_valid_vector(normal, 'normal')
arc.SetNormal(*normal)
check_valid_vector(polar, 'polar')
arc.SetPolarVector(*polar)
arc.SetAngle(angle)
arc.Update()
angle = np.deg2rad(arc.GetAngle())
arc = pyvista.wrap(arc.GetOutput())
# Compute distance of every point along circular arc
center = np.array(center)
radius = np.sqrt(np.sum((arc.points[0] - center)**2, axis=0))
angles = np.linspace(0.0, angle, resolution+1)
arc['Distance'] = radius * angles
return arc
def Pyramid(points=None):
"""Create a pyramid defined by 5 points.
Parameters
----------
points : sequence, optional
Points of the pyramid. Points are ordered such that the first
four points are the four counterclockwise points on the
quadrilateral face, and the last point is the apex.
Defaults to pyramid in example.
Returns
-------
pyvista.UnstructuredGrid
Unstructured grid containing a single pyramid cell.
Examples
--------
>>> import pyvista
>>> pointa = [1.0, 1.0, 0.0]
>>> pointb = [-1.0, 1.0, 0.0]
>>> pointc = [-1.0, -1.0, 0.0]
>>> pointd = [1.0, -1.0, 0.0]
>>> pointe = [0.0, 0.0, 1.608]
>>> pyramid = pyvista.Pyramid([pointa, pointb, pointc, pointd, pointe])
>>> pyramid.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[1.0, 1.0, 0.0],
[-1.0, 1.0, 0.0],
[-1.0, -1.0, 0.0],
[1.0, -1.0, 0.0],
[0.0, 0.0, (4 - 2**0.5)**0.5]]
if len(points) != 5:
raise TypeError('Points must be given as length 5 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
check_valid_vector(points[3], 'points[3]')
check_valid_vector(points[4], 'points[4]')
pyramid = _vtk.vtkPyramid()
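    # Map the cell's five point ids one-to-one onto the supplied points.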
pyramid.GetPointIds().SetId(0, 0)
pyramid.GetPointIds().SetId(1, 1)
pyramid.GetPointIds().SetId(2, 2)
pyramid.GetPointIds().SetId(3, 3)
pyramid.GetPointIds().SetId(4, 4)
ug = _vtk.vtkUnstructuredGrid()
ug.SetPoints(pyvista.vtk_points(np.array(points), False))
ug.InsertNextCell(pyramid.GetCellType(), pyramid.GetPointIds())
return pyvista.wrap(ug)
def Triangle(points=None):
"""Create a triangle defined by 3 points.
Parameters
----------
points : sequence, optional
        Points of the triangle. Defaults to an isosceles
        triangle (see example).
Returns
-------
pyvista.PolyData
Triangle mesh.
Examples
--------
>>> import pyvista
>>> pointa = [0, 0, 0]
>>> pointb = [1, 0, 0]
>>> pointc = [0.5, 0.707, 0]
>>> triangle = pyvista.Triangle([pointa, pointb, pointc])
>>> triangle.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[0, 0, 0], [1, 0, 0], [0.5, 0.5**0.5, 0]]
if len(points) != 3:
raise TypeError('Points must be given as length 3 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
cells = np.array([[3, 0, 1, 2]])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Rectangle(points=None):
"""Create a rectangle defined by 4 points.
Parameters
----------
points : sequence, optional
Points of the rectangle. Defaults to a simple example.
Returns
-------
pyvista.PolyData
Rectangle mesh.
Examples
--------
>>> import pyvista
>>> pointa = [1.0, 0.0, 0.0]
>>> pointb = [1.0, 1.0, 0.0]
>>> pointc = [0.0, 1.0, 0.0]
>>> pointd = [0.0, 0.0, 0.0]
>>> rectangle = pyvista.Rectangle([pointa, pointb, pointc, pointd])
>>> rectangle.plot(show_edges=True, line_width=5)
"""
if points is None:
points = [[1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 0.0]]
if len(points) != 4:
raise TypeError('Points must be given as length 4 np.ndarray or list')
check_valid_vector(points[0], 'points[0]')
check_valid_vector(points[1], 'points[1]')
check_valid_vector(points[2], 'points[2]')
check_valid_vector(points[3], 'points[3]')
cells = np.array([[4, 0, 1, 2, 3]])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Circle(radius=0.5, resolution=100):
"""Create a single PolyData circle defined by radius in the XY plane.
Parameters
----------
radius : float, optional
Radius of circle.
resolution : int, optional
Number of points on the circle.
Returns
-------
pyvista.PolyData
Circle mesh.
Examples
--------
>>> import pyvista
>>> radius = 0.5
>>> circle = pyvista.Circle(radius)
>>> circle.plot(show_edges=True, line_width=5)
"""
points = np.zeros((resolution, 3))
theta = np.linspace(0.0, 2.0*np.pi, resolution)
points[:, 0] = radius * np.cos(theta)
points[:, 1] = radius * np.sin(theta)
cells = np.array([np.append(np.array([resolution]), np.arange(resolution))])
return pyvista.wrap(pyvista.PolyData(points, cells))
def Superquadric(center=(0., 0., 0.), scale=(1., 1., 1.), size=0.5,
theta_roundness=1., phi_roundness=1.,
theta_resolution=16, phi_resolution=16,
toroidal=False, thickness=1/3):
"""Create a superquadric.
Parameters
----------
center : iterable, optional
Center of the superquadric in ``[x, y, z]``.
scale : iterable, optional
Scale factors of the superquadric in ``[x, y, z]``.
size : float, optional
Superquadric isotropic size.
theta_roundness : float, optional
Superquadric east/west roundness.
Values range from 0 (rectangular) to 1 (circular) to higher orders.
phi_roundness : float, optional
Superquadric north/south roundness.
Values range from 0 (rectangular) to 1 (circular) to higher orders.
theta_resolution : int, optional
Number of points in the longitude direction.
Values are rounded to nearest multiple of 4.
phi_resolution : int, optional
Number of points in the latitude direction.
Values are rounded to nearest multiple of 8.
toroidal : bool, optional
Whether or not the superquadric is toroidal (``True``)
or ellipsoidal (``False``).
thickness : float, optional
Superquadric ring thickness.
Only applies if toroidal is set to ``True``.
Returns
-------
pyvista.PolyData
Superquadric mesh.
See Also
--------
pyvista.ParametricSuperEllipsoid :
Parametric superquadric if toroidal is ``False``.
pyvista.ParametricSuperToroid :
Parametric superquadric if toroidal is ``True``.
Examples
--------
>>> import pyvista
>>> superquadric = pyvista.Superquadric(scale=(3., 1., 0.5),
... phi_roundness=0.1,
... theta_roundness=0.5)
>>> superquadric.plot(show_edges=True)
"""
superquadricSource = _vtk.vtkSuperquadricSource()
superquadricSource.SetCenter(center)
superquadricSource.SetScale(scale)
superquadricSource.SetSize(size)
superquadricSource.SetThetaRoundness(theta_roundness)
superquadricSource.SetPhiRoundness(phi_roundness)
superquadricSource.SetThetaResolution(round(theta_resolution/4)*4)
superquadricSource.SetPhiResolution(round(phi_resolution/8)*8)
superquadricSource.SetToroidal(toroidal)
superquadricSource.SetThickness(thickness)
superquadricSource.Update()
return pyvista.wrap(superquadricSource.GetOutput())
def PlatonicSolid(kind='tetrahedron', radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a Platonic solid of a given size.
Parameters
----------
kind : str or int, optional
The kind of Platonic solid to create. Either the name of the
polyhedron or an integer index:
* ``'tetrahedron'`` or ``0``
* ``'cube'`` or ``1``
* ``'octahedron'`` or ``2``
* ``'icosahedron'`` or ``3``
* ``'dodecahedron'`` or ``4``
radius : float, optional
The radius of the circumscribed sphere for the solid to create.
center : sequence, optional
Three-length sequence defining the center of the solid to create.
Returns
-------
pyvista.PolyData
One of the five Platonic solids. Cell scalars are defined that
assign integer labels to each face (with array name
``"FaceIndex"``).
Examples
--------
Create and plot a dodecahedron.
>>> import pyvista
>>> dodeca = pyvista.PlatonicSolid('dodecahedron')
>>> dodeca.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
kinds = {
'tetrahedron': 0,
'cube': 1,
'octahedron': 2,
'icosahedron': 3,
'dodecahedron': 4,
}
if isinstance(kind, str):
if kind not in kinds:
raise ValueError(f'Invalid Platonic solid kind "{kind}".')
kind = kinds[kind]
elif isinstance(kind, int) and kind not in range(5):
raise ValueError(f'Invalid Platonic solid index "{kind}".')
elif not isinstance(kind, int):
raise ValueError('Invalid Platonic solid index type '
f'"{type(kind).__name__}".')
check_valid_vector(center, 'center')
solid = _vtk.vtkPlatonicSolidSource()
solid.SetSolidType(kind)
solid.Update()
solid = pyvista.wrap(solid.GetOutput())
solid.scale(radius, inplace=True)
solid.points += np.asanyarray(center) - solid.center
# rename and activate cell scalars
cell_data = solid.get_array(0)
solid.clear_data()
solid.cell_data['FaceIndex'] = cell_data
return solid
def Tetrahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a tetrahedron of a given size.
A tetrahedron is composed of four congruent equilateral triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the tetrahedron.
center : sequence, optional
Three-length sequence defining the center of the tetrahedron.
Returns
-------
pyvista.PolyData
Mesh for the tetrahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot a tetrahedron.
>>> import pyvista
>>> tetra = pyvista.Tetrahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='tetrahedron', radius=radius, center=center)
def Octahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create an octahedron of a given size.
An octahedron is composed of eight congruent equilateral
triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the octahedron.
center : sequence, optional
Three-length sequence defining the center of the octahedron.
Returns
-------
pyvista.PolyData
Mesh for the octahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot an octahedron.
>>> import pyvista
>>> tetra = pyvista.Octahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='octahedron', radius=radius, center=center)
def Dodecahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create a dodecahedron of a given size.
A dodecahedron is composed of twelve congruent regular pentagons.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the dodecahedron.
center : sequence, optional
Three-length sequence defining the center of the dodecahedron.
Returns
-------
pyvista.PolyData
Mesh for the dodecahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot a dodecahedron.
>>> import pyvista
>>> tetra = pyvista.Dodecahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='dodecahedron', radius=radius, center=center)
def Icosahedron(radius=1.0, center=(0.0, 0.0, 0.0)):
"""Create an icosahedron of a given size.
An icosahedron is composed of twenty congruent equilateral
triangles.
Parameters
----------
radius : float, optional
The radius of the circumscribed sphere for the icosahedron.
center : sequence, optional
Three-length sequence defining the center of the icosahedron.
Returns
-------
pyvista.PolyData
Mesh for the icosahedron. Cell scalars are defined that assign
integer labels to each face (with array name ``"FaceIndex"``).
Examples
--------
Create and plot an icosahedron.
>>> import pyvista
>>> tetra = pyvista.Icosahedron()
>>> tetra.plot(categories=True)
See :ref:`platonic_example` for more examples using this filter.
"""
return PlatonicSolid(kind='icosahedron', radius=radius, center=center)
'np.linalg.norm', (['direction'], {}), '(direction)\n', (870, 881), True, 'import numpy as np\n'), ((1164, 1200), 'numpy.allclose', 'np.allclose', (['center', '[0.0, 0.0, 0.0]'], {}), '(center, [0.0, 0.0, 0.0])\n', (1175, 1200), True, 'import numpy as np\n'), ((1222, 1238), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (1230, 1238), True, 'import numpy as np\n'), ((4605, 4625), 'numpy.cos', 'np.cos', (['theta_matrix'], {}), '(theta_matrix)\n', (4611, 4625), True, 'import numpy as np\n'), ((4650, 4670), 'numpy.sin', 'np.sin', (['theta_matrix'], {}), '(theta_matrix)\n', (4656, 4670), True, 'import numpy as np\n'), ((5211, 5237), 'numpy.allclose', 'np.allclose', (['vx', 'direction'], {}), '(vx, direction)\n', (5222, 5237), True, 'import numpy as np\n'), ((5260, 5285), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (5274, 5285), True, 'import numpy as np\n'), ((5344, 5362), 'numpy.linalg.norm', 'np.linalg.norm', (['vx'], {}), '(vx)\n', (5358, 5362), True, 'import numpy as np\n'), ((5376, 5399), 'numpy.cross', 'np.cross', (['direction', 'vx'], {}), '(direction, vx)\n', (5384, 5399), True, 'import numpy as np\n'), ((5415, 5444), 'numpy.array', 'np.array', (['[vx, vy, direction]'], {}), '([vx, vy, direction])\n', (5423, 5444), True, 'import numpy as np\n'), ((11806, 11822), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (11814, 11822), True, 'import numpy as np\n'), ((26999, 27044), 'numpy.sum', 'np.sum', (['((arc.points[0] - center) ** 2)'], {'axis': '(0)'}), '((arc.points[0] - center) ** 2, axis=0)\n', (27005, 27044), True, 'import numpy as np\n'), ((27055, 27111), 'numpy.arange', 'np.arange', (['(0.0)', '(1.0 + 1.0 / resolution)', '(1.0 / resolution)'], {}), '(0.0, 1.0 + 1.0 / resolution, 1.0 / resolution)\n', (27064, 27111), True, 'import numpy as np\n'), ((29380, 29425), 'numpy.sum', 'np.sum', (['((arc.points[0] - center) ** 2)'], {'axis': '(0)'}), '((arc.points[0] - center) ** 2, axis=0)\n', (29386, 29425), True, 'import numpy as np\n'), ((32302, 32333), 'pyvista.PolyData', 'pyvista.PolyData', (['points', 'cells'], {}), '(points, cells)\n', (32318, 32333), False, 'import pyvista\n'), ((33393, 33424), 'pyvista.PolyData', 'pyvista.PolyData', (['points', 'cells'], {}), '(points, cells)\n', (33409, 33424), False, 'import pyvista\n'), ((34056, 34069), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (34062, 34069), True, 'import numpy as np\n'), ((34098, 34111), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (34104, 34111), True, 'import numpy as np\n'), ((34217, 34248), 'pyvista.PolyData', 'pyvista.PolyData', (['points', 'cells'], {}), '(points, cells)\n', (34233, 34248), False, 'import pyvista\n'), ((38800, 38821), 'numpy.asanyarray', 'np.asanyarray', (['center'], {}), '(center)\n', (38813, 38821), True, 'import numpy as np\n'), ((4388, 4406), 'numpy.array', 'np.array', (['[radius]'], {}), '([radius])\n', (4396, 4406), True, 'import numpy as np\n'), ((4718, 4746), 'numpy.array', 'np.array', (['([X] * z_resolution)'], {}), '([X] * z_resolution)\n', (4726, 4746), True, 'import numpy as np\n'), ((4764, 4792), 'numpy.array', 'np.array', (['([Y] * z_resolution)'], {}), '([Y] * z_resolution)\n', (4772, 4792), True, 'import numpy as np\n'), ((7127, 7152), 'numpy.linalg.norm', 'np.linalg.norm', (['direction'], {}), '(direction)\n', (7141, 7152), True, 'import numpy as np\n'), ((11277, 11293), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (11285, 11293), True, 'import numpy as np\n'), ((11387, 11403), 'numpy.array', 
'np.array', (['pointb'], {}), '(pointb)\n', (11395, 11403), True, 'import numpy as np\n'), ((11753, 11783), 'numpy.sum', 'np.sum', (['((p1 - p0) ** 2)'], {'axis': '(1)'}), '((p1 - p0) ** 2, axis=1)\n', (11759, 11783), True, 'import numpy as np\n'), ((12777, 12793), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (12785, 12793), True, 'import numpy as np\n'), ((12887, 12903), 'numpy.array', 'np.array', (['pointb'], {}), '(pointb)\n', (12895, 12903), True, 'import numpy as np\n'), ((16827, 16843), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (16835, 16843), True, 'import numpy as np\n'), ((26953, 26969), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (26961, 26969), True, 'import numpy as np\n'), ((31225, 31241), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (31233, 31241), True, 'import numpy as np\n'), ((15421, 15437), 'numpy.array', 'np.array', (['bounds'], {}), '(bounds)\n', (15429, 15437), True, 'import numpy as np\n'), ((34144, 34166), 'numpy.array', 'np.array', (['[resolution]'], {}), '([resolution])\n', (34152, 34166), True, 'import numpy as np\n'), ((34168, 34189), 'numpy.arange', 'np.arange', (['resolution'], {}), '(resolution)\n', (34177, 34189), True, 'import numpy as np\n'), ((26248, 26264), 'numpy.array', 'np.array', (['pointa'], {}), '(pointa)\n', (26256, 26264), True, 'import numpy as np\n'), ((26267, 26283), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (26275, 26283), True, 'import numpy as np\n'), ((26309, 26325), 'numpy.array', 'np.array', (['pointb'], {}), '(pointb)\n', (26317, 26325), True, 'import numpy as np\n'), ((26328, 26344), 'numpy.array', 'np.array', (['center'], {}), '(center)\n', (26336, 26344), True, 'import numpy as np\n')]
|
'''
A class that performs tracking and drift scans
with parameters acquired from the scan queue.
Author: <NAME>
Date: June 2018
'''
from CommandStation import CommandStation
from astropy.coordinates import SkyCoord, EarthLocation, AltAz
from astropy.time import Time
from astropy.table import Table
from astropy import units as u
from numpy import linspace
from datetime import date
from srtutility.NTPTime import NTPTime
import io
import re
import sqlite3
import _thread
class Scan:
def __init__(self):
self.station = CommandStation()
self.ntp = NTPTime()
self.database_location = '../srtdatabase/srtdata.db'
# Method to take a single data point at a single frequency for a single source.
#
# :param azal: tuple containing azimuth and altitude of scan position
# :param freq: frequency in MHz at which to measure
# :return scan: tuple containing a single power measurement and boolean indicating successful movement
def singlescan(self, azal, freq):
movesuccess = self.station.movebyazal(azal[0], azal[1]) # move station to scan position
if movesuccess:
scan = self.station.readpower(freq) # read power at frequency freq
else:
scan = 0
return (scan, movesuccess)
# Method to take data points across a spectrum for a single source.
#
# :param azal: tuple containing azimuth and altitude of scan position
# :param flimit: tuple containing lower and upper frequency limits in MHz
# :param stepnum: number of steps to take over the frequency range
# :return data: dictionary containing a single spectrum with start and end times and a time correction value
def singlespectrum(self, azal, flimit, stepnum):
spectrum = []
starttime = self.ntp.getcurrenttime() # get start time of spectrum scan
spectrumsuccess = True
for freq in linspace(flimit[0], flimit[1], stepnum): # sweep through frequencies in range, taking stepnum steps
if spectrumsuccess:
scan = self.singlescan(azal, freq) # do single scan at current frequency
                if not scan[1]:
spectrumsuccess = False
else:
scan = (0, False)
spectrum.append(scan[0]) # append scan result to spectrum
endtime = self.ntp.getcurrenttime() # get end time of spectrum scan
data = {'spectrum': spectrum, 'starttime': starttime, 'endtime': endtime, 'spectrumsuccess': spectrumsuccess} # package spectrum and time data
return data
# Method to track a position and take data for a specific duration.
#
# :param scanid: the id of the current scan
    # :param pos: tuple containing right ascension and declination of the position to track
# :param flimit: tuple containing lower and upper frequency limits in MHz
# :param stepnum: number of steps to take over the frequency range
# :param time: unix time at which to stop scanning
# :return trackdata: tuple containing a list of scan data and a string indicating the status of the scan
def track(self, scanid, pos, flimit, stepnum, time):
print('running a track scan')
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
curtime = self.ntp.getcurrenttime() # get start time of scan
trackdata = []
while curtime < time: # continue scanning until current time is past the end time
status = cur.execute("SELECT * FROM SCANIDS WHERE ID = ?", (scanid,)).fetchone() # check current status to see if scan was cancelled
if status['status'] == 'cancelled': # if scan was cancelled, return data collected so far
print('scan was cancelled')
srtdb.close()
return (trackdata, 'cancelled')
azal = self.getazal(pos) # get current azimuth and altitude of tracked position
if azal == 'positionerror' or azal == 'moveboundserror': # check for invalid position or movement, return if found
srtdb.close()
return (trackdata, azal)
spectrumdata = self.singlespectrum(azal, flimit, stepnum) # take a spectrum measurement
trackdata.append(spectrumdata) # append spectrum data to the scan
            if not spectrumdata['spectrumsuccess']:
print('scan timed out')
srtdb.close()
return (trackdata, 'timeout')
curtime = self.ntp.getcurrenttime() # update current time
print('scan complete')
srtdb.close()
return (trackdata, 'complete')
# Method to take data at a single drift position for a specific duration.
#
# :param scanid: the id of the current scan
    # :param pos: tuple containing right ascension and declination of the drift position
# :param flimit: tuple containing lower and upper frequency limits in MHz
# :param stepnum: number of steps to take over the frequency range
# :param time: unix time at which to stop scanning
# :return driftdata: tuple containing a list of scan data and a string indicating the status of the scan
def drift(self, scanid, pos, flimit, stepnum, time):
print('running a drift scan')
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
curtime = self.ntp.getcurrenttime() # get start time of scan
driftdata = []
azal = self.getazal(pos) # get azimuth and altitude of the drift position
        if azal == 'positionerror' or azal == 'moveboundserror': # check for invalid position or movement, return if found
srtdb.close()
return (driftdata, azal)
while curtime < time: # continue scanning until the current time is past the end time
status = cur.execute("SELECT * FROM SCANID WHERE ID = ?", (scanid,)).fetchone() # check current status to see if scan was cancelled
if status['status'] == 'cancelled': # if scan was cancelled, return data collected so far
print('scan was cancelled')
srtdb.close()
return (driftdata, 'cancelled')
spectrumdata = self.singlespectrum(azal, flimit, stepnum) # take a spectrum measurement
driftdata.append(spectrumdata) # append spectrum data to the scan
            if not spectrumdata['spectrumsuccess']:
print('scan timed out')
srtdb.close()
return (driftdata, 'timeout')
curtime = self.ntp.getcurrenttime() # update current time
print('scan complete')
srtdb.close()
return (driftdata, 'complete')
# Method that performs an entire scan and stores the collected data in the database.
#
# :param nextscan: a dict object containing the parameters of a scan
def donextscan(self, nextscan):
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
pos = (nextscan['ras'], nextscan['dec']) # get position of scan
flower = nextscan['freqlower'] # get spectrum parameters
fupper = nextscan['frequpper']
stepnum = nextscan['stepnum']
duration = re.split('[hms]', nextscan['duration']) # get duration values of scan
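        # e.g. re.split('[hms]', '0h0m30s') -> ['0', '0', '30', '']:
        # index 0 is hours, 1 is minutes, 2 is seconds (the trailing '' is unused)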
seconds = int(duration[0]) * 60 * 60 + int(duration[1]) * 60 + int(duration[2])
curtime = self.ntp.getcurrenttime()
endtime = curtime + seconds # calculate the ending time of the scan in unix time
cur.execute("UPDATE STATUS SET ID = ?, CODE = ?", (nextscan['id'], 'ok')) # update the STATUS table
srtdb.commit()
if nextscan['type'] == 'track':
scandata = self.track(nextscan['id'], pos, (flower, fupper), stepnum, endtime) # do a track scan
else:
scandata = self.drift(nextscan['id'], pos, (flower, fupper), stepnum, endtime) # do a drift scan
if len(scandata[0]) != 0:
print('saving scan data')
starttime = Time(scandata[0][0]['starttime'], format = 'unix') # package scan time info into astropy Time objects for format conversion
            endtime = Time(scandata[0][-1]['endtime'], format = 'unix')
nextscan['starttime'] = starttime.iso # store start and end times with scan params in iso format
nextscan['endtime'] = endtime.iso
tablerows = []
for scan in scandata[0]:
tablerows.append(scan['spectrum'])
            t = Table(rows = tablerows, meta = nextscan) # initialize astropy Table object to store scan data with scan params as table metadata
# for scan in scandata[0]: # add scan data to the Table
# t.add_row(scan['spectrum'])
b = io.BytesIO() # initialize byte stream for FITS file writing
t.write(b, format='fits') # write the Table to the byte stream in FITS format
d = date.today() # get today's date
with open('testfits.fits', 'w') as f:
f.write(b.getvalue().decode('ascii'))
cur.execute("INSERT INTO SCANRESULTS VALUES (?,?)", (nextscan['id'], b.getvalue())) # store scan name, date, type, and data in the db
srtdb.commit()
cur.execute("UPDATE SCANIDS SET STATUS = ? WHERE ID = ?", (scandata[1], nextscan['id']))
scanname = cur.execute("SELECT * FROM SCANIDS WHERE ID = ?", (nextscan['id'],)).fetchone()['name']
cur.execute("INSERT INTO SCANHISTORY VALUES (?,?,?,?,?,?)", (nextscan['id'], scanname, nextscan['type'], d.day, d.month, d.year))
srtdb.commit()
srtdb.close()
# Helper method to get the azimuth and altitude of a position.
#
# :param pos: tuple containing right ascension and declination
# :return azal: tuple containing azimuth and altitude, or a string containing an error code
def getazal(self, pos):
print('calculating azal')
srtdb = sqlite3.connect(self.database_location) # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
configdata = cur.execute("SELECT * FROM CONFIG").fetchone() # retrieve config data from the database
position = SkyCoord(pos[0], pos[1], frame = 'icrs') # convert position into astropy SkyCoord object for coord transformation
location = EarthLocation(lat = configdata['lat'], lon = configdata['lon'], height = configdata['height']) # convert location into astropy EarthLocation
srtdb.close()
unixtime = self.ntp.getcurrenttime() # get curent time to establish AltAz reference frame
observingtime = Time(unixtime, format = 'unix') # create astropy Time object using converted ntp time
azalframe = AltAz(location = location, obstime = observingtime) # create AltAz reference frame
try:
            position = position.transform_to(azalframe) # transform position from equatorial (ICRS) coords to az/alt coords
except ValueError as e: # if transformation is impossible, return position error
print('positionerror')
return 'positionerror'
azal = (float(position.az.to_string(unit=u.deg, decimal=True)), float(position.alt.to_string(unit=u.deg, decimal=True))) # create azal tuple
if azal[1] < 0 or azal[1] > 180: # if position is not in the sky, return position error
print('positionerror')
return 'positionerror'
if azal[0] < configdata['azlower'] or azal[0] > configdata['azupper']: # if motion would violate movement bounds, return movebounds error
print('moveboundserror')
return 'moveboundserror'
if azal[1] < configdata['allower'] or azal[1] > configdata['alupper']:
print('moveboundserror')
return 'moveboundserror'
print(str(azal[0]) + ', ' + str(azal[1]))
return azal
def main():
srtdb = sqlite3.connect('../srtdatabase/srtdata.db') # establish a connection and cursor into the database
srtdb.row_factory = sqlite3.Row
cur = srtdb.cursor()
# cur.execute("INSERT INTO SCANIDS VALUES (?,?,?)", (-50, 'scantest', 'scheduled'))
# cur.execute("INSERT INTO SCANPARAMS VALUES (?,?,?,?,?,?,?,?,?)", (-50, 'track', 'sun', '9h46m58s', '13d22m20s', '0h0m30s', 1500, 1510, 10))
# srtdb.commit()
scan = cur.execute("SELECT * FROM SCANPARAMS WHERE ID = ?", (-50,)).fetchone()
nextscan = {}
for key in scan.keys():
nextscan[key.lower()] = scan[key]
station = Scan()
# _thread.start_new_thread(station.donextscan, (nextscan,))
station.donextscan(nextscan)
# main()
|
[
"io.BytesIO",
"re.split",
"astropy.table.Table",
"astropy.time.Time",
"astropy.coordinates.AltAz",
"CommandStation.CommandStation",
"datetime.date.today",
"sqlite3.connect",
"astropy.coordinates.EarthLocation",
"numpy.linspace",
"srtutility.NTPTime.NTPTime",
"astropy.coordinates.SkyCoord"
] |
[((11124, 11168), 'sqlite3.connect', 'sqlite3.connect', (['"""../srtdatabase/srtdata.db"""'], {}), "('../srtdatabase/srtdata.db')\n", (11139, 11168), False, 'import sqlite3\n'), ((527, 543), 'CommandStation.CommandStation', 'CommandStation', ([], {}), '()\n', (541, 543), False, 'from CommandStation import CommandStation\n'), ((560, 569), 'srtutility.NTPTime.NTPTime', 'NTPTime', ([], {}), '()\n', (567, 569), False, 'from srtutility.NTPTime import NTPTime\n'), ((1794, 1833), 'numpy.linspace', 'linspace', (['flimit[0]', 'flimit[1]', 'stepnum'], {}), '(flimit[0], flimit[1], stepnum)\n', (1802, 1833), False, 'from numpy import linspace\n'), ((2989, 3028), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (3004, 3028), False, 'import sqlite3\n'), ((4925, 4964), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (4940, 4964), False, 'import sqlite3\n'), ((6440, 6479), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (6455, 6479), False, 'import sqlite3\n'), ((6803, 6842), 're.split', 're.split', (['"""[hms]"""', "nextscan['duration']"], {}), "('[hms]', nextscan['duration'])\n", (6811, 6842), False, 'import re\n'), ((9296, 9335), 'sqlite3.connect', 'sqlite3.connect', (['self.database_location'], {}), '(self.database_location)\n', (9311, 9335), False, 'import sqlite3\n'), ((9566, 9604), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['pos[0]', 'pos[1]'], {'frame': '"""icrs"""'}), "(pos[0], pos[1], frame='icrs')\n", (9574, 9604), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((9697, 9790), 'astropy.coordinates.EarthLocation', 'EarthLocation', ([], {'lat': "configdata['lat']", 'lon': "configdata['lon']", 'height': "configdata['height']"}), "(lat=configdata['lat'], lon=configdata['lon'], height=\n configdata['height'])\n", (9710, 9790), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((9969, 9998), 'astropy.time.Time', 'Time', (['unixtime'], {'format': '"""unix"""'}), "(unixtime, format='unix')\n", (9973, 9998), False, 'from astropy.time import Time\n'), ((10071, 10118), 'astropy.coordinates.AltAz', 'AltAz', ([], {'location': 'location', 'obstime': 'observingtime'}), '(location=location, obstime=observingtime)\n', (10076, 10118), False, 'from astropy.coordinates import SkyCoord, EarthLocation, AltAz\n'), ((7526, 7574), 'astropy.time.Time', 'Time', (["scandata[0][0]['starttime']"], {'format': '"""unix"""'}), "(scandata[0][0]['starttime'], format='unix')\n", (7530, 7574), False, 'from astropy.time import Time\n'), ((7977, 8013), 'astropy.table.Table', 'Table', ([], {'rows': 'tablerows', 'meta': 'nextscan'}), '(rows=tablerows, meta=nextscan)\n', (7982, 8013), False, 'from astropy.table import Table\n'), ((8212, 8224), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (8222, 8224), False, 'import io\n'), ((8364, 8376), 'datetime.date.today', 'date.today', ([], {}), '()\n', (8374, 8376), False, 'from datetime import date\n')]
|
"""
An experimental protocol is handled as a pandas DataFrame
that includes an 'onset' field.
This yields the onset time of the events in the experimental paradigm.
It can also contain:
* a 'trial_type' field that yields the condition identifier.
* a 'duration' field that yields event duration (for so-called block
paradigms).
* a 'modulation' field that associates a scalar value with each event.
Author: <NAME>, 2015
"""
from __future__ import with_statement
import warnings
import numpy as np
def check_events(events):
"""Test that the events data describes a valid experimental paradigm
    It is valid if the events data has 'onset' and 'duration' keys.
Parameters
----------
events : pandas DataFrame
Events data that describes a functional experimental paradigm.
Returns
-------
trial_type : array of shape (n_events,), dtype='s'
Per-event experimental conditions identifier.
Defaults to np.repeat('dummy', len(onsets)).
onset : array of shape (n_events,), dtype='f'
Per-event onset time (in seconds)
    duration : array of shape (n_events,), dtype='f'
        Per-event duration (in seconds);
        the 'duration' column is required, a ValueError is raised otherwise
    modulation : array of shape (n_events,), dtype='f'
        Per-event modulation (dimensionless),
        defaults to ones(n_events) when no modulation is provided
"""
if 'onset' not in events.keys():
raise ValueError('The provided events data has no onset column.')
if 'duration' not in events.keys():
raise ValueError('The provided events data has no duration column.')
onset = np.array(events['onset'])
    duration = np.array(events['duration']).astype(float)
n_events = len(onset)
    modulation = np.ones(n_events)
    if 'trial_type' not in events.keys():
        warnings.warn("'trial_type' column not found "
                      "in the given events data.")
        trial_type = np.repeat('dummy', n_events)
    else:
        trial_type = np.array(events['trial_type'])
if 'modulation' in events.keys():
warnings.warn("'modulation' column found in the given events data.")
        modulation = np.array(events['modulation']).astype(float)
return trial_type, onset, duration, modulation
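

if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not from the original module):
    # any pandas DataFrame with 'onset' and 'duration' columns is accepted;
    # 'trial_type' and 'modulation' are optional.
    import pandas as pd
    events = pd.DataFrame({'onset': [0.0, 10.0, 20.0],
                           'duration': [1.0, 1.0, 1.0],
                           'trial_type': ['go', 'stop', 'go']})
    trial_type, onset, duration, modulation = check_events(events)
    print(trial_type, onset, duration, modulation)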
|
[
"warnings.warn",
"numpy.array",
"numpy.ones",
"numpy.repeat"
] |
[((1664, 1689), 'numpy.array', 'np.array', (["events['onset']"], {}), "(events['onset'])\n", (1672, 1689), True, 'import numpy as np\n'), ((1794, 1824), 'numpy.array', 'np.array', (["events['trial_type']"], {}), "(events['trial_type'])\n", (1802, 1824), True, 'import numpy as np\n'), ((1842, 1859), 'numpy.ones', 'np.ones', (['n_events'], {}), '(n_events)\n', (1849, 1859), True, 'import numpy as np\n'), ((1910, 1982), 'warnings.warn', 'warnings.warn', (['"""\'trial_type\' column not found in the given events data."""'], {}), '("\'trial_type\' column not found in the given events data.")\n', (1923, 1982), False, 'import warnings\n'), ((2029, 2057), 'numpy.repeat', 'np.repeat', (['"""dummy"""', 'n_events'], {}), "('dummy', n_events)\n", (2038, 2057), True, 'import numpy as np\n'), ((2104, 2172), 'warnings.warn', 'warnings.warn', (['"""\'modulation\' column found in the given events data."""'], {}), '("\'modulation\' column found in the given events data.")\n', (2117, 2172), False, 'import warnings\n'), ((1705, 1733), 'numpy.array', 'np.array', (["events['duration']"], {}), "(events['duration'])\n", (1713, 1733), True, 'import numpy as np\n'), ((2194, 2224), 'numpy.array', 'np.array', (["events['modulation']"], {}), "(events['modulation'])\n", (2202, 2224), True, 'import numpy as np\n')]
|
import numpy as np
from BMA_support import *
from BMA_agent import *
try:
from scipy.special import lambertw
except ImportError:
print("could not import lambertw (bounded priors won't work)")
class Node(object):
def __init__(self,name='',dims=[],inds=[],num=1,cp=False):
self.name = name
self.ag = [Agent() for i in range(0,count(num))]
# properties that are calculated by the node:
self.marg = Dist()
if cp:
self.prior = Dist()
else:
self.prior = self.marg
self.post = Dist()
self.DKL = 0
self.DKLpr = 0
# properties that have to be defined in the system:
self.p_in = Dist() # used in marginal
self.p0 = Dist()
self.inds = inds
self.beta_r = []
self.dims = dims
self.cp = cp
def initialize(self):
if len(self.inds) < 3: # if no index for beta given, pick [0] as default (->len(beta)=1)
self.inds.append([0])
self.DKL = 0
self.DKLpr = 0
self.beta_r = self.inds[2]
self.post.r = self.inds[1]
self.p_in.r = self.post.r[:-1]
self.post.initialize(self.dims)
self.marg.r = self.inds[0]
self.marg.initialize(self.dims)
self.prior.r = self.inds[0]
self.prior.initialize(self.dims)
self.p0.r = self.inds[0]
self.p0.val = normalize(np.ones(np.shape(self.prior.val)))
for agent in self.ag: agent.reset()
def update_input(self,joint):
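        # Bayesian conditioning: Z marginalizes the joint onto the prior's
        # variables, so p_in = joint / Z is the conditional distribution over
        # the posterior's input indices (1e-55 guards against division by zero)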
Z = np.einsum(joint.val,joint.r,self.prior.r[:-1])
self.p_in.val = np.einsum(1.0/(Z+1e-55),self.prior.r[:-1],joint.val,joint.r,self.post.r[:-1])
def update_posterior(self,U,beta):
if np.shape(U) != np.shape(self.post.val):
print("The utility must have the same shape as the posterior!")
betatimesU = np.einsum(beta,self.beta_r,U,self.post.r,self.post.r)
post = np.einsum(self.prior.val,self.prior.r,np.exp(betatimesU),self.post.r,self.post.r)
self.post.val = normalize(post)
def update_prior(self,alpha,beta):
self.update_marginal()
if self.cp: self.update_bounded_prior(alpha,beta)
def update_marginal(self):
self.marg.val = np.einsum(self.p_in.val,self.p_in.r,self.post.val,self.post.r,self.prior.r)
def update_bounded_prior(self,alpha,beta):
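        # Self-consistent prior under a bounded information cost: the fixed
        # point p(y) ∝ marg(y) / W(...) is solved with the Lambert W function;
        # for alpha/beta > 500 a linearized form is used instead, to avoid
        # overflow in np.exp(cnst)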
pr = np.copy(self.prior.val)
if len(self.ag) > 1:
for k in range(0,len(self.ag)):
index = np.unravel_index(k,[self.dims[i] for i in self.beta_r])
if alpha[index]/beta[index] > 500:
pr[index] = self.marg.val[index]/beta[index] - self.prior.val[index]*np.log(self.prior.val[index]/self.p0.val[index])/alpha[index]
else:
DKL_pr = np.log(self.prior.val[index]/self.p0.val[index]).dot(self.prior.val[index])
cnst = alpha[index]/beta[index] - DKL_pr
denom = np.real(lambertw(np.exp(cnst)*(alpha[index]/beta[index])*self.marg.val[index]/self.p0.val[index]))
pr[index] = (alpha[index]/beta[index])*self.marg.val[index]/denom + 1e-55
elif len(self.ag) == 1:
if alpha[0]/beta[0] > 500:
pr = self.marg.val/beta[0]-self.prior.val*np.log(self.prior.val/self.p0.val)/alpha[0]
else:
DKL_pr = np.log(self.prior.val/self.p0.val).dot(self.prior.val)
cnst = alpha[0]/beta[0] - DKL_pr
denom = np.real(lambertw(np.exp(cnst)*(alpha[0]/beta[0])*self.marg.val/self.p0.val)) + 1e-55
pr = (alpha[0]/beta[0])*self.marg.val/denom + 1e-55
self.prior.val = normalize(pr)
def process(self,U,beta,alpha,joint):
self.update_input(joint)
self.update_posterior(U,beta)
self.update_prior(alpha,beta)
def calc_DKL(self):
self.DKL = get_DKL(self.post,self.prior)
def calc_DKLpr(self):
self.DKLpr = get_DKL(self.prior,self.p0)
def extract_agents(self):
num = len(self.ag)
if num > 1:
ind = self.prior.r[:-1] # indices of the dimensions that count this nodes agents
dimind = [self.dims[ndx] for ndx in ind]
rgoal = self.post.r[:]
for i in ind:
rgoal.remove(i)
for k in range(0,num):
delta = np.zeros(dimind)
index = np.unravel_index(k,dimind)
delta[index] = 1
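                # one-hot delta tensor: contracting it via einsum slices out
                # agent k's posterior/prior/input from the joint arrays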
self.ag[k].post = np.einsum(delta,ind,self.post.val,self.post.r,rgoal)
prior_r_ag = self.prior.r[:]
for i in ind:
prior_r_ag.remove(i)
self.ag[k].prior = np.einsum(delta,ind,self.prior.val,self.prior.r,prior_r_ag)
pin_r_ag = self.p_in.r[:]
for i in ind:
pin_r_ag.remove(i)
self.ag[k].p_in = np.einsum(delta,ind,self.p_in.val,self.p_in.r,pin_r_ag)
self.ag[k].calc_DKL()
else:
self.ag[0].post = self.post.val
self.ag[0].prior = self.prior.val
self.ag[0].p_in = self.p_in.val
self.ag[0].calc_DKL()
|
[
"numpy.log",
"numpy.copy",
"numpy.einsum",
"numpy.unravel_index",
"numpy.zeros",
"numpy.shape",
"numpy.exp"
] |
[((1531, 1579), 'numpy.einsum', 'np.einsum', (['joint.val', 'joint.r', 'self.prior.r[:-1]'], {}), '(joint.val, joint.r, self.prior.r[:-1])\n', (1540, 1579), True, 'import numpy as np\n'), ((1602, 1692), 'numpy.einsum', 'np.einsum', (['(1.0 / (Z + 1e-55))', 'self.prior.r[:-1]', 'joint.val', 'joint.r', 'self.post.r[:-1]'], {}), '(1.0 / (Z + 1e-55), self.prior.r[:-1], joint.val, joint.r, self.\n post.r[:-1])\n', (1611, 1692), True, 'import numpy as np\n'), ((1868, 1925), 'numpy.einsum', 'np.einsum', (['beta', 'self.beta_r', 'U', 'self.post.r', 'self.post.r'], {}), '(beta, self.beta_r, U, self.post.r, self.post.r)\n', (1877, 1925), True, 'import numpy as np\n'), ((2244, 2323), 'numpy.einsum', 'np.einsum', (['self.p_in.val', 'self.p_in.r', 'self.post.val', 'self.post.r', 'self.prior.r'], {}), '(self.p_in.val, self.p_in.r, self.post.val, self.post.r, self.prior.r)\n', (2253, 2323), True, 'import numpy as np\n'), ((2381, 2404), 'numpy.copy', 'np.copy', (['self.prior.val'], {}), '(self.prior.val)\n', (2388, 2404), True, 'import numpy as np\n'), ((1731, 1742), 'numpy.shape', 'np.shape', (['U'], {}), '(U)\n', (1739, 1742), True, 'import numpy as np\n'), ((1746, 1769), 'numpy.shape', 'np.shape', (['self.post.val'], {}), '(self.post.val)\n', (1754, 1769), True, 'import numpy as np\n'), ((1975, 1993), 'numpy.exp', 'np.exp', (['betatimesU'], {}), '(betatimesU)\n', (1981, 1993), True, 'import numpy as np\n'), ((1413, 1437), 'numpy.shape', 'np.shape', (['self.prior.val'], {}), '(self.prior.val)\n', (1421, 1437), True, 'import numpy as np\n'), ((2502, 2558), 'numpy.unravel_index', 'np.unravel_index', (['k', '[self.dims[i] for i in self.beta_r]'], {}), '(k, [self.dims[i] for i in self.beta_r])\n', (2518, 2558), True, 'import numpy as np\n'), ((4387, 4403), 'numpy.zeros', 'np.zeros', (['dimind'], {}), '(dimind)\n', (4395, 4403), True, 'import numpy as np\n'), ((4428, 4455), 'numpy.unravel_index', 'np.unravel_index', (['k', 'dimind'], {}), '(k, dimind)\n', (4444, 4455), True, 'import numpy as np\n'), ((4522, 4578), 'numpy.einsum', 'np.einsum', (['delta', 'ind', 'self.post.val', 'self.post.r', 'rgoal'], {}), '(delta, ind, self.post.val, self.post.r, rgoal)\n', (4531, 4578), True, 'import numpy as np\n'), ((4726, 4789), 'numpy.einsum', 'np.einsum', (['delta', 'ind', 'self.prior.val', 'self.prior.r', 'prior_r_ag'], {}), '(delta, ind, self.prior.val, self.prior.r, prior_r_ag)\n', (4735, 4789), True, 'import numpy as np\n'), ((4931, 4990), 'numpy.einsum', 'np.einsum', (['delta', 'ind', 'self.p_in.val', 'self.p_in.r', 'pin_r_ag'], {}), '(delta, ind, self.p_in.val, self.p_in.r, pin_r_ag)\n', (4940, 4990), True, 'import numpy as np\n'), ((2811, 2861), 'numpy.log', 'np.log', (['(self.prior.val[index] / self.p0.val[index])'], {}), '(self.prior.val[index] / self.p0.val[index])\n', (2817, 2861), True, 'import numpy as np\n'), ((3385, 3421), 'numpy.log', 'np.log', (['(self.prior.val / self.p0.val)'], {}), '(self.prior.val / self.p0.val)\n', (3391, 3421), True, 'import numpy as np\n'), ((2698, 2748), 'numpy.log', 'np.log', (['(self.prior.val[index] / self.p0.val[index])'], {}), '(self.prior.val[index] / self.p0.val[index])\n', (2704, 2748), True, 'import numpy as np\n'), ((3298, 3334), 'numpy.log', 'np.log', (['(self.prior.val / self.p0.val)'], {}), '(self.prior.val / self.p0.val)\n', (3304, 3334), True, 'import numpy as np\n'), ((2993, 3005), 'numpy.exp', 'np.exp', (['cnst'], {}), '(cnst)\n', (2999, 3005), True, 'import numpy as np\n'), ((3530, 3542), 'numpy.exp', 'np.exp', (['cnst'], {}), '(cnst)\n', (3536, 3542), True, 
'import numpy as np\n')]
|
import logging
import numpy as np
from monai.transforms import LoadImage
from monailabel.interfaces.datastore import Datastore, DefaultLabelTag
from monailabel.interfaces.tasks import ScoringMethod
logger = logging.getLogger(__name__)
class Sum(ScoringMethod):
"""
    Compute a simple numpy sum over the Final/Original label tags; also record the valid slices that contain a label mask.
"""
def __init__(self, tags=(DefaultLabelTag.FINAL.value, DefaultLabelTag.ORIGINAL.value)):
super().__init__("Compute Numpy Sum for Final/Original Labels")
self.tags = tags
def __call__(self, request, datastore: Datastore):
loader = LoadImage(image_only=True)
result = {}
for image_id in datastore.list_images():
for tag in self.tags:
label_id: str = datastore.get_label_by_image_id(image_id, tag)
if label_id:
label = loader(datastore.get_label_uri(label_id))
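                    # assumes axis 0 of the loaded label volume indexes slices;
                    # keep the indices of slices with at least one labeled voxel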
slices = [sid for sid in range(label.shape[0]) if np.sum(label[sid] > 0)]
info = {"sum": int(np.sum(label)), "slices": slices}
logger.info(f"{label_id} => {info}")
datastore.update_label_info(label_id, info)
result[label_id] = info
return result
|
[
"monai.transforms.LoadImage",
"numpy.sum",
"logging.getLogger"
] |
[((210, 237), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (227, 237), False, 'import logging\n'), ((650, 676), 'monai.transforms.LoadImage', 'LoadImage', ([], {'image_only': '(True)'}), '(image_only=True)\n', (659, 676), False, 'from monai.transforms import LoadImage\n'), ((1028, 1050), 'numpy.sum', 'np.sum', (['(label[sid] > 0)'], {}), '(label[sid] > 0)\n', (1034, 1050), True, 'import numpy as np\n'), ((1091, 1104), 'numpy.sum', 'np.sum', (['label'], {}), '(label)\n', (1097, 1104), True, 'import numpy as np\n')]
|
#
# pr8_1_1
from math import pi
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import ellipord, ellip, freqz, group_delay
def freqz_m(b, a):
"""
Modified version of freqz subroutine
:param b: numerator polynomial of H(z) (for FIR: b=h)
:param a: denominator polynomial of H(z) (for FIR: a=[1])
:return db: Relative magnitude in dB computed over 0 to pi radians
:return mag: absolute magnitude computed over 0 to pi radians
:return pha: Phase response in radians over 0 to pi radians
:return grd: Group delay over 0 to pi radians
:return w: 501 frequency samples between 0 to pi radians
"""
w, H = freqz(b, a, 1000, whole=True)
H = H[0:501]
w = w[0:501]
mag = np.abs(H)
eps = np.finfo(float).eps
db = 20 * np.log10((mag + eps) / np.max(mag))
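    # eps guards against log10(0) in zero-magnitude bins; dividing by max(mag)
    # normalizes the response so its peak sits at 0 dB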
pha = np.angle(H)
_, grd = group_delay((b, a), w)
return db, mag, pha, grd, w
if __name__ == '__main__':
fs = 8000 # sampling frequency
fs2 = fs / 2
Wp = np.array([60, 500]) / fs2 # filter pass band
Ws = np.array([20, 2000]) / fs2 # filter stop band
Rp = 1 # passband ripple
Rs = 40 # stopband attenuation
    n, Wn = ellipord(Wp, Ws, Rp, Rs)            # filter order
b, a = ellip(n, Rp, Rs, Wn, 'bandpass') # filter coefficients
print('b = {} \na = {}'.format(b, a))
db, mag, pha, grd, w = freqz_m(b, a) # frequency response curve
# figure
plt.figure(figsize=(16, 9))
    plt.plot(w / pi * fs2, db, linewidth=2)
plt.grid()
plt.axis([0, 4000, -90, 10])
plt.title('Frequency Response of Elliptical 6th-order BPF')
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.savefig('images/elliptical_6th_BPF.png', bbox_inches='tight', dpi=600)
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.abs",
"scipy.signal.ellip",
"scipy.signal.group_delay",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.show",
"numpy.angle",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.axis",
"numpy.finfo",
"matplotlib.pyplot.figure",
"numpy.max",
"numpy.array",
"scipy.signal.ellipord",
"scipy.signal.freqz",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel"
] |
[((636, 665), 'scipy.signal.freqz', 'freqz', (['b', 'a', '(1000)'], {'whole': '(True)'}), '(b, a, 1000, whole=True)\n', (641, 665), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((701, 710), 'numpy.abs', 'np.abs', (['H'], {}), '(H)\n', (707, 710), True, 'import numpy as np\n'), ((792, 803), 'numpy.angle', 'np.angle', (['H'], {}), '(H)\n', (800, 803), True, 'import numpy as np\n'), ((814, 836), 'scipy.signal.group_delay', 'group_delay', (['(b, a)', 'w'], {}), '((b, a), w)\n', (825, 836), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((1264, 1288), 'scipy.signal.ellipord', 'ellipord', (['Wp', 'Ws', 'Rp', 'Rs'], {}), '(Wp, Ws, Rp, Rs)\n', (1272, 1288), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((1327, 1359), 'scipy.signal.ellip', 'ellip', (['n', 'Rp', 'Rs', 'Wn', '"""bandpass"""'], {}), "(n, Rp, Rs, Wn, 'bandpass')\n", (1332, 1359), False, 'from scipy.signal import ellipord, ellip, freqz, group_delay\n'), ((1530, 1557), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1540, 1557), True, 'import matplotlib.pyplot as plt\n'), ((1559, 1598), 'matplotlib.pyplot.plot', 'plt.plot', (['(w / pi * fs2)', 'db'], {'linewidth': '(2)'}), '(w / pi * fs2, db, linewidth=2)\n', (1567, 1598), True, 'import matplotlib.pyplot as plt\n'), ((1599, 1609), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (1607, 1609), True, 'import matplotlib.pyplot as plt\n'), ((1611, 1639), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 4000, -90, 10]'], {}), '([0, 4000, -90, 10])\n', (1619, 1639), True, 'import matplotlib.pyplot as plt\n'), ((1641, 1700), 'matplotlib.pyplot.title', 'plt.title', (['"""Frequency Response of Elliptical 6th-order BPF"""'], {}), "('Frequency Response of Elliptical 6th-order BPF')\n", (1650, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1702, 1730), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frequency [Hz]"""'], {}), "('Frequency [Hz]')\n", (1712, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1732, 1760), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Amplitude [dB]"""'], {}), "('Amplitude [dB]')\n", (1742, 1760), True, 'import matplotlib.pyplot as plt\n'), ((1762, 1836), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""images/elliptical_6th_BPF.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(600)'}), "('images/elliptical_6th_BPF.png', bbox_inches='tight', dpi=600)\n", (1773, 1836), True, 'import matplotlib.pyplot as plt\n'), ((1838, 1848), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1846, 1848), True, 'import matplotlib.pyplot as plt\n'), ((718, 733), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (726, 733), True, 'import numpy as np\n'), ((986, 1005), 'numpy.array', 'np.array', (['[60, 500]'], {}), '([60, 500])\n', (994, 1005), True, 'import numpy as np\n'), ((1054, 1074), 'numpy.array', 'np.array', (['[20, 2000]'], {}), '([20, 2000])\n', (1062, 1074), True, 'import numpy as np\n'), ((772, 783), 'numpy.max', 'np.max', (['mag'], {}), '(mag)\n', (778, 783), True, 'import numpy as np\n')]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-GPU tests for MirroredStrategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import sys
from absl.testing import parameterized
import numpy as np
from tensorflow.contrib.distribute.python import combinations
from tensorflow.contrib.distribute.python import mirrored_strategy
from tensorflow.contrib.distribute.python import multi_worker_test_base
from tensorflow.contrib.distribute.python import strategy_test_lib
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import func_graph
from tensorflow.python.framework import ops
from tensorflow.python.keras.engine import training as keras_training
from tensorflow.python.keras.layers import core as keras_core
from tensorflow.python.layers import core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import server_lib
GPU_TEST = "test_gpu" in sys.argv[0]
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.mirrored_strategy_with_two_gpus,
combinations.core_mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_two_gpus],
mode=["graph", "eager"]))
class MirroredTwoDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.TwoDeviceDistributionTestBase,
parameterized.TestCase):
def testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution)
def testReplicaId(self, distribution):
self._test_replica_id(distribution)
def testNumReplicasInSync(self, distribution):
self.assertEqual(2, distribution.num_replicas_in_sync)
def testCallAndMergeExceptions(self, distribution):
self._test_call_and_merge_exceptions(distribution)
def testRunRegroupError(self, distribution):
def run_fn():
replica_id = int(self.evaluate(_replica_id()))
# Generates a list with different lengths on different devices.
# Will fail in _regroup() (if more than one device).
return list(range(replica_id))
with distribution.scope(), self.assertRaises(AssertionError):
distribution.extended.call_for_each_replica(run_fn)
def testReduceToCpu(self, distribution):
with distribution.scope():
result = distribution.extended.call_for_each_replica(_replica_id)
reduced = distribution.reduce(reduce_util.ReduceOp.SUM, result)
expected = sum(range(distribution.num_replicas_in_sync))
self.assertEqual(expected, self.evaluate(reduced))
def testMakeInputFnIteratorWithDataset(self, distribution):
dataset_fn = lambda: dataset_ops.Dataset.range(10)
expected_values = [[i, i+1] for i in range(0, 10, 2)]
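    # with two replicas in sync, consecutive dataset elements are distributed
    # across the replicas, so each step yields the pair [i, i+1]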
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=2,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
expected_values)
# TODO(b/124344198): Re-enable after fixing this flaky test.
def DISABLED_testMakeInputFnIteratorWithCallable(self, distribution):
def fn():
dataset = dataset_ops.Dataset.range(2).interleave(
(lambda _: dataset_ops.Dataset.range(10)), cycle_length=2)
it = dataset.make_one_shot_iterator()
return it.get_next
expected_values = [[i, i] for i in range(0, 10)]
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=2,
expected_num_input_pipelines=1,
expected_input_pipeline_id=0)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(iterator, distribution.extended.worker_devices,
expected_values, test_reinitialize=False)
def testNumpyIterator(self, distribution):
self._test_numpy_iterator(distribution)
def testGlobalStepUpdate(self, distribution):
self._test_global_step_update(distribution)
def testRun(self, distribution):
self._test_run(distribution)
def testAllReduceSum(self, distribution):
self._test_all_reduce_sum(distribution)
def testAllReduceSumGradients(self, distribution):
self._test_all_reduce_sum_gradients(distribution)
def testAllReduceSumGradientTape(self, distribution):
self._test_all_reduce_sum_gradient_tape(distribution)
def testAllReduceMean(self, distribution):
self._test_all_reduce_mean(distribution)
def testAllReduceMeanGradients(self, distribution):
self._test_all_reduce_mean_gradients(distribution)
def testAllReduceMeanGradientTape(self, distribution):
self._test_all_reduce_mean_gradient_tape(distribution)
def testSummaryForReplicaZeroOnly(self, distribution):
self._test_summary_for_replica_zero_only(distribution)
def one_device_combinations():
return combinations.combine(
distribution=[
combinations.mirrored_strategy_with_one_cpu,
combinations.mirrored_strategy_with_one_gpu,
combinations.core_mirrored_strategy_with_one_cpu,
combinations.core_mirrored_strategy_with_one_gpu],
mode=["graph", "eager"])
@combinations.generate(one_device_combinations())
class MirroredOneDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
strategy_test_lib.OneDeviceDistributionTestBase,
parameterized.TestCase):
def testMinimizeLoss(self, distribution):
if context.executing_eagerly():
self._test_minimize_loss_eager(distribution)
else:
self._test_minimize_loss_graph(distribution)
def testReplicaId(self, distribution):
self._test_replica_id(distribution)
def testCallAndMergeExceptions(self, distribution):
self._test_call_and_merge_exceptions(distribution)
def testRun(self, distribution):
self._test_run(distribution)
def testAllReduceSum(self, distribution):
self._test_all_reduce_sum(distribution)
def testAllReduceSumGradients(self, distribution):
self._test_all_reduce_sum_gradients(distribution)
def testAllReduceSumGradientTape(self, distribution):
self._test_all_reduce_sum_gradient_tape(distribution)
def testAllReduceMean(self, distribution):
self._test_all_reduce_mean(distribution)
def testAllReduceMeanGradients(self, distribution):
self._test_all_reduce_mean_gradients(distribution)
def testAllReduceMeanGradientTape(self, distribution):
self._test_all_reduce_mean_gradient_tape(distribution)
class MirroredStrategyVariableCreatorStackTest(
test.TestCase, parameterized.TestCase):
@combinations.generate(combinations.combine(
distribution=[combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph"]))
def testCreatorStacksAreThreadLocal(self, distribution):
def model_fn():
replica_id_str = str(self.evaluate(_replica_id()))
def thread_creator_fn(next_creator, *args, **kwargs):
return next_creator(*args, **kwargs) + ":thread_" + replica_id_str
with variable_scope.variable_creator_scope(thread_creator_fn):
# Create a variable in this scope.
v = variable_scope.variable(1.0)
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
return v
def main_thread_creator(next_creator, *args, **kwargs):
# We are not using the underlying next_creator for test purposes.
del next_creator, args, kwargs
return "main_thread"
with context.graph_mode(), \
distribution.scope(), \
variable_scope.variable_creator_scope(main_thread_creator):
result = distribution.extended.call_for_each_replica(model_fn)
result = distribution.experimental_local_results(result)
expected = ("main_thread:thread_0", "main_thread:thread_1")
self.assertEqual(expected, result)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredStrategyCallForEachReplicaTest(test.TestCase):
def testExecutingEagerlyOutsideFunction(self, distribution):
"""Verify we preserve the value of executing_eagerly_outside_functions()."""
def model_fn():
return ops.executing_eagerly_outside_functions()
originally = ops.executing_eagerly_outside_functions()
with distribution.scope():
in_scope = ops.executing_eagerly_outside_functions()
in_model_fn = distribution.extended.call_for_each_replica(model_fn)
unwrapped = distribution.experimental_local_results(in_model_fn)
self.assertEqual(in_scope, unwrapped[0])
self.assertEqual(in_scope, originally)
# Verify this all again, but this time in a FuncGraph.
with func_graph.FuncGraph("fg").as_default(), distribution.scope():
in_scope = ops.executing_eagerly_outside_functions()
in_model_fn = distribution.extended.call_for_each_replica(model_fn)
unwrapped = distribution.experimental_local_results(in_model_fn)
self.assertEqual(in_scope, unwrapped[0])
self.assertEqual(in_scope, originally)
def testFunctionInCallForEachReplicaNoMergeCall(self, distribution):
@def_function.function
def model_fn():
return 0.
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual((0., 0.), self.evaluate(result.values))
def testFunctionInCallForEachReplicaWithMergeCall(self, distribution):
def merge_fn(_):
pass
@def_function.function
def model_fn():
ds_context.get_replica_context().merge_call(merge_fn)
return 0.
with distribution.scope():
with self.assertRaisesRegexp(
RuntimeError, "`merge_call` called while defining a new graph."):
distribution.extended.call_for_each_replica(model_fn)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredStrategyVariableCreationTest(test.TestCase):
# TODO(priyag): Modify more tests to use this helper and check more
# properties.
def _test_mv_properties(self, var, name, strategy):
self.assertIsInstance(var, values.MirroredVariable)
self.assertEqual(name, var.name)
self.assertIs(strategy, var.distribute_strategy)
for d in var.devices:
self.assertEqual(d, var.get(d).device)
self.assertIs(strategy, var.get(d)._distribute_strategy) # pylint: disable=protected-access
def testVariableInFuncGraph(self, distribution):
def model_fn():
v = variable_scope.variable(2.0, name="bar")
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with func_graph.FuncGraph("fg").as_default(), distribution.scope():
v1 = variable_scope.variable(1.0, name="foo")
v2 = distribution.extended.call_for_each_replica(model_fn)
self._test_mv_properties(v1, "foo:0", distribution)
self._test_mv_properties(v2, "bar:0", distribution)
def testSingleVariable(self, distribution):
def model_fn():
# This variable should be created only once across the threads because of
# special variable_creator functions used by
# `distribution.extended.call_for_each_replica`.
v = variable_scope.variable(1.0, name="foo")
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self._test_mv_properties(result, "foo:0", distribution)
def testUnnamedVariable(self, distribution):
def model_fn():
v = variable_scope.variable(1.0)
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self._test_mv_properties(result, "Variable:0", distribution)
def testMultipleVariables(self, distribution):
def model_fn():
vs = []
for i in range(5):
vs.append(variable_scope.variable(1.0, name="foo" + str(i)))
ds_context.get_replica_context().merge_call(lambda _: _)
return vs
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
for i, v in enumerate(result):
self._test_mv_properties(v, "foo" + str(i) + ":0", distribution)
def testMultipleVariablesWithSameCanonicalName(self, distribution):
def model_fn():
vs = []
vs.append(variable_scope.variable(1.0, name="foo/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar"))
vs.append(variable_scope.variable(1.0, name="foo_1/bar_1"))
vs.append(variable_scope.variable(1.0, name="foo/bar_1"))
ds_context.get_replica_context().merge_call(lambda _: _)
return vs
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
for v in result:
self.assertIsInstance(v, values.MirroredVariable)
self.assertEqual(4, len(result))
self.assertEqual("foo/bar:0", result[0].name)
self.assertEqual("foo_1/bar:0", result[1].name)
self.assertEqual("foo_1/bar_1:0", result[2].name)
self.assertEqual("foo/bar_1:0", result[3].name)
def testVariableWithSameCanonicalNameAcrossThreads(self, distribution):
def model_fn():
replica_id = self.evaluate(_replica_id())
v = variable_scope.variable(1.0, name="foo_" + str(replica_id))
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(result, values.MirroredVariable)
# The resulting mirrored variable will use the name from the first device.
self.assertEqual("foo_0:0", result.name)
def testWithLayers(self, distribution):
def model_fn(features):
with variable_scope.variable_scope("common"):
layer1 = core.Dense(1)
layer1(features)
layer2 = core.Dense(1)
layer2(features)
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
layer3 = core.Dense(1)
layer3(features)
return [(layer1.kernel, layer1.bias),
(layer2.kernel, layer2.bias),
(layer3.kernel, layer3.bias)]
iterator = distribution.make_input_fn_iterator(
lambda _: dataset_ops.Dataset.from_tensors([[1.]]).repeat(10))
self.evaluate(iterator.initialize())
features = iterator.get_next()
with distribution.scope():
result = distribution.extended.call_for_each_replica(
model_fn, args=(features,))
suffixes = ["", "_1", "_2"]
for (kernel, bias), suffix in zip(result, suffixes):
self.assertIsInstance(kernel, values.MirroredVariable)
self.assertEqual("common/dense" + suffix + "/kernel:0", kernel.name)
self.assertIsInstance(bias, values.MirroredVariable)
self.assertEqual("common/dense" + suffix + "/bias:0", bias.name)
def testWithVariableAndVariableScope(self, distribution):
def model_fn():
v0 = variable_scope.variable(1.0, name="var0", aggregation=None)
with variable_scope.variable_scope("common"):
v1 = variable_scope.variable(1.0, name="var1")
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
v2 = variable_scope.variable(
1.0,
name="var2",
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v3 = variable_scope.variable(
1.0,
name="var3",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
return v0, v1, v2, v3
with distribution.scope():
v = variable_scope.variable(1.0, name="var-main0")
self.assertEqual("var-main0:0", v.name)
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(4, len(result))
v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEqual("var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEqual("common/var1:0", v1.name)
self.assertIsInstance(v2, values.SyncOnReadVariable)
self.assertEqual("common/var2:0", v2.name)
self.assertEqual(variable_scope.VariableAggregation.SUM, v2.aggregation)
self.assertIsInstance(v3, values.MirroredVariable)
self.assertEqual("common/var3:0", v3.name)
self.assertEqual(variable_scope.VariableAggregation.MEAN, v3.aggregation)
def testWithGetVariableAndVariableScope(self, distribution):
def model_fn():
v0 = variable_scope.get_variable("var0", [1])
with variable_scope.variable_scope("common"):
v1 = variable_scope.get_variable("var1", [1])
# This will pause the current thread, and execute the other thread.
ds_context.get_replica_context().merge_call(lambda _: _)
v2 = variable_scope.get_variable(
"var2", [1],
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v3 = variable_scope.get_variable(
"var3", [1],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=variable_scope.VariableAggregation.MEAN)
return v0, v1, v2, v3
with distribution.scope():
with variable_scope.variable_scope("main"):
v = variable_scope.get_variable("var-main0", [1])
self.assertEqual("main/var-main0:0", v.name)
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(4, len(result))
v0, v1, v2, v3 = result
self.assertIsInstance(v0, values.MirroredVariable)
self.assertEqual("main/var0:0", v0.name)
self.assertIsInstance(v1, values.MirroredVariable)
self.assertEqual("main/common/var1:0", v1.name)
self.assertIsInstance(v2, values.SyncOnReadVariable)
self.assertEqual("main/common/var2:0", v2.name)
self.assertEqual(variable_scope.VariableAggregation.SUM,
v2.aggregation)
self.assertIsInstance(v3, values.MirroredVariable)
self.assertEqual("main/common/var3:0", v3.name)
self.assertEqual(variable_scope.VariableAggregation.MEAN,
v3.aggregation)
def testOnlyFirstReplicaUpdatesVariables(self, distribution):
def create_fn():
aggregation = variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
v0 = variable_scope.variable(
2.0,
name="on_read",
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=aggregation)
v1 = variable_scope.variable(
3.0,
name="on_write",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation=aggregation)
return v0, v1
devices = ["/device:GPU:0", "/device:CPU:0"]
with distribution.scope():
v0, v1 = distribution.extended.call_for_each_replica(create_fn)
self.evaluate(v0.initializer)
self.assertEqual(2.0, self.evaluate(v0.get(devices[0])))
self.assertEqual(2.0, self.evaluate(v0.get(devices[1])))
self.assertEqual(2.0, self.evaluate(distribution.extended.read_var(v0)))
self.evaluate(v1.initializer)
self.assertEqual(3.0, self.evaluate(v1.get(devices[0])))
self.assertEqual(3.0, self.evaluate(v1.get(devices[1])))
self.assertEqual(3.0, self.evaluate(distribution.extended.read_var(v1)))
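# Evaluates to 1.0 on replica 0 and 2.0 on replica 1, so the two replicas
# produce distinguishable update values.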
def replica_id_plus_one():
return math_ops.cast(_replica_id() + 1, dtype=dtypes.float32)
# Update using the assign_add member function.
def update_member_fn():
update0 = v0.assign_add(5.0 * replica_id_plus_one())
update1 = v1.assign_add(7.0 * replica_id_plus_one())
return update0, update1
update0a, update1a = distribution.extended.call_for_each_replica(
update_member_fn)
# Update "sync on read" variable.
self.evaluate(distribution.group(update0a))
self.assertEqual(2.0 + 5.0, self.evaluate(v0.get(devices[0])))
# Writes are not synchronized for "sync on read" variables,
# so device[1] can end up with a different value.
self.assertEqual(2.0 + 2*5.0, self.evaluate(v0.get(devices[1])))
# Always reads from device 0.
self.assertEqual(2.0 + 5.0, self.evaluate(
distribution.extended.read_var(v0)))
# Update "sync on write" variable.
self.evaluate(distribution.group(update1a))
self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[0])))
# Writes are synchronized for v1; only the argument to assign_add on
# device[0] is used.
self.assertEqual(3.0 + 7.0, self.evaluate(v1.get(devices[1])))
self.assertEqual(3.0 + 7.0, self.evaluate(
distribution.extended.read_var(v1)))
# Update using the state_ops.assign_add global function.
def update_state_ops_fn():
update0 = state_ops.assign_add(v0, 11.0 * replica_id_plus_one())
update1 = state_ops.assign_add(v1, 13.0 * replica_id_plus_one())
return update0, update1
update0b, update1b = distribution.extended.call_for_each_replica(
update_state_ops_fn)
self.evaluate(distribution.group(update0b))
# Update "sync on read" variable.
self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(v0.get(devices[0])))
self.assertEqual(2.0 + 2*5.0 + 2*11.0, self.evaluate(v0.get(devices[1])))
self.assertEqual(2.0 + 5.0 + 11.0, self.evaluate(
distribution.extended.read_var(v0)))
# Update "sync on write" variable.
self.evaluate(distribution.group(update1b))
self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[0])))
self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(v1.get(devices[1])))
self.assertEqual(3.0 + 7.0 + 13.0, self.evaluate(
distribution.extended.read_var(v1)))
def testNoneSynchronizationWithGetVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please change "
"the `synchronization` for variable: v"):
variable_scope.get_variable(
"v", [1],
synchronization=variable_scope.VariableSynchronization.NONE)
def testNoneSynchronizationWithVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "`NONE` variable synchronization mode is not "
"supported with `Mirrored` distribution strategy. Please change "
"the `synchronization` for variable: v"):
variable_scope.variable(
1.0,
name="v",
synchronization=variable_scope.VariableSynchronization.NONE)
def testInvalidSynchronizationWithVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable synchronization mode: Invalid for "
"variable: v"):
variable_scope.variable(1.0, name="v", synchronization="Invalid")
def testInvalidAggregationWithGetVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable aggregation mode: invalid for "
"variable: v"):
variable_scope.get_variable(
"v", [1],
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation="invalid")
def testInvalidAggregationWithVariable(self, distribution):
with distribution.scope():
with self.assertRaisesRegexp(
ValueError, "Invalid variable aggregation mode: invalid for "
"variable: v"):
variable_scope.variable(
1.0,
name="v",
synchronization=variable_scope.VariableSynchronization.ON_WRITE,
aggregation="invalid")
def testNonMatchingVariableCreation(self, distribution):
self.skipTest("b/123075960")
def model_fn(name):
v = variable_scope.variable(1.0, name=name)
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
names = values.DistributedValues(device_map, ("foo", "bar"))
with self.assertRaises(RuntimeError):
_ = distribution.extended.call_for_each_replica(model_fn, args=(names,))
def testSyncOnReadVariable(self, distribution):
all_v_sum = {}
all_v_mean = {}
components_sum = {}
components_mean = {}
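# Creates a SUM- and a MEAN-aggregated SyncOnReadVariable per replica and
# records both the wrappers and the per-replica components so the test can
# check their identities across replicas.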
def model_fn():
replica_id = self.evaluate(_replica_id())
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
v_mean = variable_scope.variable(
4.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
self.assertIsInstance(v_sum, values.SyncOnReadVariable)
self.assertIsInstance(v_mean, values.SyncOnReadVariable)
updates = [v_sum.assign_add(2.0 + replica_id),
v_mean.assign(6.0 * replica_id)]
all_v_sum[replica_id] = v_sum
all_v_mean[replica_id] = v_mean
c_sum = v_sum.get()
c_mean = v_mean.get()
components_sum[replica_id] = c_sum
components_mean[replica_id] = c_mean
self.assertIsNot(v_sum, c_sum)
self.assertIsNot(v_mean, c_mean)
return updates, v_sum, v_mean, c_sum, c_mean
with distribution.scope():
# Create "sum" and "mean" versions of SyncOnReadVariables.
ret_ops, ret_v_sum, ret_v_mean, regrouped_sum, regrouped_mean = (
distribution.extended.call_for_each_replica(model_fn))
# Should see the same wrapping instance in all replicas.
self.assertIs(all_v_sum[0], ret_v_sum)
self.assertIs(all_v_mean[0], ret_v_mean)
self.assertIs(all_v_sum[0], all_v_sum[1])
self.assertIs(all_v_mean[0], all_v_mean[1])
# Regroup should recover the same wrapper.
self.assertIs(ret_v_sum, regrouped_sum)
self.assertIs(ret_v_mean, regrouped_mean)
self.assertIsNot(components_sum[0], components_sum[1])
self.assertIsNot(components_mean[0], components_mean[1])
# Apply updates
self.evaluate(variables.global_variables_initializer())
self.evaluate([y for x in ret_ops # pylint: disable=g-complex-comprehension
for y in distribution.experimental_local_results(x)])
expected_sum = 0.0
expected_mean = 0.0
for i, d in enumerate(distribution.extended.worker_devices):
# Should see different values on different devices.
v_sum_value = self.evaluate(ret_v_sum.get(d).read_value())
v_mean_value = self.evaluate(ret_v_mean.get(d).read_value())
expected = i + 3.0
self.assertEqual(expected, v_sum_value)
expected_sum += expected
expected = i * 6.0
self.assertEqual(expected, v_mean_value)
expected_mean += expected
expected_mean /= len(distribution.extended.worker_devices)
# Without get(device), should return the value you get by
# applying the reduction across all replicas (whether you use
# read_var(), get(), or nothing).
self.assertEqual(expected_sum, self.evaluate(
distribution.extended.read_var(ret_v_sum)))
self.assertEqual(expected_mean, self.evaluate(
distribution.extended.read_var(ret_v_mean)))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum.get()))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean.get()))
self.assertEqual(expected_sum, self.evaluate(ret_v_sum))
self.assertEqual(expected_mean, self.evaluate(ret_v_mean))
# TODO(priyag): Update this test to work in eager mode as well.
def testDynamicRnnVariables(self, distribution):
def model_fn():
inputs = constant_op.constant(2 * [2 * [[0.0, 1.0, 2.0, 3.0, 4.0]]])
cell_fw = rnn_cell_impl.LSTMCell(300)
cell_bw = rnn_cell_impl.LSTMCell(300)
(outputs, _) = rnn.bidirectional_dynamic_rnn(
cell_fw,
cell_bw,
inputs,
dtype=dtypes.float32)
return outputs
with context.graph_mode(), distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
# Two variables are created by the RNN layer.
self.assertEqual(2, len(result))
for v in result:
self.assertIsInstance(v, values.DistributedValues)
_, v1 = distribution.experimental_local_results(v)
self.assertStartsWith(v1._op.name, "replica_1/")
def testSyncOnReadVariableUpdate(self, distribution):
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertIsInstance(v_sum, values.SyncOnReadVariable)
return v_sum
def update(var, value):
return var.assign(value)
with distribution.scope():
ret_v_sum = distribution.extended.call_for_each_replica(model_fn)
# Initialize variables.
self.evaluate(variables.global_variables_initializer())
# Assert that the aggregated value of the sync on read var is the sum
# of the individual values before running the update ops.
self.assertEqual(1.0, self.evaluate(ret_v_sum.get(
distribution.extended.worker_devices[0]).read_value()))
self.assertEqual(2.0, self.evaluate(ret_v_sum))
# Apply updates.
update_ops = distribution.extended.update(
ret_v_sum, update, args=(5.0,), group=False)
self.evaluate(update_ops)
# Assert that the aggregated value of the sync on read vars is the sum
# of the individual values after running the update ops.
self.assertEqual(5.0, self.evaluate(ret_v_sum.get(
distribution.extended.worker_devices[0]).read_value()))
self.assertEqual(10.0, self.evaluate(ret_v_sum))
def testVarDistributeStrategy(self, distribution):
with distribution.scope():
mirrored = variable_scope.variable(1.0)
sync_on_read = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ)
self.assertIs(distribution, mirrored.distribute_strategy)
self.assertIs(distribution, sync_on_read.distribute_strategy)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph"]))
class MirroredStrategyNameScopeTest(test.TestCase):
# NOTE(priyag): Names and name scopes are ignored in eager execution, hence
# we are not testing this in eager mode.
def testNameScope(self, distribution):
def model_fn():
with ops.name_scope("foo"):
a = constant_op.constant(1.0, name="a")
ds_context.get_replica_context().merge_call(lambda _: _)
b = constant_op.constant(1.0, name="b")
return a, b
with context.graph_mode(), distribution.scope():
with ops.name_scope("main"):
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = distribution.experimental_local_results(v)
self.assertEqual("main/foo/" + name + ":0", v0.name)
self.assertEqual("main/replica_1/foo/" + name + ":0", v1.name)
def testWithDefaultName(self, distribution):
def model_fn():
with ops.name_scope(None, "foo"):
a = constant_op.constant(1.0, name="a")
ds_context.get_replica_context().merge_call(lambda _: _)
b = constant_op.constant(2.0, name="b")
return a, b
with context.graph_mode(), distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertEqual(2, len(result))
for v, name in zip(result, ["a", "b"]):
self.assertIsInstance(v, values.DistributedValues)
v0, v1 = distribution.experimental_local_results(v)
self.assertEqual("foo/" + name + ":0", v0.name)
self.assertEqual("replica_1/foo/" + name + ":0", v1.name)
# variable_scope.variable() respects name scopes when creating
# variables. On the other hand variable_scope.get_variable() ignores name
# scopes when creating variables. We test both methods of creating variables
# to make sure that we have the same variable names in both cases.
def testNameScopeWithVariable(self, distribution):
def in_cross_replica(_):
c = variable_scope.variable(1.0, name="c")
return c
def model_fn():
b = variable_scope.variable(1.0, name="b")
with ops.name_scope("foo"):
c = ds_context.get_replica_context().merge_call(in_cross_replica)
return b, c
with context.graph_mode(), distribution.scope():
with ops.name_scope("main"):
a = variable_scope.variable(1.0, name="a")
result = distribution.extended.call_for_each_replica(model_fn)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = distribution.experimental_local_results(a)
b0, b1 = distribution.experimental_local_results(result_b)
c0, c1 = distribution.experimental_local_results(result_c)
self.assertEqual("main/a:0", a0.name)
self.assertEqual("main/a/replica_1:0", a1.name)
self.assertEqual("main/b:0", b0.name)
self.assertEqual("main/b/replica_1:0", b1.name)
self.assertEqual("main/foo/c:0", c0.name)
self.assertEqual("main/foo/c/replica_1:0", c1.name)
def testNameScopeWithGetVariable(self, distribution):
def in_cross_replica(_):
c = variable_scope.get_variable("c", [1])
return c
def model_fn():
b = variable_scope.get_variable("b", [1])
with ops.name_scope("foo"):
c = ds_context.get_replica_context().merge_call(in_cross_replica)
return b, c
with context.graph_mode(), distribution.scope():
with ops.name_scope("main"):
a = variable_scope.get_variable("a", [1])
result = distribution.extended.call_for_each_replica(model_fn)
result_b = result[0]
result_c = result[1]
self.assertIsInstance(result_b, values.DistributedValues)
self.assertIsInstance(result_c, values.DistributedValues)
a0, a1 = distribution.experimental_local_results(a)
b0, b1 = distribution.experimental_local_results(result_b)
c0, c1 = distribution.experimental_local_results(result_c)
self.assertEqual("a:0", a0.name)
self.assertEqual("a/replica_1:0", a1.name)
self.assertEqual("b:0", b0.name)
self.assertEqual("b/replica_1:0", b1.name)
self.assertEqual("c:0", c0.name)
self.assertEqual("c/replica_1:0", c1.name)
@combinations.generate(
combinations.combine(
distribution=[
combinations.NamedDistribution(
"Mirrored3Devices",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.MirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
required_gpus=2),
combinations.NamedDistribution(
"CoreMirrored3Devices",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.CoreMirroredStrategy(
["/device:GPU:0", "/device:GPU:1", "/device:CPU:0"]),
required_gpus=2)
],
mode=["graph", "eager"]))
class MirroredThreeDeviceDistributionTest(
strategy_test_lib.DistributionTestBase,
parameterized.TestCase):
def testThreeDevices(self, distribution):
def model_fn():
v = variable_scope.variable(1.0, name="foo")
ds_context.get_replica_context().merge_call(lambda _: _)
return v
with distribution.scope():
result = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(result, values.MirroredVariable)
self.assertEqual("foo:0", result.name)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredVariableUpdateTest(test.TestCase):
# The following tests check assign, assign_add and assign_sub on Mirrored
# variables in replica and cross replica context.
def testAssignMirroredVarReplicaContextWithoutAggregationType(self,
distribution):
# Test that we always have an aggregation type set on the mirrored variable
# if we assign to it in replica mode.
def var_fn():
v = variable_scope.variable(1.0, name="foo")
return v
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
def model_fn():
return mirrored_var.assign(5.0)
with self.assertRaisesRegexp(
ValueError, "You must specify an aggregation method to update a "
"MirroredVariable in Replica Context."):
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
def testAssignMirroredVarReplicaContextWithSum(self, distribution):
# Test that we don't reduce a non-per-replica value with the "sum"
# aggregation type.
def var_fn():
v = variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.SUM)
return v
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
def model_fn():
return mirrored_var.assign(5.0)
with self.assertRaisesRegexp(
ValueError, "A non-DistributedValues value 5.0 cannot be reduced "
"with the given reduce op ReduceOp.SUM."):
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
def testAssignMirroredVarCrossDeviceContext(self, distribution):
def var_fn():
return variable_scope.variable(1.0, name="foo")
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign(6.0))
self.assertEqual(6.0, mirrored_var_result)
def testAssignMirroredVarReplicaContext(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
ds_context.get_replica_context().replica_id_in_sync_group,
mirrored_var.dtype)
return mirrored_var.assign(value)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(0.5, self.evaluate(mirrored_var))
def testAssignMirroredVarReplicaContextWithSingleValue(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign(5.0)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(5.0, self.evaluate(mirrored_var))
def testAssignAddMirroredVarCrossDeviceContext(self, distribution):
def var_fn():
return variable_scope.variable(1.0, name="foo")
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
# read_value == True
mirrored_var_result = self.evaluate(
mirrored_var.assign_add(6.0, read_value=True))
self.assertEqual(7.0, mirrored_var_result)
self.assertEqual(7.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
self.assertEqual(7.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
# read_value == False
self.evaluate(mirrored_var.assign_add(2.0, read_value=False))
self.assertEqual(9.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
self.assertEqual(9.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
def testAssignAddMirroredVarReplicaContext(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
ds_context.get_replica_context().replica_id_in_sync_group,
mirrored_var.dtype)
return mirrored_var.assign_add(value)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(1.5, self.evaluate(mirrored_var))
def testAssignAddMirroredVarReplicaContextWithSingleValue(self, distribution):
def var_fn():
return variable_scope.variable(
1.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(1.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign_add(5.0)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(6.0, self.evaluate(mirrored_var))
def testAssignSubMirroredVarCrossDeviceContext(self, distribution):
def var_fn():
return variable_scope.variable(5.0, name="foo")
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(mirrored_var))
mirrored_var_result = self.evaluate(mirrored_var.assign_sub(2.0))
self.assertEqual(3.0, mirrored_var_result)
self.assertEqual(3.0, self.evaluate(mirrored_var.get("/device:GPU:0")))
self.assertEqual(3.0, self.evaluate(mirrored_var.get("/device:CPU:0")))
def testAssignSubMirroredVarReplicaContext(self, distribution):
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(mirrored_var))
def model_fn():
value = math_ops.cast(
ds_context.get_replica_context().replica_id_in_sync_group,
mirrored_var.dtype)
return mirrored_var.assign_sub(value)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(4.5, self.evaluate(mirrored_var))
def testAssignSubMirroredVarReplicaContextWithSingleValue(self, distribution):
def var_fn():
return variable_scope.variable(
5.0, name="foo", aggregation=variable_scope.VariableAggregation.MEAN)
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(5.0, self.evaluate(mirrored_var))
def model_fn():
return mirrored_var.assign_sub(1.0)
self.evaluate(distribution.experimental_local_results(
distribution.extended.call_for_each_replica(model_fn)))
self.assertEqual(4.0, self.evaluate(mirrored_var))
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredAndSyncOnReadVariableInitializerTest(test.TestCase):
def testAssignMirroredVarInitializer(self, distribution):
# This test is not eager compatible since in eager mode variables are
# initialized upon construction rather than when the initialization op runs.
with context.graph_mode():
def var_fn():
v = variable_scope.variable(1.0, name="foo")
return v
with distribution.scope():
mirrored_var = distribution.extended.call_for_each_replica(var_fn)
self.assertIsInstance(mirrored_var, values.MirroredVariable)
self.assertFalse(self.evaluate(mirrored_var.is_initialized()))
self.evaluate(mirrored_var.initializer)
self.assertTrue(self.evaluate(mirrored_var.is_initialized()))
def testAssignReplicaLocalVarInitializer(self, distribution):
# This test is not eager compatible since in eager mode variables are
# initialized upon construction rather than when the initialization op runs.
with context.graph_mode():
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
self.assertIsInstance(v_sum, values.SyncOnReadVariable)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(
model_fn)
self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
self.assertFalse(self.evaluate(sync_on_read_var.is_initialized()))
self.evaluate(sync_on_read_var.initializer)
self.assertTrue(self.evaluate(sync_on_read_var.is_initialized()))
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class SyncOnReadVariableAssignTest(test.TestCase):
def testAssignReplicaLocalVarSumAggregation(self, distribution):
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.SUM)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
self.evaluate(variables.global_variables_initializer())
# Each replica has a value of 1.0 assigned to it in replica context.
# When we read the value using `read_var` we should see the SUM of the
# values on all of the replicas.
self.assertEqual(2.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
# Assigning 6.0 in cross replica context will assign a value of
# 6.0/num_replicas to each replica.
tlv_ops = sync_on_read_var.assign(6.0)
self.evaluate(tlv_ops)
# On reading the sync on read var we should get the assigned value back.
# The value on all the replicas are added before being returned by
# `read_var`.
self.assertEqual(6.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
def testAssignReplicaLocalVarMeanAggregation(self, distribution):
def model_fn():
v_sum = variable_scope.variable(
1.0,
synchronization=variable_scope.VariableSynchronization.ON_READ,
aggregation=variable_scope.VariableAggregation.MEAN)
return v_sum
with distribution.scope():
sync_on_read_var = distribution.extended.call_for_each_replica(model_fn)
self.assertIsInstance(sync_on_read_var, values.SyncOnReadVariable)
self.evaluate(variables.global_variables_initializer())
# Each replica has a value of 1.0 assigned to it in replica context.
# When we read the value using `read_var` we should see the MEAN of the
# values on all replicas, which is the value assigned in replica context.
self.assertEqual(1.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
tlv_ops = sync_on_read_var.assign(6.0)
self.evaluate(tlv_ops)
# On reading the sync on read var we should get the MEAN of all values
# which is equal to the value assigned.
self.assertEqual(6.0, self.evaluate(
distribution.extended.read_var(sync_on_read_var)))
class MockModel(object):
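"""Callable test helper that owns one or two variables.
Calling an instance returns factor * variables[0], plus variables[1]
when the model was built with two variables.
"""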
def __init__(self, two_variables=False):
self.variables = []
self.variables.append(variable_scope.variable(1.25, name="dummy_var1"))
if two_variables:
self.variables.append(variable_scope.variable(2.0, name="dummy_var2"))
def __call__(self, factor=2):
x = factor * self.variables[0]
if len(self.variables) > 1:
x += self.variables[1]
return x
class MiniModel(keras_training.Model):
"""Minimal model for mnist.
Useful for testing and debugging on slow TPU simulators.
"""
def __init__(self):
super(MiniModel, self).__init__(name="")
self.fc = keras_core.Dense(1, name="fc", kernel_initializer="ones",
bias_initializer="ones")
def call(self, inputs, training=True):
inputs = array_ops.ones([1, 10])
return self.fc(inputs)
@combinations.generate(combinations.combine(
distribution=[
combinations.mirrored_strategy_with_gpu_and_cpu,
combinations.core_mirrored_strategy_with_gpu_and_cpu],
mode=["graph", "eager"]))
class MirroredStrategyDefunTest(test.TestCase):
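# Runs `model_fn` once per replica, compares the per-device results against
# `expected_result`, and then checks that each defun's concrete function
# captured exactly the model's variables on every device.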
def _call_and_check(self, distribution, model_fn, inputs, expected_result,
defuns, two_variables=False):
cpu_dev = device_util.canonicalize("CPU:0")
gpu_dev = device_util.canonicalize("GPU:0")
devices = [cpu_dev, gpu_dev]
with distribution.scope():
mock_model = MockModel(two_variables)
self.evaluate(variables.global_variables_initializer())
result = distribution.extended.call_for_each_replica(
model_fn, args=[mock_model] + inputs)
for r in range(len(devices)):
device_result = values.select_replica(r, result)
device_expected_result = values.select_replica(r, expected_result)
self.assertAllClose(device_expected_result,
self.evaluate(device_result))
for defun in defuns:
# `Function`s are specialized to the current device stack, so
# call_for_each has one trace per device. To check that the expected set
# of variables was accessed on each trace, we first retrieve each
# device-specific graph function.
per_replica_graph_functions = (
distribution.extended.call_for_each_replica(
defun.get_concrete_function, args=[mock_model] + inputs))
for device in devices:
graph_function = per_replica_graph_functions.get(device=device)
self.assertEqual(set(mock_model.variables),
set(graph_function.graph.variables))
def testVariableInDefun(self, distribution):
@function.defun
def times_two(mock_model):
return mock_model()
def model_fn(mock_model):
return times_two(mock_model)
self._call_and_check(distribution, model_fn, [], 2.5, [times_two])
def testVariableInNestedDefun(self, distribution):
@function.defun
def times_two(mock_model):
return mock_model()
@function.defun
def two_x_plus_one(mock_model):
return times_two(mock_model) + 1
def model_fn(mock_model):
return two_x_plus_one(mock_model)
self._call_and_check(distribution, model_fn, [], 3.5,
[times_two, two_x_plus_one])
def testTwoVariablesInNestedDefun(self, distribution):
@function.defun
def fn1(mock_model):
return mock_model()
@function.defun
def fn2(mock_model):
return fn1(mock_model) + 1
def model_fn(mock_model):
return fn2(mock_model)
self._call_and_check(distribution, model_fn, [], 5.5, [fn1, fn2],
two_variables=True)
def testGradientTapeOverNestedDefuns(self, distribution):
@function.defun
def fn1(mock_model):
return mock_model()
@function.defun
def fn2(mock_model):
return fn1(mock_model) + 1
def model_fn(mock_model):
with backprop.GradientTape(persistent=True) as gtape:
result = fn2(mock_model)
grads = gtape.gradient(result,
[v.get() for v in mock_model.variables])
return grads
self._call_and_check(distribution, model_fn, [], [2.0, 1.0], [fn1, fn2],
two_variables=True)
def testPassPerReplica(self, distribution):
@function.defun
def fn1(mock_model, factor):
return mock_model(factor)
device_map = values.ReplicaDeviceMap(("/device:CPU:0", "/device:GPU:0"))
factors = values.PerReplica(device_map, (5.0, 3.0))
expected_result = values.PerReplica(device_map, (5.0 * 1.25, 3.0 * 1.25))
self._call_and_check(distribution, fn1, [factors], expected_result, [fn1])
def testTrain(self, distribution):
with distribution.scope():
mock_model = MiniModel()
mock_model.call = function.defun(mock_model.call)
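# Gradients are computed per replica with implicit_grad over the
# defun-wrapped model, then applied with a single distributed optimizer
# update.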
def loss_fn(ctx):
del ctx
return mock_model(array_ops.ones([1, 10]))
gradients_fn = backprop.implicit_grad(loss_fn)
gradients_fn = optimizer_lib.get_filtered_grad_fn(gradients_fn)
grads_and_vars = distribution.extended.call_for_each_replica(
gradients_fn, args=(None,))
optimizer = gradient_descent.GradientDescentOptimizer(0.25)
update_ops = optimizer._distributed_apply(distribution, grads_and_vars) # pylint: disable=protected-access
if not context.executing_eagerly():
self.evaluate(variables.global_variables_initializer())
self.evaluate(update_ops)
updated_var_values = self.evaluate(mock_model.variables)
# All variables start at 1.0 and get two updates of 0.25.
self.assertAllEqual(0.5 * np.ones([10, 1]), updated_var_values[0])
self.assertAllEqual([0.5], updated_var_values[1])
@combinations.generate(
combinations.combine(
distribution=[
combinations.NamedDistribution(
"Mirrored",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.MirroredStrategy(num_gpus_per_worker=
context.num_gpus()),
required_gpus=1),
combinations.NamedDistribution(
"CoreMirrored",
# pylint: disable=g-long-lambda
lambda: mirrored_strategy.CoreMirroredStrategy(
mirrored_strategy.all_local_devices()),
required_gpus=1)
],
mode=["graph"]))
class MultiWorkerMirroredStrategyTest(
multi_worker_test_base.MultiWorkerTestBase,
strategy_test_lib.DistributionTestBase):
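# Configures the strategy with a two-worker cluster spec so the multi-worker
# code paths are exercised.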
def _configure_distribution_strategy(self, distribution):
cluster_spec = server_lib.ClusterSpec({
"worker": ["/job:worker/task:0", "/job:worker/task:1"]
})
distribution.configure(cluster_spec=cluster_spec)
def test_num_replicas_in_sync(self, distribution):
self._configure_distribution_strategy(distribution)
# We calculate the total number of GPUs across the two workers specified
# in the cluster spec.
self.assertEqual(context.num_gpus() * 2, distribution.num_replicas_in_sync)
def testMinimizeLossGraph(self, distribution):
self._configure_distribution_strategy(distribution)
self._test_minimize_loss_graph(distribution, learning_rate=0.05)
def testDeviceScope(self, distribution):
"""Test the device scope of multi-worker MirroredStrategy."""
self._configure_distribution_strategy(distribution)
with distribution.scope():
a = constant_op.constant(1.)
with ops.device("/cpu:0"):
b = constant_op.constant(1.)
self.assertEqual(a.device, "/job:worker/task:0")
self.assertEqual(b.device, "/job:worker/task:0/device:CPU:0")
def testMakeInputFnIteratorWithDataset(self, distribution):
self._configure_distribution_strategy(distribution)
dataset_fn = lambda: dataset_ops.Dataset.range(100)
num_gpus = context.num_gpus()
num_workers = 2
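# Replica j on each worker should see element i + j at step i, so the
# expected per-step values are [i, ..., i + num_gpus - 1] repeated once
# per worker.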
expected_values = [[i+j for j in range(num_gpus)] * num_workers
for i in range(0, 100, num_gpus)]
with context.graph_mode(), self.cached_session() as sess:
# `expected_input_pipeline_id` is None because the input_fn will be called
# multiple times, each with a different input_pipeline_id.
input_fn = self._input_fn_to_test_input_context(
dataset_fn,
expected_num_replicas_in_sync=num_workers*num_gpus,
expected_num_input_pipelines=num_workers,
expected_input_pipeline_id=None)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values, sess)
def DISABLED_testMakeInputFnIteratorWithCallable(self, distribution):
self._configure_distribution_strategy(distribution)
def fn():
dataset = dataset_ops.Dataset.range(100)
it = dataset.make_one_shot_iterator()
return it.get_next
num_gpus = context.num_gpus()
num_workers = 2
expected_values = []
for i in range(0, 100, num_gpus):
expected_values.append([i+j for j in range(num_gpus)] * num_workers)
with context.graph_mode(), self.cached_session() as sess:
# `expected_input_pipeline_id` is None because the input_fn will be called
# multiple times, each with a different input_pipeline_id.
input_fn = self._input_fn_to_test_input_context(
fn,
expected_num_replicas_in_sync=num_workers*num_gpus,
expected_num_input_pipelines=num_workers,
expected_input_pipeline_id=None)
iterator = distribution.make_input_fn_iterator(input_fn)
self._test_input_fn_iterator(
iterator, distribution.extended.worker_devices, expected_values, sess,
test_reinitialize=False)
def testUpdateConfigProto(self, distribution):
distribution.configure(cluster_spec={"worker": ["fake1", "fake2"]})
config_proto = config_pb2.ConfigProto()
new_config = distribution.update_config_proto(config_proto)
# Verify isolate_session_state
self.assertTrue(new_config.isolate_session_state)
class MultiWorkerMirroredStrategyTestWithChief(
multi_worker_test_base.MultiWorkerTestBase,
strategy_test_lib.DistributionTestBase):
@classmethod
def setUpClass(cls):
"""Create a local cluster with 2 workers and 1 chief."""
cls._cluster_spec = multi_worker_test_base.create_in_process_cluster(
num_workers=2, num_ps=0, has_chief=True)
cls._default_target = "grpc://" + cls._cluster_spec["chief"][0]
def testMinimizeLossGraph(self):
strategy = mirrored_strategy.MirroredStrategy(
num_gpus_per_worker=context.num_gpus())
strategy.configure(cluster_spec=self._cluster_spec)
self._test_minimize_loss_graph(strategy, learning_rate=0.05)
def testMinimizeLossGraphCoreMirroredStrategy(self):
strategy = mirrored_strategy.CoreMirroredStrategy(
mirrored_strategy.all_local_devices())
strategy.configure(cluster_spec=self._cluster_spec)
self._test_minimize_loss_graph(strategy, learning_rate=0.05)
def testMinimizeLossGraphCoreMirroredStrategyWithOneNode(self):
cluster_spec = {}
cluster_spec["chief"] = self._cluster_spec["chief"]
tf_config = {"cluster": cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
strategy = mirrored_strategy.CoreMirroredStrategy()
self.assertIsInstance(strategy.extended._inferred_cross_device_ops,
cross_device_ops_lib.NcclAllReduce)
self._test_minimize_loss_graph(strategy, learning_rate=0.05)
def testInitializeFromTFConfig(self):
tf_config = {"cluster": self._cluster_spec}
with test.mock.patch.dict("os.environ",
{"TF_CONFIG": json.dumps(tf_config)}):
strategy = mirrored_strategy.CoreMirroredStrategy()
self.assertEqual(
max(context.num_gpus(), 1) * 3, strategy.num_replicas_in_sync)
def testSummaryForReplicaZeroOnly(self):
strategy = mirrored_strategy.CoreMirroredStrategy(
mirrored_strategy.all_local_devices())
strategy.configure(cluster_spec=self._cluster_spec)
self._test_summary_for_replica_zero_only(strategy)
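# Returns the current replica id as a Tensor; in eager mode the replica
# context hands back a plain Python int, which is wrapped in a constant.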
def _replica_id():
replica_id = ds_context.get_replica_context().replica_id_in_sync_group
if not isinstance(replica_id, ops.Tensor):
replica_id = constant_op.constant(replica_id)
return replica_id
if __name__ == "__main__":
test.main()
|
[
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.contrib.distribute.python.combinations.combine",
"tensorflow.python.framework.constant_op.constant",
"numpy.ones",
"tensorflow.python.distribute.values.select_replica",
"tensorflow.python.framework.ops.device",
"json.dumps",
"tensorflow.python.training.optimizer.get_filtered_grad_fn",
"tensorflow.python.training.server_lib.ClusterSpec",
"tensorflow.python.framework.ops.executing_eagerly_outside_functions",
"tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy",
"tensorflow.python.eager.test.main",
"tensorflow.python.eager.backprop.GradientTape",
"tensorflow.python.layers.core.Dense",
"tensorflow.python.eager.context.executing_eagerly",
"tensorflow.python.eager.context.graph_mode",
"tensorflow.python.ops.variable_scope.variable_creator_scope",
"tensorflow.contrib.distribute.python.multi_worker_test_base.create_in_process_cluster",
"tensorflow.python.eager.backprop.implicit_grad",
"tensorflow.python.distribute.distribution_strategy_context.get_replica_context",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.keras.layers.core.Dense",
"tensorflow.python.ops.array_ops.ones",
"tensorflow.python.ops.variable_scope.variable",
"tensorflow.python.ops.rnn.bidirectional_dynamic_rnn",
"tensorflow.python.distribute.device_util.canonicalize",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.distribute.values.DistributedValues",
"tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy",
"tensorflow.python.distribute.values.PerReplica",
"tensorflow.python.ops.variable_scope.get_variable",
"tensorflow.python.framework.func_graph.FuncGraph",
"tensorflow.python.eager.function.defun",
"tensorflow.python.training.gradient_descent.GradientDescentOptimizer",
"tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices",
"tensorflow.python.ops.rnn_cell_impl.LSTMCell",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.core.protobuf.config_pb2.ConfigProto",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.python.ops.variable_scope.variable_scope",
"tensorflow.python.distribute.values.ReplicaDeviceMap"
]
variable_scope\n'), ((35528, 35566), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""b"""'}), "(1.0, name='b')\n", (35551, 35566), False, 'from tensorflow.python.ops import variable_scope\n'), ((35703, 35723), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (35721, 35723), False, 'from tensorflow.python.eager import context\n'), ((36672, 36709), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""c"""', '[1]'], {}), "('c', [1])\n", (36699, 36709), False, 'from tensorflow.python.ops import variable_scope\n'), ((36756, 36793), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""b"""', '[1]'], {}), "('b', [1])\n", (36783, 36793), False, 'from tensorflow.python.ops import variable_scope\n'), ((36930, 36950), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (36948, 36950), False, 'from tensorflow.python.eager import context\n'), ((38674, 38714), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (38697, 38714), False, 'from tensorflow.python.ops import variable_scope\n'), ((39693, 39733), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (39716, 39733), False, 'from tensorflow.python.ops import variable_scope\n'), ((40547, 40644), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.SUM)\n", (40570, 40644), False, 'from tensorflow.python.ops import variable_scope\n'), ((41360, 41400), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (41383, 41400), False, 'from tensorflow.python.ops import variable_scope\n'), ((41904, 42002), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (41927, 42002), False, 'from tensorflow.python.ops import variable_scope\n'), ((42794, 42892), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (42817, 42892), False, 'from tensorflow.python.ops import variable_scope\n'), ((43540, 43580), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (43563, 43580), False, 'from tensorflow.python.ops import variable_scope\n'), ((44556, 44654), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (44579, 44654), False, 'from tensorflow.python.ops import variable_scope\n'), ((45453, 45551), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='foo', 
aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (45476, 45551), False, 'from tensorflow.python.ops import variable_scope\n'), ((46203, 46243), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(5.0)'], {'name': '"""foo"""'}), "(5.0, name='foo')\n", (46226, 46243), False, 'from tensorflow.python.ops import variable_scope\n'), ((46910, 47008), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(5.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(5.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (46933, 47008), False, 'from tensorflow.python.ops import variable_scope\n'), ((47807, 47905), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(5.0)'], {'name': '"""foo"""', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(5.0, name='foo', aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (47830, 47905), False, 'from tensorflow.python.ops import variable_scope\n'), ((48959, 48979), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (48977, 48979), False, 'from tensorflow.python.eager import context\n'), ((49663, 49683), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (49681, 49683), False, 'from tensorflow.python.eager import context\n'), ((50763, 50917), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n', (50786, 50917), False, 'from tensorflow.python.ops import variable_scope\n'), ((52069, 52224), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.MEAN)\n', (52092, 52224), False, 'from tensorflow.python.ops import variable_scope\n'), ((53265, 53313), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.25)'], {'name': '"""dummy_var1"""'}), "(1.25, name='dummy_var1')\n", (53288, 53313), False, 'from tensorflow.python.ops import variable_scope\n'), ((57921, 57952), 'tensorflow.python.eager.function.defun', 'function.defun', (['mock_model.call'], {}), '(mock_model.call)\n', (57935, 57952), False, 'from tensorflow.python.eager import function\n'), ((58067, 58098), 'tensorflow.python.eager.backprop.implicit_grad', 'backprop.implicit_grad', (['loss_fn'], {}), '(loss_fn)\n', (58089, 58098), False, 'from tensorflow.python.eager import backprop\n'), ((58120, 58168), 'tensorflow.python.training.optimizer.get_filtered_grad_fn', 'optimizer_lib.get_filtered_grad_fn', (['gradients_fn'], {}), '(gradients_fn)\n', (58154, 58168), True, 'from tensorflow.python.training import optimizer as optimizer_lib\n'), ((58294, 58341), 'tensorflow.python.training.gradient_descent.GradientDescentOptimizer', 'gradient_descent.GradientDescentOptimizer', (['(0.25)'], {}), '(0.25)\n', (58335, 58341), False, 'from tensorflow.python.training import gradient_descent\n'), ((60596, 60621), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', 
(['(1.0)'], {}), '(1.0)\n', (60616, 60621), False, 'from tensorflow.python.framework import constant_op\n'), ((60958, 60988), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(100)'], {}), '(100)\n', (60983, 60988), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((61179, 61199), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (61197, 61199), False, 'from tensorflow.python.eager import context\n'), ((61951, 61981), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(100)'], {}), '(100)\n', (61976, 61981), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((62254, 62274), 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), '()\n', (62272, 62274), False, 'from tensorflow.python.eager import context\n'), ((64024, 64061), 'tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices', 'mirrored_strategy.all_local_devices', ([], {}), '()\n', (64059, 64061), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((64501, 64541), 'tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy', 'mirrored_strategy.CoreMirroredStrategy', ([], {}), '()\n', (64539, 64541), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((64964, 65004), 'tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy', 'mirrored_strategy.CoreMirroredStrategy', ([], {}), '()\n', (65002, 65004), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((65209, 65246), 'tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices', 'mirrored_strategy.all_local_devices', ([], {}), '()\n', (65244, 65246), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((9072, 9128), 'tensorflow.python.ops.variable_scope.variable_creator_scope', 'variable_scope.variable_creator_scope', (['thread_creator_fn'], {}), '(thread_creator_fn)\n', (9109, 9128), False, 'from tensorflow.python.ops import variable_scope\n'), ((9185, 9213), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {}), '(1.0)\n', (9208, 9213), False, 'from tensorflow.python.ops import variable_scope\n'), ((14709, 14753), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo/bar"""'}), "(1.0, name='foo/bar')\n", (14732, 14753), False, 'from tensorflow.python.ops import variable_scope\n'), ((14771, 14817), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo_1/bar"""'}), "(1.0, name='foo_1/bar')\n", (14794, 14817), False, 'from tensorflow.python.ops import variable_scope\n'), ((14835, 14883), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo_1/bar_1"""'}), "(1.0, name='foo_1/bar_1')\n", (14858, 14883), False, 'from tensorflow.python.ops import variable_scope\n'), ((14901, 14947), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo/bar_1"""'}), "(1.0, name='foo/bar_1')\n", (14924, 14947), False, 'from tensorflow.python.ops import variable_scope\n'), ((16128, 16167), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""common"""'], {}), "('common')\n", (16157, 16167), False, 'from tensorflow.python.ops import variable_scope\n'), ((16186, 16199), 
'tensorflow.python.layers.core.Dense', 'core.Dense', (['(1)'], {}), '(1)\n', (16196, 16199), False, 'from tensorflow.python.layers import core\n'), ((16242, 16255), 'tensorflow.python.layers.core.Dense', 'core.Dense', (['(1)'], {}), '(1)\n', (16252, 16255), False, 'from tensorflow.python.layers import core\n'), ((16439, 16452), 'tensorflow.python.layers.core.Dense', 'core.Dense', (['(1)'], {}), '(1)\n', (16449, 16452), False, 'from tensorflow.python.layers import core\n'), ((17476, 17515), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""common"""'], {}), "('common')\n", (17505, 17515), False, 'from tensorflow.python.ops import variable_scope\n'), ((17530, 17571), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var1"""'}), "(1.0, name='var1')\n", (17553, 17571), False, 'from tensorflow.python.ops import variable_scope\n'), ((17726, 17893), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var2"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), "(1.0, name='var2', synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n", (17749, 17893), False, 'from tensorflow.python.ops import variable_scope\n'), ((17946, 18115), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""var3"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "(1.0, name='var3', synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (17969, 18115), False, 'from tensorflow.python.ops import variable_scope\n'), ((19183, 19222), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""common"""'], {}), "('common')\n", (19212, 19222), False, 'from tensorflow.python.ops import variable_scope\n'), ((19237, 19277), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var1"""', '[1]'], {}), "('var1', [1])\n", (19264, 19277), False, 'from tensorflow.python.ops import variable_scope\n'), ((19432, 19598), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var2"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), "('var2', [1], synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n", (19459, 19598), False, 'from tensorflow.python.ops import variable_scope\n'), ((19639, 19807), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var3"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': 'variable_scope.VariableAggregation.MEAN'}), "('var3', [1], synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation=variable_scope.\n VariableAggregation.MEAN)\n", (19666, 19807), False, 'from tensorflow.python.ops import variable_scope\n'), ((19907, 19944), 'tensorflow.python.ops.variable_scope.variable_scope', 'variable_scope.variable_scope', (['"""main"""'], {}), "('main')\n", (19936, 19944), False, 'from tensorflow.python.ops import variable_scope\n'), ((19958, 20003), 
'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""var-main0"""', '[1]'], {}), "('var-main0', [1])\n", (19985, 20003), False, 'from tensorflow.python.ops import variable_scope\n'), ((24824, 24927), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""v"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.NONE'}), "('v', [1], synchronization=variable_scope.\n VariableSynchronization.NONE)\n", (24851, 24927), False, 'from tensorflow.python.ops import variable_scope\n'), ((25284, 25388), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""v"""', 'synchronization': 'variable_scope.VariableSynchronization.NONE'}), "(1.0, name='v', synchronization=variable_scope.\n VariableSynchronization.NONE)\n", (25307, 25388), False, 'from tensorflow.python.ops import variable_scope\n'), ((25665, 25730), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""v"""', 'synchronization': '"""Invalid"""'}), "(1.0, name='v', synchronization='Invalid')\n", (25688, 25730), False, 'from tensorflow.python.ops import variable_scope\n'), ((25970, 26100), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""v"""', '[1]'], {'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': '"""invalid"""'}), "('v', [1], synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation='invalid')\n", (25997, 26100), False, 'from tensorflow.python.ops import variable_scope\n'), ((26369, 26500), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""v"""', 'synchronization': 'variable_scope.VariableSynchronization.ON_WRITE', 'aggregation': '"""invalid"""'}), "(1.0, name='v', synchronization=variable_scope.\n VariableSynchronization.ON_WRITE, aggregation='invalid')\n", (26392, 26500), False, 'from tensorflow.python.ops import variable_scope\n'), ((29048, 29088), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (29086, 29088), False, 'from tensorflow.python.ops import variables\n'), ((31937, 31977), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (31975, 31977), False, 'from tensorflow.python.ops import variables\n'), ((33615, 33636), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""foo"""'], {}), "('foo')\n", (33629, 33636), False, 'from tensorflow.python.framework import ops\n'), ((33650, 33685), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""a"""'}), "(1.0, name='a')\n", (33670, 33685), False, 'from tensorflow.python.framework import constant_op\n'), ((33763, 33798), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""b"""'}), "(1.0, name='b')\n", (33783, 33798), False, 'from tensorflow.python.framework import constant_op\n'), ((33882, 33904), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""main"""'], {}), "('main')\n", (33896, 33904), False, 'from tensorflow.python.framework import ops\n'), ((34404, 34431), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['None', '"""foo"""'], {}), "(None, 'foo')\n", (34418, 34431), False, 'from tensorflow.python.framework import ops\n'), ((34445, 34480), 
'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {'name': '"""a"""'}), "(1.0, name='a')\n", (34465, 34480), False, 'from tensorflow.python.framework import constant_op\n'), ((34558, 34593), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(2.0)'], {'name': '"""b"""'}), "(2.0, name='b')\n", (34578, 34593), False, 'from tensorflow.python.framework import constant_op\n'), ((35578, 35599), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""foo"""'], {}), "('foo')\n", (35592, 35599), False, 'from tensorflow.python.framework import ops\n'), ((35758, 35780), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""main"""'], {}), "('main')\n", (35772, 35780), False, 'from tensorflow.python.framework import ops\n'), ((35794, 35832), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""a"""'}), "(1.0, name='a')\n", (35817, 35832), False, 'from tensorflow.python.ops import variable_scope\n'), ((36805, 36826), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""foo"""'], {}), "('foo')\n", (36819, 36826), False, 'from tensorflow.python.framework import ops\n'), ((36985, 37007), 'tensorflow.python.framework.ops.name_scope', 'ops.name_scope', (['"""main"""'], {}), "('main')\n", (36999, 37007), False, 'from tensorflow.python.framework import ops\n'), ((37021, 37058), 'tensorflow.python.ops.variable_scope.get_variable', 'variable_scope.get_variable', (['"""a"""', '[1]'], {}), "('a', [1])\n", (37048, 37058), False, 'from tensorflow.python.ops import variable_scope\n'), ((39941, 39981), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (39979, 39981), False, 'from tensorflow.python.ops import variables\n'), ((40858, 40898), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (40896, 40898), False, 'from tensorflow.python.ops import variables\n'), ((41593, 41633), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (41631, 41633), False, 'from tensorflow.python.ops import variables\n'), ((42201, 42241), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (42239, 42241), False, 'from tensorflow.python.ops import variables\n'), ((43091, 43131), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (43129, 43131), False, 'from tensorflow.python.ops import variables\n'), ((43773, 43813), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (43811, 43813), False, 'from tensorflow.python.ops import variables\n'), ((44853, 44893), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (44891, 44893), False, 'from tensorflow.python.ops import variables\n'), ((45750, 45790), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (45788, 45790), False, 'from tensorflow.python.ops import variables\n'), ((46436, 46476), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (46474, 46476), False, 'from tensorflow.python.ops import 
variables\n'), ((47207, 47247), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (47245, 47247), False, 'from tensorflow.python.ops import variables\n'), ((48104, 48144), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (48142, 48144), False, 'from tensorflow.python.ops import variables\n'), ((49013, 49053), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'name': '"""foo"""'}), "(1.0, name='foo')\n", (49036, 49053), False, 'from tensorflow.python.ops import variable_scope\n'), ((49723, 49877), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(1.0)'], {'synchronization': 'variable_scope.VariableSynchronization.ON_READ', 'aggregation': 'variable_scope.VariableAggregation.SUM'}), '(1.0, synchronization=variable_scope.\n VariableSynchronization.ON_READ, aggregation=variable_scope.\n VariableAggregation.SUM)\n', (49746, 49877), False, 'from tensorflow.python.ops import variable_scope\n'), ((51162, 51202), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (51200, 51202), False, 'from tensorflow.python.ops import variables\n'), ((52469, 52509), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (52507, 52509), False, 'from tensorflow.python.ops import variables\n'), ((53365, 53412), 'tensorflow.python.ops.variable_scope.variable', 'variable_scope.variable', (['(2.0)'], {'name': '"""dummy_var2"""'}), "(2.0, name='dummy_var2')\n", (53388, 53412), False, 'from tensorflow.python.ops import variable_scope\n'), ((54614, 54654), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), '()\n', (54652, 54654), False, 'from tensorflow.python.ops import variables\n'), ((54825, 54857), 'tensorflow.python.distribute.values.select_replica', 'values.select_replica', (['r', 'result'], {}), '(r, result)\n', (54846, 54857), False, 'from tensorflow.python.distribute import values\n'), ((54891, 54932), 'tensorflow.python.distribute.values.select_replica', 'values.select_replica', (['r', 'expected_result'], {}), '(r, expected_result)\n', (54912, 54932), False, 'from tensorflow.python.distribute import values\n'), ((57043, 57081), 'tensorflow.python.eager.backprop.GradientTape', 'backprop.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (57064, 57081), False, 'from tensorflow.python.eager import backprop\n'), ((58470, 58497), 'tensorflow.python.eager.context.executing_eagerly', 'context.executing_eagerly', ([], {}), '()\n', (58495, 58497), False, 'from tensorflow.python.eager import context\n'), ((60155, 60173), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (60171, 60173), False, 'from tensorflow.python.eager import context\n'), ((60632, 60652), 'tensorflow.python.framework.ops.device', 'ops.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (60642, 60652), False, 'from tensorflow.python.framework import ops\n'), ((60666, 60691), 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['(1.0)'], {}), '(1.0)\n', (60686, 60691), False, 'from tensorflow.python.framework import constant_op\n'), ((63764, 63782), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (63780, 63782), False, 'from 
tensorflow.python.eager import context\n'), ((5192, 5220), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(2)'], {}), '(2)\n', (5217, 5220), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((5254, 5283), 'tensorflow.python.data.ops.dataset_ops.Dataset.range', 'dataset_ops.Dataset.range', (['(10)'], {}), '(10)\n', (5279, 5283), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((10893, 10919), 'tensorflow.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', (['"""fg"""'], {}), "('fg')\n", (10913, 10919), False, 'from tensorflow.python.framework import func_graph\n'), ((11711, 11743), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (11741, 11743), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((12847, 12879), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (12877, 12879), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((12929, 12955), 'tensorflow.python.framework.func_graph.FuncGraph', 'func_graph.FuncGraph', (['"""fg"""'], {}), "('fg')\n", (12949, 12955), False, 'from tensorflow.python.framework import func_graph\n'), ((13532, 13564), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (13562, 13564), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((13880, 13912), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (13910, 13912), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((14304, 14336), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (14334, 14336), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((14955, 14987), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (14985, 14987), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((15684, 15716), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (15714, 15716), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((26718, 26750), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (26748, 26750), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((38721, 38753), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (38751, 38753), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((58020, 58043), 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[1, 10]'], {}), '([1, 10])\n', (58034, 58043), False, 'from tensorflow.python.ops import array_ops\n'), ((58521, 58561), 'tensorflow.python.ops.variables.global_variables_initializer', 'variables.global_variables_initializer', ([], {}), 
'()\n', (58559, 58561), False, 'from tensorflow.python.ops import variables\n'), ((58757, 58773), 'numpy.ones', 'np.ones', (['[10, 1]'], {}), '([10, 1])\n', (58764, 58773), True, 'import numpy as np\n'), ((64459, 64480), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (64469, 64480), False, 'import json\n'), ((64922, 64943), 'json.dumps', 'json.dumps', (['tf_config'], {}), '(tf_config)\n', (64932, 64943), False, 'import json\n'), ((9299, 9331), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (9329, 9331), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((16365, 16397), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (16395, 16397), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((16687, 16728), 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors', 'dataset_ops.Dataset.from_tensors', (['[[1.0]]'], {}), '([[1.0]])\n', (16719, 16728), False, 'from tensorflow.python.data.ops import dataset_ops\n'), ((17656, 17688), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (17686, 17688), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((19362, 19394), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (19392, 19394), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((33694, 33726), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (33724, 33726), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((34489, 34521), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (34519, 34521), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((35613, 35645), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (35643, 35645), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((36840, 36872), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (36870, 36872), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((37991, 38082), 'tensorflow.contrib.distribute.python.mirrored_strategy.MirroredStrategy', 'mirrored_strategy.MirroredStrategy', (["['/device:GPU:0', '/device:GPU:1', '/device:CPU:0']"], {}), "(['/device:GPU:0', '/device:GPU:1',\n '/device:CPU:0'])\n", (38025, 38082), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((38291, 38386), 'tensorflow.contrib.distribute.python.mirrored_strategy.CoreMirroredStrategy', 'mirrored_strategy.CoreMirroredStrategy', (["['/device:GPU:0', '/device:GPU:1', '/device:CPU:0']"], {}), "(['/device:GPU:0', '/device:GPU:1',\n '/device:CPU:0'])\n", (38329, 38386), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((42366, 42398), 
'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (42396, 42398), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((45018, 45050), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (45048, 45050), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((47372, 47404), 'tensorflow.python.distribute.distribution_strategy_context.get_replica_context', 'ds_context.get_replica_context', ([], {}), '()\n', (47402, 47404), True, 'from tensorflow.python.distribute import distribution_strategy_context as ds_context\n'), ((65043, 65061), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (65059, 65061), False, 'from tensorflow.python.eager import context\n'), ((59451, 59488), 'tensorflow.contrib.distribute.python.mirrored_strategy.all_local_devices', 'mirrored_strategy.all_local_devices', ([], {}), '()\n', (59486, 59488), False, 'from tensorflow.contrib.distribute.python import mirrored_strategy\n'), ((59188, 59206), 'tensorflow.python.eager.context.num_gpus', 'context.num_gpus', ([], {}), '()\n', (59204, 59206), False, 'from tensorflow.python.eager import context\n')]
|
import numpy
import argparse
from matplotlib import colors
from src.powerspectrum import from_frequency_to_eta
from src.powerspectrum import fiducial_eor_power_spectrum
from src.radiotelescope import RadioTelescope
from src.plottools import plot_2dpower_spectrum
from src.plottools import plot_power_contours
from src.generaltools import from_jansky_to_milikelvin
from src.covariance import calibrated_residual_error
from src.covariance import compute_weights
from src.util import redundant_baseline_finder
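# Plots 2D power spectra of calibrated residuals (sky-model errors, beam
# variations, and their combination) for the HERA-128 layout loaded below,
# overlaid with contours of the residual-to-EoR power ratio.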
def main(labelfontsize=16, ticksize=11):
output_path = "/home/ronniyjoseph/Sync/PhD/Thesis/ThesisTex/images/chapter_7/"
contour_levels = numpy.array([1e0, 1e1, 1e2])
# telescope_position_path = "./Data/MWA_Compact_Coordinates.txt"
# tile_diameter = 4
# fraction_broken = 0.3
# model_limit = 1e-1
telescope_position_path = "./Data/HERA_128.txt"
tile_diameter = 14
fraction_broken = 0.3
model_limit = 1e-1
k_perp_range = numpy.array([1e-4, 1.1e-1])
u_range = numpy.logspace(0, numpy.log10(500), 50)
frequency_range = numpy.linspace(135, 165, 251) * 1e6
eta = from_frequency_to_eta(frequency_range)
eor_power_spectrum = fiducial_eor_power_spectrum(u_range, eta)
telescope = RadioTelescope(load=True, path=telescope_position_path)
redundant_table = telescope.baseline_table
# redundant_table = redundant_baseline_finder(telescope.baseline_table)
weights = compute_weights(u_range, redundant_table.u_coordinates,
redundant_table.v_coordinates)
    sky_clocations = None  # [(6e-2, 0.21), (4e-2, 0.13), (3e-2, 0.07 )]
beam_clocations = sky_clocations
total_clocations = sky_clocations
# print(numpy.max(numpy.sqrt(redundant_table.u_coordinates**2 + redundant_table.v_coordinates**2)))
sky_calibrated = calibrated_residual_error(u=u_range, nu=frequency_range, residuals='sky',
                                               calibration_type='sky', weights=weights, tile_diameter=tile_diameter,
broken_baselines_weight = fraction_broken, model_limit=model_limit)
beam_calibrated = calibrated_residual_error(u=u_range, nu=frequency_range, residuals='beam',
                                                calibration_type='sky', weights=weights,
tile_diameter=tile_diameter,
broken_baselines_weight = fraction_broken, model_limit=model_limit)
total_calibrated = calibrated_residual_error(u=u_range, nu=frequency_range, residuals='both',
                                                 calibration_type='sky', weights=weights,
tile_diameter=tile_diameter,
broken_baselines_weight = fraction_broken, model_limit=model_limit)
figure, axes = pyplot.subplots(1, 3, figsize=(15, 5))
ps_norm = colors.LogNorm(vmin=1e3, vmax=1e15)
plot_2dpower_spectrum(u_range, eta, frequency_range, sky_calibrated, title="Sky Error", axes=axes[0],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=False,
xlabel_show=True, norm=ps_norm, ylabel_show=True)
plot_2dpower_spectrum(u_range, eta, frequency_range, beam_calibrated, title="Beam Variations", axes=axes[1],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=False,
xlabel_show=True, norm=ps_norm, ylabel_show=False)
plot_2dpower_spectrum(u_range, eta, frequency_range, total_calibrated, title="Total Error", axes=axes[2],
axes_label_font=labelfontsize, tickfontsize=ticksize, colorbar_show=True,
xlabel_show=True, norm=ps_norm, ylabel_show=False, zlabel_show=True)
plot_power_contours(u_range, eta, frequency_range, from_jansky_to_milikelvin(sky_calibrated,
frequency_range)/eor_power_spectrum,
axes=axes[0], ratio=True, axes_label_font=labelfontsize, tickfontsize=ticksize, xlabel_show=True,
norm=ps_norm, ylabel_show=True, contour_levels=contour_levels, contour_label_locs=sky_clocations)
plot_power_contours(u_range, eta, frequency_range, from_jansky_to_milikelvin(beam_calibrated, frequency_range)/eor_power_spectrum,
axes=axes[1], ratio=True, axes_label_font=labelfontsize, tickfontsize=ticksize, xlabel_show=True,
norm=ps_norm, ylabel_show=False, contour_levels=contour_levels, contour_label_locs=beam_clocations)
plot_power_contours(u_range, eta, frequency_range, from_jansky_to_milikelvin(total_calibrated, frequency_range)/eor_power_spectrum,
axes=axes[2], ratio=True, axes_label_font=labelfontsize, tickfontsize=ticksize, xlabel_show=True,
norm=ps_norm, ylabel_show=False, contour_levels=contour_levels, contour_label_locs=total_clocations)
pyplot.tight_layout()
# pyplot.savefig(output_path + "Calibrated_Residuals_Sky_MWA.pdf")
pyplot.show()
return
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ssh", action="store_true", dest="ssh_key", default=False)
params = parser.parse_args()
import matplotlib
if params.ssh_key:
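        # Select a non-interactive backend before pyplot is imported,
        # e.g. when running over ssh without a display.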
matplotlib.use("Agg")
from matplotlib import pyplot
main()
|
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"src.powerspectrum.fiducial_eor_power_spectrum",
"src.plottools.plot_2dpower_spectrum",
"src.covariance.calibrated_residual_error",
"matplotlib.colors.LogNorm",
"numpy.array",
"matplotlib.use",
"src.powerspectrum.from_frequency_to_eta",
"src.covariance.compute_weights",
"numpy.linspace",
"src.generaltools.from_jansky_to_milikelvin",
"numpy.log10",
"matplotlib.pyplot.subplots",
"src.radiotelescope.RadioTelescope"
] |
[((663, 694), 'numpy.array', 'numpy.array', (['[1.0, 10.0, 100.0]'], {}), '([1.0, 10.0, 100.0])\n', (674, 694), False, 'import numpy\n'), ((984, 1011), 'numpy.array', 'numpy.array', (['[0.0001, 0.11]'], {}), '([0.0001, 0.11])\n', (995, 1011), False, 'import numpy\n'), ((1136, 1174), 'src.powerspectrum.from_frequency_to_eta', 'from_frequency_to_eta', (['frequency_range'], {}), '(frequency_range)\n', (1157, 1174), False, 'from src.powerspectrum import from_frequency_to_eta\n'), ((1200, 1241), 'src.powerspectrum.fiducial_eor_power_spectrum', 'fiducial_eor_power_spectrum', (['u_range', 'eta'], {}), '(u_range, eta)\n', (1227, 1241), False, 'from src.powerspectrum import fiducial_eor_power_spectrum\n'), ((1260, 1315), 'src.radiotelescope.RadioTelescope', 'RadioTelescope', ([], {'load': '(True)', 'path': 'telescope_position_path'}), '(load=True, path=telescope_position_path)\n', (1274, 1315), False, 'from src.radiotelescope import RadioTelescope\n'), ((1453, 1544), 'src.covariance.compute_weights', 'compute_weights', (['u_range', 'redundant_table.u_coordinates', 'redundant_table.v_coordinates'], {}), '(u_range, redundant_table.u_coordinates, redundant_table.\n v_coordinates)\n', (1468, 1544), False, 'from src.covariance import compute_weights\n'), ((1843, 2060), 'src.covariance.calibrated_residual_error', 'calibrated_residual_error', ([], {'u': 'u_range', 'nu': 'frequency_range', 'residuals': '"""sky"""', 'calibration_type': '"""sky"""', 'weights': 'weights', 'tile_diameter': 'tile_diameter', 'broken_baselines_weight': 'fraction_broken', 'model_limit': 'model_limit'}), "(u=u_range, nu=frequency_range, residuals='sky',\n calibration_type='sky', weights=weights, tile_diameter=tile_diameter,\n broken_baselines_weight=fraction_broken, model_limit=model_limit)\n", (1868, 2060), False, 'from src.covariance import calibrated_residual_error\n'), ((2180, 2398), 'src.covariance.calibrated_residual_error', 'calibrated_residual_error', ([], {'u': 'u_range', 'nu': 'frequency_range', 'residuals': '"""beam"""', 'calibration_type': '"""sky"""', 'weights': 'weights', 'tile_diameter': 'tile_diameter', 'broken_baselines_weight': 'fraction_broken', 'model_limit': 'model_limit'}), "(u=u_range, nu=frequency_range, residuals='beam',\n calibration_type='sky', weights=weights, tile_diameter=tile_diameter,\n broken_baselines_weight=fraction_broken, model_limit=model_limit)\n", (2205, 2398), False, 'from src.covariance import calibrated_residual_error\n'), ((2592, 2810), 'src.covariance.calibrated_residual_error', 'calibrated_residual_error', ([], {'u': 'u_range', 'nu': 'frequency_range', 'residuals': '"""both"""', 'calibration_type': '"""sky"""', 'weights': 'weights', 'tile_diameter': 'tile_diameter', 'broken_baselines_weight': 'fraction_broken', 'model_limit': 'model_limit'}), "(u=u_range, nu=frequency_range, residuals='both',\n calibration_type='sky', weights=weights, tile_diameter=tile_diameter,\n broken_baselines_weight=fraction_broken, model_limit=model_limit)\n", (2617, 2810), False, 'from src.covariance import calibrated_residual_error\n'), ((3004, 3042), 'matplotlib.pyplot.subplots', 'pyplot.subplots', (['(1)', '(3)'], {'figsize': '(15, 5)'}), '(1, 3, figsize=(15, 5))\n', (3019, 3042), False, 'from matplotlib import pyplot\n'), ((3058, 3110), 'matplotlib.colors.LogNorm', 'colors.LogNorm', ([], {'vmin': '(1000.0)', 'vmax': '(1000000000000000.0)'}), '(vmin=1000.0, vmax=1000000000000000.0)\n', (3072, 3110), False, 'from matplotlib import colors\n'), ((3099, 3339), 'src.plottools.plot_2dpower_spectrum', 
'plot_2dpower_spectrum', (['u_range', 'eta', 'frequency_range', 'sky_calibrated'], {'title': '"""Sky Error"""', 'axes': 'axes[0]', 'axes_label_font': 'labelfontsize', 'tickfontsize': 'ticksize', 'colorbar_show': '(False)', 'xlabel_show': '(True)', 'norm': 'ps_norm', 'ylabel_show': '(True)'}), "(u_range, eta, frequency_range, sky_calibrated, title=\n 'Sky Error', axes=axes[0], axes_label_font=labelfontsize, tickfontsize=\n ticksize, colorbar_show=False, xlabel_show=True, norm=ps_norm,\n ylabel_show=True)\n", (3120, 3339), False, 'from src.plottools import plot_2dpower_spectrum\n'), ((3379, 3627), 'src.plottools.plot_2dpower_spectrum', 'plot_2dpower_spectrum', (['u_range', 'eta', 'frequency_range', 'beam_calibrated'], {'title': '"""Beam Variations"""', 'axes': 'axes[1]', 'axes_label_font': 'labelfontsize', 'tickfontsize': 'ticksize', 'colorbar_show': '(False)', 'xlabel_show': '(True)', 'norm': 'ps_norm', 'ylabel_show': '(False)'}), "(u_range, eta, frequency_range, beam_calibrated, title\n ='Beam Variations', axes=axes[1], axes_label_font=labelfontsize,\n tickfontsize=ticksize, colorbar_show=False, xlabel_show=True, norm=\n ps_norm, ylabel_show=False)\n", (3400, 3627), False, 'from src.plottools import plot_2dpower_spectrum\n'), ((3667, 3928), 'src.plottools.plot_2dpower_spectrum', 'plot_2dpower_spectrum', (['u_range', 'eta', 'frequency_range', 'total_calibrated'], {'title': '"""Total Error"""', 'axes': 'axes[2]', 'axes_label_font': 'labelfontsize', 'tickfontsize': 'ticksize', 'colorbar_show': '(True)', 'xlabel_show': '(True)', 'norm': 'ps_norm', 'ylabel_show': '(False)', 'zlabel_show': '(True)'}), "(u_range, eta, frequency_range, total_calibrated,\n title='Total Error', axes=axes[2], axes_label_font=labelfontsize,\n tickfontsize=ticksize, colorbar_show=True, xlabel_show=True, norm=\n ps_norm, ylabel_show=False, zlabel_show=True)\n", (3688, 3928), False, 'from src.plottools import plot_2dpower_spectrum\n'), ((5195, 5216), 'matplotlib.pyplot.tight_layout', 'pyplot.tight_layout', ([], {}), '()\n', (5214, 5216), False, 'from matplotlib import pyplot\n'), ((5292, 5305), 'matplotlib.pyplot.show', 'pyplot.show', ([], {}), '()\n', (5303, 5305), False, 'from matplotlib import pyplot\n'), ((5359, 5384), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5382, 5384), False, 'import argparse\n'), ((1045, 1061), 'numpy.log10', 'numpy.log10', (['(500)'], {}), '(500)\n', (1056, 1061), False, 'import numpy\n'), ((1090, 1119), 'numpy.linspace', 'numpy.linspace', (['(135)', '(165)', '(251)'], {}), '(135, 165, 251)\n', (1104, 1119), False, 'import numpy\n'), ((5558, 5579), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (5572, 5579), False, 'import matplotlib\n'), ((4020, 4078), 'src.generaltools.from_jansky_to_milikelvin', 'from_jansky_to_milikelvin', (['sky_calibrated', 'frequency_range'], {}), '(sky_calibrated, frequency_range)\n', (4045, 4078), False, 'from src.generaltools import from_jansky_to_milikelvin\n'), ((4480, 4539), 'src.generaltools.from_jansky_to_milikelvin', 'from_jansky_to_milikelvin', (['beam_calibrated', 'frequency_range'], {}), '(beam_calibrated, frequency_range)\n', (4505, 4539), False, 'from src.generaltools import from_jansky_to_milikelvin\n'), ((4862, 4922), 'src.generaltools.from_jansky_to_milikelvin', 'from_jansky_to_milikelvin', (['total_calibrated', 'frequency_range'], {}), '(total_calibrated, frequency_range)\n', (4887, 4922), False, 'from src.generaltools import from_jansky_to_milikelvin\n')]
|
"""
Created on Mon Nov 23 2020
@author: <NAME>
"""
import numpy as np
from PIL import Image
import cv2
import time
import copy
import arcpy
from arcpy import env
from arcpy.sa import Viewshed2
#from arcpy.da import *
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import matplotlib.pyplot as plt
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "0"
import math
#env.scratchWorkspace = r"in_memory"
# print('ClearWorkspaceCache_management: ', arcpy.ClearWorkspaceCache_management())
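# ArcGIS geoprocessing environment: in-memory scratch workspace, WGS 84 UTM
# zone 18N output coordinates, GPU-backed processing, and LZ77-compressed
# 128x128-tiled raster output.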
arcpy.ClearWorkspaceCache_management()
env.scratchWorkspace = r"in_memory"
#env.workspace = r"../data/space/"
#env.workspace = r"C:/Users/Akmaral/Desktop/coverage/test4/shape_file_gen/"
env.overwriteOutput = True
env.outputCoordinateSystem = arcpy.SpatialReference("WGS 1984 UTM Zone 18N")
env.geographicTransformations = "Arc_1950_To_WGS_1984_5; PSAD_1956_To_WGS_1984_6"
#env.parallelProcessingFactor = "200%"
env.processorType = "GPU"
env.gpuID = "0"
env.compression = "LZ77" #"LZ77" #"JPEG" # LZW
env.tileSize = "128 128"
env.pyramid = "PYRAMIDS -1 CUBIC LZ77 NO_SKIP"
# arcpy.Delete_management("in_memory")
class ViewshedCoverageEnv(gym.Env):
"""
Description:
Viewshed analysis on raster data
Source:
ArcGIS function
Observation:
Type: Image
Actions:
Type: Discrete
Num Action
        0     Pan  +delta_pan  (5 deg)
        1     Pan  -delta_pan  (5 deg)
        2     Tilt +delta_tilt (3 deg)
        3     Tilt -delta_tilt (3 deg)
        4     Zoom in  (delta_zoom = 1.25x)
        5     Zoom out (delta_zoom = 1.25x)
Reward:
        Number of newly covered pixels: visible in the current viewshed
        and still uncovered in the total-coverage map
Starting State:
Init image of the city
Episode termination:
        Episode length exceeds max_iter (200 steps)
"""
metadata = {'render.modes': ['human']}
def __init__(self):
# import image of city
self.city_array = np.array((Image.open(r"../data/images/RasterAstanaCroppedZero.png")), dtype=np.uint16) #.resize((900,600))
# self.city_array = self.city_array/100
        print('+++ ', np.max(self.city_array), np.min(self.city_array))
        self.city_array = self.city_array / 100 - 285  # convert to meters and remove the 285 m base offset
print('Original Image: ', type(self.city_array), self.city_array.shape)
# crop the image with center at camera
self.camera_location = (3073, 11684, 350) # x,y,z coordinate # (11685, 7074, 350) - RasterAstana.png
# self.camera_location = (3073, 11684, 350) # x,y,z coordinate # (11685, 7074, 350) - RasterAstana.png
self.coverage_radius = 2000 # .. km square from the center
self.city_array = self.city_array[self.camera_location[1]-self.coverage_radius:self.camera_location[1]+self.coverage_radius,
self.camera_location[0]-self.coverage_radius:self.camera_location[0]+self.coverage_radius]
# resize the image
# self.city_array = self.city_array[2500:3500, 2500:3500]#np.resize(self.city_array, (1000,1000))
# self.city_array_res = self.city_array[0:1000, 0:1000]
self.im_height, self.im_width = self.city_array.shape # reshape (width, height) [300,500] --> example: height = 500, width = 300
print('Cropped Image: ', type(self.city_array), self.city_array.shape)
print('Range Image: ', np.min(self.city_array), np.max(self.city_array))
# input raster
self.input_raster = arcpy.NumPyArrayToRaster(self.city_array)
# input shapefile
self.shape_file = r"../data/input_shapefile/1/points_XYTableToPoint_second.shp"
# CAMERA params
self.camera_number = 1
self.camera_location_cropped = (int(self.coverage_radius), int(self.coverage_radius), self.camera_location[2]-285)
print('Camera Loc: ', self.camera_location_cropped)
#
self.max_distance_min_zoom = 100 # at min zoom - 20mm - the max distance 50
self.max_distance_max_zoom = 4000 # at min zoom - 800mm - the max distance 2000
# PTZ
self.pan_pos = 0
self.tilt_pos = -45
self.zoom_pos = 20 # 0 - 20mm (min), 1 - 800 mm (max)
self.delta_pan = 5 # deg
self.delta_tilt = 3 # deg
self.delta_zoom = 1.25 # 1.25x times
self.horizon_fov = 21 # 21 # Field of View deg
self.vertical_fov = 11.8 # 11.8 # Field of View deg
self.zoom_distance = self.max_distance_min_zoom
# VIEWSHED params
self.init_x = self.camera_location_cropped[0] # self.im_width/2 #310
self.init_y = self.camera_location_cropped[1] # self.im_height/2 #80
self.observer_height = self.camera_location_cropped[2] + 5 # height
self.analysis_type = "FREQUENCY"
self.analysis_method = "PERIMETER_SIGHTLINES"
self.azimuth1 = self.pan_pos - self.horizon_fov/2
self.azimuth2 = self.pan_pos + self.horizon_fov/2
self.vertical_lower_angle = self.tilt_pos - self.vertical_fov/2
self.vertical_upper_angle = self.tilt_pos + self.vertical_fov/2
self.radius_is_3d = 'True'
self.inner_radius = 0
self.outer_radius = self.zoom_distance
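        # These fields map onto arcpy.sa.Viewshed2 parameters (analysis type
        # and method, horizontal start/end angles, vertical scan angles, and
        # the inner/outer visibility radii).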
# GYM env params
self.observation_space = spaces.Box(low=0, high=255, shape=(self.im_width,self.im_height, 1), dtype = np.uint8)
self.action_space = spaces.Discrete(6) # 6 different actions
        self.state = np.zeros((self.im_height, self.im_width))
# render
self.max_render = 100
self.is_render = 'True'
self.iteration = 0
self.info = 0
self.info_x = 0.0
self.info_y = 0.0
self.seed(0)
# reward
        self.ratio_threshold = 0.02
self.reward_good_step = 1
self.reward_bad_step = -0.05
self.max_iter = 200
# input
self.input_total_coverage = np.asarray(Image.open(r"../data/images/RasterTotalCoverage4.png"))
#self.input_total_coverage = np.asarray(Image.open(r"../data/images/RasterTotalCoverage4Resized.png"))
self.rad_matrix, self.angle_matrix = self.create_cartesian()
def step(self, action):
        # assert self.action_space.contains(action)
        # This function maps the flat action index to a (PTZ action, observer)
        # pair, updates the camera state, recomputes the viewshed, and derives
        # the next state, reward and termination flag from it. With a single
        # camera the flat index is the action type itself; see the sketch below.
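        # Illustrative multi-camera factorisation (hypothetical, not exercised
        # in the single-camera configuration of this environment):
        #   action_type = action % 6        # which of the 6 PTZ actions
        #   observer_n  = action // 6 + 1   # which camera, 1-based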
        action_type = action  # single-camera case
        observer_n = self.camera_number
self.update_shapefile_discrete(self.shape_file, action_type, observer_n)
# create the viewshed
output_array, visible_area = self.create_viewshed(self.input_raster, self.shape_file)
output_array2, visible_area2 = self.get_coverage_fast()
self.testing_im = output_array2
        # interpret the viewshed output: next state, reward, termination
next_state = output_array
ratio = visible_area/output_array.size
# for rendering
self.state = output_array
self.info = ratio
crossed_map = np.multiply(self.input_total_coverage,(output_array))
crossed_points = (crossed_map > 0).astype(int)
crossed_area = crossed_points.sum()
reward = crossed_area
        # alternative ratio-based reward:
        # if ratio > self.ratio_threshold:
        #     reward = self.reward_good_step + ratio*5
        # else:
        #     reward = self.reward_bad_step + ratio*5
if self.iteration > self.max_iter:
done = 1
else:
done = 0
self.iteration = self.iteration + 1
self.input_total_coverage = np.multiply(self.input_total_coverage,(1-output_array))
next_state = np.stack((self.input_total_coverage, next_state), axis = 0)
return next_state, reward, done
def seed(self, seed = None):
        self.np_random, seed = seeding.np_random(seed)
return [seed]
def reset(self):
print('Env reset ...')
self.reset_shapefile(self.shape_file)
self.state = np.zeros((self.im_height, self.im_width)) # self.state
self.iteration = 0
next_state = np.stack((self.input_total_coverage, self.state), axis = 0)
return next_state
def render(self, mode='human'):
        mode = 0  # force grayscale output; set to 1 for an RGB overlay
if mode == 1:
city_gray = np.array(self.city_array, dtype=np.uint8)
show_array = np.stack((city_gray,)*3, axis=-1)
show_array[:,:,2] = self.state*255
show_array = cv2.resize(show_array, (1000,1000), interpolation = cv2.INTER_AREA)
else:
show_array = np.array(self.state*255, dtype='uint8')
show_array = cv2.resize(show_array, (1000,1000), interpolation = cv2.INTER_AREA)
try:
cv2.startWindowThread()
cv2.namedWindow("preview")
cv2.imshow("preview", show_array)
cv2.namedWindow("COVERAGE")
#show_array1 = cv2.resize(self.input_total_coverage, (1000,1000), interpolation = cv2.INTER_AREA)
#cv2.imshow("COVERAGE", show_array1)
array = np.array(self.testing_im*255, dtype='uint8')
show_array1 = cv2.resize(array, (1000,1000), interpolation = cv2.INTER_AREA)
cv2.imshow("COVERAGE", show_array1)
#cv2.imshow("COVERAGE", show_array1)
cv2.waitKey(100)
#if cv2.waitKey(1)& 0xFF == ord('q'):
# quit()
except KeyboardInterrupt:
cv2.destroyAllWindows()
# quit()
def close(self):
pass
def reset_shapefile(self, shape_file):
#print('Reset init camera locations')
fieldlist=['AZIMUTH1','AZIMUTH2']
tokens=['SHAPE@X','SHAPE@Y']
with arcpy.da.UpdateCursor(shape_file,tokens+fieldlist) as cursor:
for row in cursor:
row[0]= self.init_x
row[1]= self.init_y
row[2]= self.azimuth1
row[3]= self.azimuth2
cursor.updateRow(row)
del cursor
def update_shapefile_discrete(self, shape_file, action_type, observer_n):
        # Discrete actions:
        #   0  Pan  +5 deg       1  Pan  -5 deg
        #   2  Tilt +3 deg       3  Tilt -3 deg
        #   4  Zoom in (x1.25)   5  Zoom out (/1.25)
if action_type == 0: # rotate + delta
print('... pan right')
# update camera/ptz setting
self.pan_pos += self.delta_pan
if self.pan_pos >= 360:
self.pan_pos -= 360
elif action_type == 1: # rotate - delta deg
print('... pan left')
# update camera/ptz setting
self.pan_pos -= self.delta_pan
if self.pan_pos < 0:
self.pan_pos += 360
elif action_type == 2: # tilt + deg
print('... tilt up')
# update camera/ptz setting
self.tilt_pos += self.delta_tilt
if self.tilt_pos > 20:
self.tilt_pos = 20
elif action_type == 3: # tilt - deg
print('... tilt down')
# update camera/ptz setting
self.tilt_pos -= self.delta_tilt
if self.tilt_pos < -45:
self.tilt_pos = -45
elif action_type == 4: # zoom + in
print('... zoom in')
# update camera/ptz setting
self.zoom_pos *= self.delta_zoom
self.horizon_fov /= self.delta_zoom
self.vertical_fov /= self.delta_zoom
self.zoom_distance *= self.delta_zoom
# boundaries
if self.zoom_pos > 800:
self.zoom_pos = 800
if self.horizon_fov < 0.5:
self.horizon_fov = 0.5
if self.vertical_fov < 0.3:
self.vertical_fov = 0.3
if self.zoom_distance > self.max_distance_max_zoom:
self.zoom_distance = self.max_distance_max_zoom
elif action_type == 5: # zoom - out
print('... zoom out')
# update camera/ptz setting
self.zoom_pos /= self.delta_zoom
self.horizon_fov *= self.delta_zoom
self.vertical_fov *= self.delta_zoom
self.zoom_distance /= self.delta_zoom
# boundaries
if self.zoom_pos < 20:
self.zoom_pos = 20
if self.horizon_fov > 21:
self.horizon_fov = 21
if self.vertical_fov > 11.8:
self.vertical_fov = 11.8
if self.zoom_distance < self.max_distance_min_zoom:
self.zoom_distance = self.max_distance_min_zoom
else:
            print('No action done ..')
def create_viewshed(self, input_raster, shape_file):
# UPDATE viewshed params
self.azimuth1 = self.pan_pos - self.horizon_fov/2
if self.azimuth1 < 0:
self.azimuth1 += 360
self.azimuth2 = self.pan_pos + self.horizon_fov/2
self.vertical_lower_angle = self.tilt_pos - self.vertical_fov/2
self.vertical_upper_angle = self.tilt_pos + self.vertical_fov/2
self.outer_radius = self.zoom_distance
print('1 - camera : pan_pos {}, tilt_pos {} , zoom_pos {}, horizon_fov {}, vertical_fov {}, zoom_distance {}'.format(
self.pan_pos, self.tilt_pos, self.zoom_pos, self.horizon_fov, self.vertical_fov, self.zoom_distance))
print('2 - viewshed : azimuth1 {}, azimuth2 {} , vertical_lower_angle {}, vertical_upper_angle {}, outer_radius {}'.format(
self.azimuth1, self.azimuth2, self.vertical_lower_angle, self.vertical_upper_angle, self.outer_radius))
start_t = time.time()
#self.azimuth1 = 315 #int(input("s1 "))
#self.azimuth2 = 45 #int(input("s2 "))
# self.vertical_lower_angle = -90
# self.vertical_upper_angle = 90
outViewshed2 = Viewshed2(in_raster=self.input_raster, in_observer_features= self.shape_file, out_agl_raster= "", analysis_type= self.analysis_type,
vertical_error= 0, out_observer_region_relationship_table= "", refractivity_coefficient= 0.13,
surface_offset= 0, observer_offset = 0, observer_elevation = self.observer_height, inner_radius= self.inner_radius,
outer_radius= self.outer_radius, inner_radius_is_3d = self.radius_is_3d, outer_radius_is_3d = self.radius_is_3d,
horizontal_start_angle= self.azimuth1, horizontal_end_angle= self.azimuth2, vertical_upper_angle = self.vertical_upper_angle,
vertical_lower_angle= self.vertical_lower_angle, analysis_method=self.analysis_method)
print('Elapsed time for viewshed: ', time.time() - start_t)
# extract the array
        output_array = arcpy.RasterToNumPyArray(outViewshed2) # per-cell count of observers that can see that pixel
        # arcpy encodes NoData as 255; map it to 0 (not visible)
        output_array[output_array == 255] = 0
visible_points = output_array > 0
visible_area = visible_points.sum()
print('visible_points ', visible_area)
# save
# im = Image.fromarray(output_array*255)
# im.save("../data/images/RasterTotalCoverage4.png")
return output_array, visible_area
def create_cartesian(self):
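        # Precompute, for every pixel, its polar coordinates (radius and
        # compass angle) relative to the camera at the raster centre; reused by
        # get_coverage_fast to build sector masks without per-step Python loops.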
rad_matrix = np.zeros((self.im_height, self.im_width))
angle_matrix = np.zeros((self.im_height, self.im_width))
for i in range(self.im_height):
for j in range(self.im_width):
point_rad = math.sqrt((self.coverage_radius-i)**2 + (self.coverage_radius-j)**2)
point_angle = math.degrees(math.atan2((self.coverage_radius-i),(j-self.coverage_radius)))
point_angle *= -1
point_angle += 90
if point_angle < 0:
point_angle += 360
rad_matrix[i,j] = point_rad
angle_matrix[i,j] = point_angle
return rad_matrix, angle_matrix
def get_coverage_fast(self):
start_t = time.time()
output_array = np.zeros((self.im_height, self.im_width))
temp_angle = self.pan_pos
# temp_angle = 450-temp_angle
# if temp_angle >= 360:
# temp_angle -= 360
# temp_angle = 90-self.pan_pos
# if temp_angle < -180:
# temp_angle = 90 + (temp_angle + 180)
#
# print('test: ', temp_angle, self.pan_pos)
# #self.azimuth1 = temp_angle - self.horizon_fov/2
#self.azimuth1 = 90 - self.azimuth1
#if self.azimuth1 < 0:
# self.azimuth1 += 360
#self.azimuth2 = temp_angle + self.horizon_fov/2
horizon_start = temp_angle - self.horizon_fov/2
horizon_end = temp_angle + self.horizon_fov/2
if horizon_start < 0:
horizon_start += 360
if horizon_end >= 360:
horizon_end -= 360
# if horizon_start <= -180:
# horizon_end = 180 + (horizon_start + 180)
#
# if horizon_end > 180:
# horizon_start = -180 + (horizon_end - 180)
vertical_start = self.tilt_pos - self.vertical_fov/2
vertical_end = self.tilt_pos + self.vertical_fov/2
if vertical_start < 0 and vertical_end < 0:
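            # Ground footprint of the tilted view cone: with a negative tilt t,
            # a ray from a camera of height h meets the ground at distance
            # h * tan(90deg + t) from the camera.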
radius_inner = self.observer_height*math.tan(math.radians(90+vertical_start))
radius_outer = self.observer_height*math.tan(math.radians(90+vertical_end))
if radius_outer > self.zoom_distance:
radius_outer = self.zoom_distance
# matrix
rad_matrix, angle_matrix = self.rad_matrix, self.angle_matrix
#inside_rad = radius_inner < rad_matrix and rad_matrix < radius_outer
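            # element-wise AND of two boolean masks: pixels whose radius lies
            # inside [radius_inner, radius_outer]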
inside_rad = np.multiply( np.greater_equal(rad_matrix, radius_inner), np.greater_equal(radius_outer, rad_matrix))
if horizon_start < horizon_end:
inside_angle = np.multiply(np.greater_equal(angle_matrix, horizon_start), np.greater_equal(horizon_end, angle_matrix))
else:
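                # sector straddles 0 deg: element-wise OR (boolean add) of the
                # two half-conditions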
inside_angle = np.add(np.greater_equal(angle_matrix, horizon_start), np.greater_equal(horizon_end, angle_matrix))
inside_sector = np.multiply(inside_rad, inside_angle)
            print('mask shapes: ', inside_rad.shape, inside_angle.shape, inside_sector.shape)
            print('2 - coverage : horizon_start {}, horizon_end {}, vertical_start {}, vertical_end {}, radius_inner {}, radius_outer {}'.format(
                horizon_start, horizon_end, vertical_start, vertical_end, radius_inner, radius_outer))
output_array = inside_sector
print('Elapsed time for coverage: ', time.time() - start_t)
output_array = output_array.astype(int)
print('*** ', type(output_array), output_array.shape)
visible_points = (output_array > 0).astype(int)
            visible_area = visible_points.sum()
else:
            print('View cone reaches the horizon or above; returning empty coverage.')
visible_area = 0
return output_array, visible_area
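# A minimal smoke-test sketch (assumes the arcpy/gym/OpenCV stack imported
# above is available and the raster/shapefile paths exist):
if __name__ == '__main__':
    env = ViewshedCoverageEnv()
    state = env.reset()
    for _ in range(5):
        state, reward, done = env.step(env.action_space.sample())
        env.render()
        if done:
            break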
|
[
"math.atan2",
"gym.spaces.Discrete",
"arcpy.sa.Viewshed2",
"cv2.startWindowThread",
"arcpy.ClearWorkspaceCache_management",
"cv2.imshow",
"gym.utils.seeding.np_random",
"numpy.multiply",
"math.radians",
"numpy.max",
"cv2.destroyAllWindows",
"arcpy.NumPyArrayToRaster",
"cv2.resize",
"arcpy.SpatialReference",
"numpy.stack",
"arcpy.RasterToNumPyArray",
"arcpy.da.UpdateCursor",
"math.sqrt",
"cv2.waitKey",
"numpy.min",
"numpy.greater_equal",
"numpy.zeros",
"time.time",
"PIL.Image.open",
"numpy.array",
"gym.spaces.Box",
"cv2.namedWindow"
] |
[((518, 556), 'arcpy.ClearWorkspaceCache_management', 'arcpy.ClearWorkspaceCache_management', ([], {}), '()\n', (554, 556), False, 'import arcpy\n'), ((762, 809), 'arcpy.SpatialReference', 'arcpy.SpatialReference', (['"""WGS 1984 UTM Zone 18N"""'], {}), "('WGS 1984 UTM Zone 18N')\n", (784, 809), False, 'import arcpy\n'), ((3378, 3419), 'arcpy.NumPyArrayToRaster', 'arcpy.NumPyArrayToRaster', (['self.city_array'], {}), '(self.city_array)\n', (3402, 3419), False, 'import arcpy\n'), ((5290, 5380), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(self.im_width, self.im_height, 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(self.im_width, self.im_height, 1), dtype\n =np.uint8)\n', (5300, 5380), False, 'from gym import error, spaces, utils\n'), ((5405, 5423), 'gym.spaces.Discrete', 'spaces.Discrete', (['(6)'], {}), '(6)\n', (5420, 5423), False, 'from gym import error, spaces, utils\n'), ((5468, 5509), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (5476, 5509), True, 'import numpy as np\n'), ((7541, 7593), 'numpy.multiply', 'np.multiply', (['self.input_total_coverage', 'output_array'], {}), '(self.input_total_coverage, output_array)\n', (7552, 7593), True, 'import numpy as np\n'), ((8078, 8134), 'numpy.multiply', 'np.multiply', (['self.input_total_coverage', '(1 - output_array)'], {}), '(self.input_total_coverage, 1 - output_array)\n', (8089, 8134), True, 'import numpy as np\n'), ((8156, 8213), 'numpy.stack', 'np.stack', (['(self.input_total_coverage, next_state)'], {'axis': '(0)'}), '((self.input_total_coverage, next_state), axis=0)\n', (8164, 8213), True, 'import numpy as np\n'), ((8323, 8342), 'gym.utils.seeding.np_random', 'seeding.np_random', ([], {}), '()\n', (8340, 8342), False, 'from gym.utils import seeding\n'), ((8485, 8526), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (8493, 8526), True, 'import numpy as np\n'), ((8588, 8645), 'numpy.stack', 'np.stack', (['(self.input_total_coverage, self.state)'], {'axis': '(0)'}), '((self.input_total_coverage, self.state), axis=0)\n', (8596, 8645), True, 'import numpy as np\n'), ((15835, 15846), 'time.time', 'time.time', ([], {}), '()\n', (15844, 15846), False, 'import time\n'), ((16051, 16739), 'arcpy.sa.Viewshed2', 'Viewshed2', ([], {'in_raster': 'self.input_raster', 'in_observer_features': 'self.shape_file', 'out_agl_raster': '""""""', 'analysis_type': 'self.analysis_type', 'vertical_error': '(0)', 'out_observer_region_relationship_table': '""""""', 'refractivity_coefficient': '(0.13)', 'surface_offset': '(0)', 'observer_offset': '(0)', 'observer_elevation': 'self.observer_height', 'inner_radius': 'self.inner_radius', 'outer_radius': 'self.outer_radius', 'inner_radius_is_3d': 'self.radius_is_3d', 'outer_radius_is_3d': 'self.radius_is_3d', 'horizontal_start_angle': 'self.azimuth1', 'horizontal_end_angle': 'self.azimuth2', 'vertical_upper_angle': 'self.vertical_upper_angle', 'vertical_lower_angle': 'self.vertical_lower_angle', 'analysis_method': 'self.analysis_method'}), "(in_raster=self.input_raster, in_observer_features=self.shape_file,\n out_agl_raster='', analysis_type=self.analysis_type, vertical_error=0,\n out_observer_region_relationship_table='', refractivity_coefficient=\n 0.13, surface_offset=0, observer_offset=0, observer_elevation=self.\n observer_height, inner_radius=self.inner_radius, outer_radius=self.\n outer_radius, inner_radius_is_3d=self.radius_is_3d, outer_radius_is_3d=\n self.radius_is_3d, horizontal_start_angle=self.azimuth1,\n horizontal_end_angle=self.azimuth2, vertical_upper_angle=self.\n vertical_upper_angle, vertical_lower_angle=self.vertical_lower_angle,\n analysis_method=self.analysis_method)\n", (16060, 16739), False, 'from arcpy.sa import Viewshed2\n'), ((17848, 17886), 'arcpy.RasterToNumPyArray', 'arcpy.RasterToNumPyArray', (['outViewshed2'], {}), '(outViewshed2)\n', (17872, 17886), False, 'import arcpy\n'), ((22883, 22924), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (22891, 22924), True, 'import numpy as np\n'), ((22948, 22989), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (22956, 22989), True, 'import numpy as np\n'), ((23610, 23621), 'time.time', 'time.time', ([], {}), '()\n', (23619, 23621), False, 'import time\n'), ((23645, 23686), 'numpy.zeros', 'np.zeros', (['(self.im_height, self.im_width)'], {}), '((self.im_height, self.im_width))\n', (23653, 23686), True, 'import numpy as np\n'), ((1803, 1859), 'PIL.Image.open', 'Image.open', (['"""../data/images/RasterAstanaCroppedZero.png"""'], {}), "('../data/images/RasterAstanaCroppedZero.png')\n", (1813, 1859), False, 'from PIL import Image\n'), ((3276, 3299), 'numpy.min', 'np.min', (['self.city_array'], {}), '(self.city_array)\n', (3282, 3299), True, 'import numpy as np\n'), ((3301, 3324), 'numpy.max', 'np.max', (['self.city_array'], {}), '(self.city_array)\n', (3307, 3324), True, 'import numpy as np\n'), ((5948, 6001), 'PIL.Image.open', 'Image.open', (['"""../data/images/RasterTotalCoverage4.png"""'], {}), "('../data/images/RasterTotalCoverage4.png')\n", (5958, 6001), False, 'from PIL import Image\n'), ((8808, 8849), 'numpy.array', 'np.array', (['self.city_array'], {'dtype': 'np.uint8'}), '(self.city_array, dtype=np.uint8)\n', (8816, 8849), True, 'import numpy as np\n'), ((8875, 8910), 'numpy.stack', 'np.stack', (['((city_gray,) * 3)'], {'axis': '(-1)'}), '((city_gray,) * 3, axis=-1)\n', (8883, 8910), True, 'import numpy as np\n'), ((8981, 9047), 'cv2.resize', 'cv2.resize', (['show_array', '(1000, 1000)'], {'interpolation': 'cv2.INTER_AREA'}), '(show_array, (1000, 1000), interpolation=cv2.INTER_AREA)\n', (8991, 9047), False, 'import cv2\n'), ((9088, 9129), 'numpy.array', 'np.array', (['(self.state * 255)'], {'dtype': '"""uint8"""'}), "(self.state * 255, dtype='uint8')\n", (9096, 9129), True, 'import numpy as np\n'), ((9153, 9219), 'cv2.resize', 'cv2.resize', (['show_array', '(1000, 1000)'], {'interpolation': 'cv2.INTER_AREA'}), '(show_array, (1000, 1000), interpolation=cv2.INTER_AREA)\n', (9163, 9219), False, 'import cv2\n'), ((10140, 10163), 'cv2.startWindowThread', 'cv2.startWindowThread', ([], {}), '()\n', (10161, 10163), False, 'import cv2\n'), ((10176, 10202), 'cv2.namedWindow', 'cv2.namedWindow', (['"""preview"""'], {}), "('preview')\n", (10191, 10202), False, 'import cv2\n'), ((10215, 10248), 'cv2.imshow', 'cv2.imshow', (['"""preview"""', 'show_array'], {}), "('preview', show_array)\n", (10225, 10248), False, 'import cv2\n'), ((10261, 10288), 'cv2.namedWindow', 'cv2.namedWindow', (['"""COVERAGE"""'], {}), "('COVERAGE')\n", (10276, 10288), False, 'import cv2\n'), ((10470, 10516), 'numpy.array', 'np.array', (['(self.testing_im * 255)'], {'dtype': '"""uint8"""'}), "(self.testing_im * 255, dtype='uint8')\n", (10478, 10516), True, 'import numpy as np\n'), ((10541, 10602), 'cv2.resize', 'cv2.resize', (['array', '(1000, 1000)'], {'interpolation': 'cv2.INTER_AREA'}), '(array, (1000, 1000), interpolation=cv2.INTER_AREA)\n', (10551, 10602), False, 'import cv2\n'), ((10616, 10651), 'cv2.imshow', 'cv2.imshow', (['"""COVERAGE"""', 'show_array1'], {}), "('COVERAGE', show_array1)\n", (10626, 10651), False, 'import cv2\n'), ((10715, 10731), 'cv2.waitKey', 'cv2.waitKey', (['(100)'], {}), '(100)\n', (10726, 10731), False, 'import cv2\n'), ((11117, 11170), 'arcpy.da.UpdateCursor', 'arcpy.da.UpdateCursor', (['shape_file', '(tokens + fieldlist)'], {}), '(shape_file, tokens + fieldlist)\n', (11138, 11170), False, 'import arcpy\n'), ((26061, 26098), 'numpy.multiply', 'np.multiply', (['inside_rad', 'inside_angle'], {}), '(inside_rad, inside_angle)\n', (26072, 26098), True, 'import numpy as np\n'), ((1977, 2000), 'numpy.max', 'np.max', (['self.city_array'], {}), '(self.city_array)\n', (1983, 2000), True, 'import numpy as np\n'), ((2010, 2033), 'numpy.min', 'np.min', (['self.city_array'], {}), '(self.city_array)\n', (2016, 2033), True, 'import numpy as np\n'), ((10853, 10876), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (10874, 10876), False, 'import cv2\n'), ((17773, 17784), 'time.time', 'time.time', ([], {}), '()\n', (17782, 17784), False, 'import time\n'), ((23104, 23180), 'math.sqrt', 'math.sqrt', (['((self.coverage_radius - i) ** 2 + (self.coverage_radius - j) ** 2)'], {}), '((self.coverage_radius - i) ** 2 + (self.coverage_radius - j) ** 2)\n', (23113, 23180), False, 'import math\n'), ((25348, 25390), 'numpy.greater_equal', 'np.greater_equal', (['rad_matrix', 'radius_inner'], {}), '(rad_matrix, radius_inner)\n', (25364, 25390), True, 'import numpy as np\n'), ((25392, 25434), 'numpy.greater_equal', 'np.greater_equal', (['radius_outer', 'rad_matrix'], {}), '(radius_outer, rad_matrix)\n', (25408, 25434), True, 'import numpy as np\n'), ((23217, 23279), 'math.atan2', 'math.atan2', (['(self.coverage_radius - i)', '(j - self.coverage_radius)'], {}), '(self.coverage_radius - i, j - self.coverage_radius)\n', (23227, 23279), False, 'import math\n'), ((24910, 24943), 'math.radians', 'math.radians', (['(90 + vertical_start)'], {}), '(90 + vertical_start)\n', (24922, 24943), False, 'import math\n'), ((25000, 25031), 'math.radians', 'math.radians', (['(90 + vertical_end)'], {}), '(90 + vertical_end)\n', (25012, 25031), False, 'import math\n'), ((25791, 25836), 'numpy.greater_equal', 'np.greater_equal', (['angle_matrix', 'horizon_start'], {}), '(angle_matrix, horizon_start)\n', (25807, 25836), True, 'import numpy as np\n'), ((25838, 25881), 'numpy.greater_equal', 'np.greater_equal', (['horizon_end', 'angle_matrix'], {}), '(horizon_end, angle_matrix)\n', (25854, 25881), True, 'import numpy as np\n'), ((25939, 25984), 'numpy.greater_equal', 'np.greater_equal', (['angle_matrix', 'horizon_start'], {}), '(angle_matrix, horizon_start)\n', (25955, 25984), True, 'import numpy as np\n'), ((25986, 26029), 'numpy.greater_equal', 'np.greater_equal', (['horizon_end', 'angle_matrix'], {}), '(horizon_end, angle_matrix)\n', (26002, 26029), True, 'import numpy as np\n'), ((26534, 26545), 'time.time', 'time.time', ([], {}), '()\n', (26543, 26545), False, 'import time\n')]
|
from __future__ import print_function
import warnings
from setuptools import setup, find_packages, Extension
from setuptools.command.install import install
import numpy
from six.moves import input
# from theano.compat.six.moves import input
# Because many people neglected to run the pylearn2/utils/setup.py script
# separately, we compile the necessary Cython extensions here. Because Cython
# is not a strict dependency, we issue a warning when it is not available.
try:
from Cython.Distutils import build_ext
cython_available = True
except ImportError:
warnings.warn("Cython was not found and hence pylearn2.utils._window_flip "
"and pylearn2.utils._video and classes that depend on them "
"(e.g. pylearn2.train_extensions.window_flip) will not be "
"available")
cython_available = False
if cython_available:
cmdclass = {'build_ext': build_ext}
ext_modules = [Extension("pylearn2.utils._window_flip",
["pylearn2/utils/_window_flip.pyx"],
include_dirs=[numpy.get_include()]),
Extension("pylearn2.utils._video",
["pylearn2/utils/_video.pyx"],
include_dirs=[numpy.get_include()])]
else:
cmdclass = {}
ext_modules = []
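# With Cython available, the extensions can also be compiled in place via the
# standard setuptools command:
#     python setup.py build_ext --inplace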
# Inform user of setup.py develop preference
class pylearn2_install(install):
def run(self):
print("Because Pylearn2 is under heavy development, we generally do "
"not advice using the `setup.py install` command. Please "
"consider using the `setup.py develop` command instead for the "
"following reasons:\n\n1. Using `setup.py install` creates a "
"copy of the Pylearn2 source code in your Python installation "
"path. In order to update Pylearn2 afterwards you will need to "
"rerun `setup.py install` (!). Simply using `git pull` to "
"update your local copy of Pylearn2 code will not suffice. \n\n"
"2. When using `sudo` to install Pylearn2, all files, "
"including the tutorials, will be copied to a directory owned "
"by root. Not only is running tutorials as root unsafe, it "
"also means that all Pylearn2-related environment variables "
"which were defined for the user will be unavailable.\n\n"
"Pressing enter will continue the installation of Pylearn2 in "
"`develop` mode instead. Note that this means that you need to "
"keep this folder with the Pylearn2 code in its current "
"location. If you know what you are doing, and are very sure "
"that you want to install Pylearn2 using the `install` "
"command instead, please type `install`.\n")
mode = None
while mode not in ['', 'install', 'develop', 'cancel']:
if mode is not None:
print("Please try again")
mode = input("Installation mode: [develop]/install/cancel: ")
if mode in ['', 'develop']:
self.distribution.run_command('develop')
if mode == 'install':
return install.run(self)
cmdclass.update({'install': pylearn2_install})
setup(
cmdclass=cmdclass,
ext_modules=ext_modules,
name='pylearn2',
version='0.1dev',
packages=find_packages(),
description='A machine learning library built on top of Theano.',
license='BSD 3-clause license',
long_description=open('README.rst', 'rb').read().decode('utf8'),
dependency_links=['git+http://github.com/Theano/Theano.git#egg=Theano'],
install_requires=['numpy>=1.5', 'pyyaml', 'argparse', "Theano"],
scripts=['bin/pylearn2-plot-monitor', 'bin/pylearn2-print-monitor',
'bin/pylearn2-show-examples', 'bin/pylearn2-show-weights',
'bin/pylearn2-train'],
package_data={
'': ['*.cu', '*.cuh', '*.h'],
},
)
|
[
"numpy.get_include",
"warnings.warn",
"setuptools.command.install.install.run",
"six.moves.input",
"setuptools.find_packages"
] |
[((576, 786), 'warnings.warn', 'warnings.warn', (['"""Cython was not found and hence pylearn2.utils._window_flip and pylearn2.utils._video and classes that depend on them (e.g. pylearn2.train_extensions.window_flip) will not be available"""'], {}), "(\n 'Cython was not found and hence pylearn2.utils._window_flip and pylearn2.utils._video and classes that depend on them (e.g. pylearn2.train_extensions.window_flip) will not be available'\n )\n", (589, 786), False, 'import warnings\n'), ((3424, 3439), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (3437, 3439), False, 'from setuptools import setup, find_packages, Extension\n'), ((3050, 3104), 'six.moves.input', 'input', (['"""Installation mode: [develop]/install/cancel: """'], {}), "('Installation mode: [develop]/install/cancel: ')\n", (3055, 3104), False, 'from six.moves import input\n'), ((3243, 3260), 'setuptools.command.install.install.run', 'install.run', (['self'], {}), '(self)\n', (3254, 3260), False, 'from setuptools.command.install import install\n'), ((1100, 1119), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1117, 1119), False, 'import numpy\n'), ((1280, 1299), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (1297, 1299), False, 'import numpy\n')]
|
from unittest import TestCase
import numpy as np
from scvi.dataset import (
SyntheticDataset,
SyntheticRandomDataset,
SyntheticDatasetCorr,
ZISyntheticDatasetCorr,
)
from .utils import unsupervised_training_one_epoch
class TestSyntheticDataset(TestCase):
def test_train_one(self):
dataset = SyntheticDataset(batch_size=10, nb_genes=10)
unsupervised_training_one_epoch(dataset)
def test_RandomDataset_populate_and_train_one(self):
dataset = SyntheticRandomDataset(save_path="tests/data")
unsupervised_training_one_epoch(dataset)
def test_DatasetCorr_populate_and_train_one(self):
dataset = SyntheticDatasetCorr(n_cells_cluster=10)
self.assertListEqual(
np.unique(dataset.labels).tolist(), np.arange(dataset.n_clusters).tolist()
)
unsupervised_training_one_epoch(dataset)
def test_ZIDatasetCorr_populate_and_train_one(self):
dataset = ZISyntheticDatasetCorr(n_cells_cluster=10)
unsupervised_training_one_epoch(dataset)
def test_corr_zeros(self):
# Test hierarchy of zeros
nb_data = SyntheticDatasetCorr()
zi_data = ZISyntheticDatasetCorr()
zi_zeros_frac = (zi_data.X == 0).mean()
nb_zeros_frac = (nb_data.X == 0).mean()
# nb is not zero inflated
# zi is zero inflated for all genes
# We expect the number of zeros to organize accordingly
self.assertLess(nb_zeros_frac, zi_zeros_frac)
# We enforce that the zero inflated model has at least 20% of zeros
self.assertGreaterEqual(zi_zeros_frac, 0.2)
|
[
"scvi.dataset.SyntheticDataset",
"scvi.dataset.ZISyntheticDatasetCorr",
"scvi.dataset.SyntheticRandomDataset",
"numpy.arange",
"scvi.dataset.SyntheticDatasetCorr",
"numpy.unique"
] |
[((323, 367), 'scvi.dataset.SyntheticDataset', 'SyntheticDataset', ([], {'batch_size': '(10)', 'nb_genes': '(10)'}), '(batch_size=10, nb_genes=10)\n', (339, 367), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((493, 539), 'scvi.dataset.SyntheticRandomDataset', 'SyntheticRandomDataset', ([], {'save_path': '"""tests/data"""'}), "(save_path='tests/data')\n", (515, 539), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((663, 703), 'scvi.dataset.SyntheticDatasetCorr', 'SyntheticDatasetCorr', ([], {'n_cells_cluster': '(10)'}), '(n_cells_cluster=10)\n', (683, 703), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((956, 998), 'scvi.dataset.ZISyntheticDatasetCorr', 'ZISyntheticDatasetCorr', ([], {'n_cells_cluster': '(10)'}), '(n_cells_cluster=10)\n', (978, 998), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((1132, 1154), 'scvi.dataset.SyntheticDatasetCorr', 'SyntheticDatasetCorr', ([], {}), '()\n', (1152, 1154), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((1173, 1197), 'scvi.dataset.ZISyntheticDatasetCorr', 'ZISyntheticDatasetCorr', ([], {}), '()\n', (1195, 1197), False, 'from scvi.dataset import SyntheticDataset, SyntheticRandomDataset, SyntheticDatasetCorr, ZISyntheticDatasetCorr\n'), ((746, 771), 'numpy.unique', 'np.unique', (['dataset.labels'], {}), '(dataset.labels)\n', (755, 771), True, 'import numpy as np\n'), ((782, 811), 'numpy.arange', 'np.arange', (['dataset.n_clusters'], {}), '(dataset.n_clusters)\n', (791, 811), True, 'import numpy as np\n')]
|
# Copyright 2020 <NAME>
# SPDX-License-Identifier: Apache-2.0
'''
batch and commandline utilities
'''
from __future__ import print_function
import gc
import os
import ssl
import sys
import site
import shlex
import logging
import warnings
import argparse
import platform
import resource
import subprocess
import time
if sys.version_info < (3, 5):
    warnings.warn('old python')
#pylint: disable=wrong-import-position
from pathlib import Path
try:
import numpy
except ImportError:
numpy = None
try:
from pynvml.smi import nvidia_smi
except ImportError:
nvidia_smi = None
CODE_RESET = '\033[0m'
CODE_BLACK = '\033[1;30m'
CODE_RED = '\033[1;31m'
CODE_GREEN = '\033[1;32m'
CODE_YELLOW = '\033[1;33m'
CODE_BLUE = '\033[1;34m'
CODE_MAGENTA = '\033[1;35m'
CODE_CYAN = '\033[1;36m'
CODE_WHITE = '\033[1;37m'
RUN_CMD_ALWAYS = 'RUN_CMD_ALWAYS'
RUN_CMD_CONFIRM = 'RUN_CMD_USER_CONFIRMATION'
RUN_CMD_NEVER = 'RUN_CMD_NEVER'
USER_CONFIRM_ALWAYS = False
def confirm(run_mode, cmd_str):
'optionally ask user for confirmation with info about a cmd about to be run'
#pylint: disable=global-statement
global USER_CONFIRM_ALWAYS
if run_mode == RUN_CMD_NEVER:
return False
if not USER_CONFIRM_ALWAYS and run_mode == RUN_CMD_CONFIRM:
c = input('run command [%s] ? (N)o / (Y)es / (A)lways:' % (cmd_str))
if not isinstance(c, str) or c == '':
return False
c = c.lower()
if c == 'n':
return False
if c == 'a':
USER_CONFIRM_ALWAYS = True
return True
def color_text(text, color, fmt=None):
    'if color control string is not None, wrap like so: color|text|color_reset'
if text is None:
return None
if fmt is not None:
text = fmt % (text)
if color is None:
return str(text)
return color + str(text) + CODE_RESET
def black_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_BLACK, **kwargs)
def red_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_RED, **kwargs)
def green_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_GREEN, **kwargs)
def yellow_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_YELLOW, **kwargs)
def blue_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_BLUE, **kwargs)
def magenta_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_MAGENTA, **kwargs)
def cyan_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_CYAN, **kwargs)
def white_text(text, **kwargs):
'wrap text in terminal encoding characters'
return color_text(text, CODE_WHITE, **kwargs)
def color_code_stdout(color_code):
'write color code to stdout and flush'
if color_code is not None:
sys.stdout.write(color_code)
sys.stdout.flush()
def reset_color_code_stdout(color):
    'reset stdout to the normal color mode and flush'
if color:
sys.stdout.write(CODE_RESET)
sys.stdout.flush()
def execute(
cmd,
run_mode=RUN_CMD_ALWAYS,
cwd=None,
output=False,
color=True,
log_level=logging.DEBUG,
env=None,
):
'''
execute a subprocess with
logging of commandline before output
optional color coded output
optional current working directory override
a run mode that can disable execution, ask for user confirmation, or execute
'''
nottext = color_text('not', CODE_RED) if color else 'not'
cmd = [str(x) for x in cmd]
cmd_str = color_text(subprocess.list2cmdline(cmd), CODE_GREEN)
go = confirm(run_mode, cmd_str)
verb = 'running' if go else nottext + ' running'
highlight_color = (CODE_YELLOW if go else CODE_GREEN) if color else None
result_color = CODE_CYAN if color else None
cwd_str = color_text(cwd, highlight_color)
cmd_str = color_text(subprocess.list2cmdline(cmd), highlight_color)
if cwd is None:
logging.log(log_level, '%s [%s]', verb, cmd_str)
else:
logging.log(log_level, 'from [%s] %s [%s]', cwd_str, verb, cmd_str)
if not go:
return None
if not output:
color_code_stdout(result_color)
try:
subprocess.check_call(cmd, cwd=cwd, env=env)
finally:
reset_color_code_stdout(color)
else:
return subprocess.check_output(cmd, cwd=cwd, env=env)
return None
def execute_multiline_str(**kwargs):
    'wraps execute by converting the multiline "cmd" string kwarg to an argv list'
cmd = kwargs.pop('cmd')
if cmd is None:
raise ValueError('expected multiline string keyword arg "cmd"')
lines = cmd.split('\n')
lines = [x.strip() for x in lines]
lines = [x for x in lines if not x.startswith('#')]
cmd = ' '.join(lines)
cmd = cmd.split(' ')
cmd = [x for x in cmd if x] # remove empty argv
execute(cmd, **kwargs)
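# Illustrative call (comment lines inside the multiline string are stripped
# before execution; RUN_CMD_NEVER only logs the command):
#   execute_multiline_str(cmd='''
#       ls
#       # this line is dropped
#       -la
#   ''', run_mode=RUN_CMD_NEVER)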
def execute_callback(
message,
callback,
args,
kwargs,
run_mode=RUN_CMD_ALWAYS,
color=True,
log_arguments=True,
log_time=False,
log_level=logging.DEBUG,
):
'''
execute a python function with
a run mode that can disable execution, ask for user confirmation, or execute
'''
nottext = color_text('not', CODE_RED) if color else 'not'
go = confirm(run_mode, message)
verb = 'calling' if go else nottext + ' calling'
if log_arguments:
logging.log(
log_level,
'%s [%s.%s] with args %s and kwargs %s',
verb,
callback.__module__,
callback.__name__,
args,
kwargs,
)
else:
logging.log(
log_level,
'%s [%s.%s] to %s',
verb,
callback.__module__,
callback.__name__,
message,
)
if not go:
return None
if log_time:
with T(message + ' total'):
result = callback(*args, **kwargs)
else:
result = callback(*args, **kwargs)
return result
def set_log_level(level):
'set the global logging level'
logging.getLogger('').setLevel(level)
def setup_logging(
level=logging.DEBUG,
setup_matplotlib=True,
setup_lambda=False,
numpy_precision=3,
numpy_suppress=True,
numpy_linewidth=75,
stream=None,
color=True,
force_warning_modules=(
'boto3',
'botocore',
's3transfer',
'urllib3',
'websockets',
),
):
'setup reasonable logging defaults'
if setup_lambda:
color = False
logger = logging.getLogger()
logger.setLevel(level)
logger.propagate = False
for modname in force_warning_modules:
modlogger = logging.getLogger(modname)
modlogger.setLevel(logging.WARNING)
elif level == logging.INFO:
logging.basicConfig(level=logging.INFO, format='%(message)s', stream=stream)
else:
logging.basicConfig(
level=level,
format='%(levelname)s %(message)s',
stream=stream,
)
logger = logging.getLogger()
logger.propagate = False
for modname in force_warning_modules:
modlogger = logging.getLogger(modname)
modlogger.setLevel(logging.WARNING)
if setup_matplotlib:
# force matplotlib to never show debug info!
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.WARNING)
for num, name, color_code in [
(logging.CRITICAL, 'BAD ', CODE_RED),
(logging.ERROR, 'err ', CODE_RED),
(logging.WARNING, 'warn', CODE_WHITE),
(logging.INFO, 'info', CODE_BLACK),
(logging.DEBUG, 'dbg ', CODE_BLACK),
]:
#name = logging.getLevelName(num).lower().ljust(8)
resolved_name = name
if color:
resolved_name = color_text(name, color_code)
logging.addLevelName(num, resolved_name)
if numpy is not None:
numpy.set_printoptions(
precision=numpy_precision,
suppress=numpy_suppress,
linewidth=numpy_linewidth,
)
def setup_patching(setup_ssl=True):
'''
follow this guide to make sure models can be downloaded without error:
https://github.com/fchollet/deep-learning-models/issues/33#issuecomment-397257502
'''
if setup_ssl:
#pylint: disable=W0212
ssl._create_default_https_context = ssl._create_unverified_context
def setup_tensorflow():
'make tensorflow silent unless TF_CPP_MIN_LOG_LEVEL envvar found'
tf_log_key = 'TF_CPP_MIN_LOG_LEVEL'
tf_logger = logging.getLogger('tensorflow')
if tf_log_key not in os.environ:
os.environ[tf_log_key] = '3'
tf_logger.setLevel(logging.INFO)
else:
tf_logger.setLevel(logging.DEBUG)
# redirect stdout/stderr, import keras, then restore stdout/stderr
# avoids keras cluttering up the console during version or other query cmds
save_stdout, save_stderr = sys.stdout, sys.stderr
try:
sys.stdout = open(os.devnull, 'w')
sys.stderr = sys.stdout
#pylint: disable=unused-import,import-outside-toplevel
import tensorflow.keras
finally:
sys.stdout, sys.stderr = save_stdout, save_stderr
class HELP_FMT(
argparse.ArgumentDefaultsHelpFormatter,
argparse.RawTextHelpFormatter,
):
'''
composite class to provide both default args in help and raw help strings
goes to crazy lengths to split up lists of choices...
'''
def format_help(self):
tmp = argparse.HelpFormatter.format_help(self)
result = []
for line in tmp.split('\n'):
if '[' in line and '{' in line and line.count(',') > 5:
test = line
total_whitespace = line.count(' ')
test = test.strip()
leading = total_whitespace - test.count(' ')
if test[0] == '[' and test[-1] == ']':
test = test[1:-1]
                # use shlex to handle list tokenizing
                # by turning lists into strings
test = test.replace('[', '"')
test = test.replace(']', '"')
test = test.replace('{', "'")
test = test.replace('}', "'")
test = test.replace(' ...', '')
parts = shlex.split(test, comments=False)
# remove crazy duplication of the same list
A = parts[-1]
B = parts[-2]
C = "'%s'" % (B)
if A == C:
parts.pop()
norm_line = ' '.join(parts)
indent = ' ' * leading
line = indent + ('\n ' + indent).join(norm_line.split(','))
result.append(line)
return '\n'.join(result)
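# Usage sketch: pass the composite formatter to argparse in the usual way,
# e.g. parser = argparse.ArgumentParser(description=__doc__, formatter_class=HELP_FMT)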
VERBOSE_MAP = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
def add_verbose_parse_arg(parser):
'add verbosity levels to a parser'
if not getattr(parser, 'vm_build_utils_has_verbose', False):
parser.add_argument(
'-v',
'--verbose',
action='count',
help='verbose level... repeat up to 2 times',
)
parser.vm_build_utils_has_verbose = True
def set_log_level_from_args(args):
'args is a command line parser result - use it to configure logging'
if args.verbose is None:
args.verbose = 0
set_log_level(VERBOSE_MAP[args.verbose])
def add_run_mode_parse_arg(parser):
'add controls to run sub commands / persistent system operations'
if not getattr(parser, 'vm_build_utils_has_run_mode', False):
RUN_MODE_GROUP = parser.add_mutually_exclusive_group()
RUN_MODE_GROUP.add_argument(
'--run-never',
action='store_true',
help='no actions will be taken, only logging will be performed')
RUN_MODE_GROUP.add_argument(
'--run-confirm',
action='store_true',
help='actions will be performed with user confirmation')
RUN_MODE_GROUP.add_argument(
'--run-always',
action='store_true',
help='actions will be performed always [ default ]',
)
parser.vm_build_utils_has_run_mode = True
def setup_run_mode(args):
'args is a command line parser result - use it to configure the run mode'
if not args.run_confirm and not args.run_never:
args.run_always = True
result = None
if args.run_never:
result = RUN_CMD_NEVER
elif args.run_confirm:
result = RUN_CMD_CONFIRM
elif args.run_always:
result = RUN_CMD_ALWAYS
else:
raise ValueError('one of [run-never,run-confirm,run-always] must be True')
return result
def add_file_logging_parse_arg(parser):
'add file logging output + verbosity to a parser'
if not getattr(parser, 'vm_build_utils_has_file_log', False):
parser.add_argument(
'-fv',
'--file-verbose',
action='count',
help='verbose level for --file-log ... repeat up to 2 times',
)
parser.add_argument(
'--file-log',
default=None,
type=Path,
help='direct logging stream to this file in addition to stderr',
)
parser.vm_build_utils_has_file_log = True
def set_file_logging_from_args(args):
'args is a command line parser result - use it to configure file logging'
if args.file_log is None:
return
if args.file_verbose is None:
args.file_verbose = 0
level = VERBOSE_MAP[args.file_verbose]
file_log = logging.FileHandler(args.file_log, mode='w')
file_log.setLevel(level)
file_log.setFormatter(
logging.Formatter('%(levelname)s %(message)s', None, '%'))
logging.getLogger('').addHandler(file_log)
def finish_args(parser):
    'add common arguments to a parser if not already added: verbose, file logging, run mode'
add_verbose_parse_arg(parser)
add_file_logging_parse_arg(parser)
add_run_mode_parse_arg(parser)
return parser
def log_parsed_args(args_namespace, level=logging.DEBUG):
    'log each element in an argparse namespace'
items = dict(vars(args_namespace)).items()
key_whitespace_len = -1
for key, _ in items:
key_whitespace_len = max(key_whitespace_len, len(key))
key_whitespace_len += 2
newline_whitespace_len = key_whitespace_len + 6
newline_whitespace = ''.join(['\n'] + [' '] * newline_whitespace_len)
for key, value in items:
tmp = str(value)
if isinstance(value, list):
tmp = newline_whitespace.join([str(c) for c in value])
logging.log(level, '%s[%s]', key.rjust(key_whitespace_len), tmp)
def parse_args(parser, args=None, parse_known_args=False, return_unknown=False):
'parse, handle logging and run mode arguments'
finish_args(parser)
    if parse_known_args:
        args, unknown = parser.parse_known_args(args=args)
    else:
        args = parser.parse_args(args=args)
        unknown = []
set_log_level_from_args(args)
set_file_logging_from_args(args)
args.run_mode = setup_run_mode(args)
if return_unknown:
return args, unknown
return args
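# Typical wiring (illustrative):
#   parser = argparse.ArgumentParser(formatter_class=HELP_FMT)
#   parser.add_argument('--name')
#   args = parse_args(parser)  # injects -v/--file-log/run-mode flags and applies them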
KB = float(10**3)
GB = float(10**9) # 1000000000
MiB = float(2**20) # 1048576
GiB = float(2**30) # 1073741824
def current_platform_is_darwin():
'returns true if current system is darwin, false on linux or windows'
return platform.system().lower() == 'darwin'
def current_platform_is_linux():
'returns true if current system is linux, false on darwin or windows'
return platform.system().lower() == 'linux'
def get_rss():
'get high water mark resident memory usage'
rss_bytes = 0
maxrss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
if current_platform_is_darwin():
rss_bytes = maxrss
else:
rss_bytes = maxrss * KB
rss_gb = rss_bytes / GB
return rss_gb
def get_rss_and_total():
'resident and total physical memory in GB'
try:
total = (os.sysconf('SC_PAGE_SIZE') * os.sysconf('SC_PHYS_PAGES')) / GB
except ValueError:
total = -1
return (get_rss(), total)
def get_gpu_used_and_total():
    'GPU memory used and total, in GiB'
if nvidia_smi is None:
return 0, 0
nvsmi = nvidia_smi.getInstance()
qresult = nvsmi.DeviceQuery('memory.used, memory.total')
mem = qresult['gpu'][0]['fb_memory_usage']
assert mem['unit'] == 'MiB'
used = (mem['used'] * MiB) / GiB
total = (mem['total'] * MiB) / GiB
return used, total
class T(object):
'simple timer'
def __init__(self, name, level=logging.INFO):
self.name = name
self.start = self.end = self.interval = 0
self.level = level
def __enter__(self):
self.start = time.perf_counter()
return self
def __exit__(self, *args):
self.end = time.perf_counter()
self.interval = self.end - self.start
gc.collect()
rss, total = get_rss_and_total()
gpu_used, gpu_total = get_gpu_used_and_total()
logging.log(
self.level,
'%s [%s sec] [%s/%s GB] [%s/%s GB gpu]',
self.name.rjust(40),
yellow_text('% 7.2f' % (self.interval)),
yellow_text('% 6.2f' % (rss)),
yellow_text('%02.2f' % (total)),
yellow_text('% 6.2f' % (gpu_used)),
yellow_text('%02.2f' % (gpu_total)),
)
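# Usage sketch (logs wall time plus RSS and GPU memory on exit):
#   with T('load dataset'):
#       data = expensive_load()  # hypothetical callable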
def format_size(byte_size):
'convert size in bytes to a human readable string'
if byte_size > 1000 * 1000:
return '%.1fMB' % (byte_size / 1000.0 / 1000)
if byte_size > 10 * 1000:
return '%ikB' % (byte_size / 1000)
if byte_size > 1000:
return '%.1fkB' % (byte_size / 1000.0)
return '%ibytes' % byte_size
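# e.g. format_size(2500000) -> '2.5MB', format_size(42000) -> '42kB',
# format_size(999) -> '999bytes'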
def remove_prefix(value, prefix):
'remove string prefix'
if value.startswith(prefix):
return value[len(prefix):]
return value
def get_sitepackages_path():
'get path to python site-packages directory'
try:
return site.getsitepackages()[0]
except AttributeError:
for path in sys.path:
if 'local' in path:
continue
if 'site-packages' in path:
return path
raise ValueError('no site packages found')
def executable_path():
    'get a path to the python interpreter that can be tweaked via env var'
result = sys.executable
override = os.environ.get('VM_EXECUTABLE')
if override is not None:
result = override
result = str(result)
result = remove_prefix(result, '/System/Volumes/Data')
return Path(result)
def project_path_components():
'validate and return paths related to /comet/PROJECT/env/DEVREL/bin/python'
template_path = '"/comet/PROJECT/env/DEVREL/bin/python"'
err_msg = 'python path must be of the form %s' % (template_path)
python_exec = executable_path()
assert len(python_exec.parts) >= 6, err_msg
user_parts = python_exec.parts[:-4]
_env, dev_rel, _bin, _python = python_exec.parts[-4:]
assert (_env, _bin, _python) == ('env', 'bin', 'python'), err_msg
return user_parts, dev_rel
def project_path():
    'abs path relative to the directory containing env/DEVREL/bin/python'
user_parts, _ = project_path_components()
return Path().joinpath(*user_parts)
def env_root(rel_path=''):
'abs path relative to the directory containing bin/python'
python_exec = executable_path()
bin_path = python_exec.parent.resolve()
env = bin_path.parent
if rel_path:
result = env / rel_path
else:
result = env
return result
|
[
"sys.stdout.write",
"logging.addLevelName",
"subprocess.list2cmdline",
"logging.Formatter",
"gc.collect",
"pathlib.Path",
"sys.stdout.flush",
"resource.getrusage",
"subprocess.check_call",
"numpy.set_printoptions",
"logging.FileHandler",
"logging.log",
"shlex.split",
"site.getsitepackages",
"subprocess.check_output",
"os.sysconf",
"time.perf_counter",
"platform.system",
"pynvml.smi.nvidia_smi.getInstance",
"logging.basicConfig",
"os.environ.get",
"warnings.warn",
"logging.getLogger",
"argparse.HelpFormatter.format_help"
] |
[((380, 407), 'warnings.warn', 'warnings.warn', (['"""old python"""'], {}), "('old python')\n", (393, 407), False, 'import warnings\n'), ((6927, 6946), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6944, 6946), False, 'import logging\n'), ((8334, 8365), 'logging.getLogger', 'logging.getLogger', (['"""tensorflow"""'], {}), "('tensorflow')\n", (8351, 8365), False, 'import logging\n'), ((12886, 12930), 'logging.FileHandler', 'logging.FileHandler', (['args.file_log'], {'mode': '"""w"""'}), "(args.file_log, mode='w')\n", (12905, 12930), False, 'import logging\n'), ((15427, 15451), 'pynvml.smi.nvidia_smi.getInstance', 'nvidia_smi.getInstance', ([], {}), '()\n', (15449, 15451), False, 'from pynvml.smi import nvidia_smi\n'), ((17400, 17431), 'os.environ.get', 'os.environ.get', (['"""VM_EXECUTABLE"""'], {}), "('VM_EXECUTABLE')\n", (17414, 17431), False, 'import os\n'), ((17570, 17582), 'pathlib.Path', 'Path', (['result'], {}), '(result)\n', (17574, 17582), False, 'from pathlib import Path\n'), ((2945, 2973), 'sys.stdout.write', 'sys.stdout.write', (['color_code'], {}), '(color_code)\n', (2961, 2973), False, 'import sys\n'), ((2978, 2996), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (2994, 2996), False, 'import sys\n'), ((3104, 3132), 'sys.stdout.write', 'sys.stdout.write', (['CODE_RESET'], {}), '(CODE_RESET)\n', (3120, 3132), False, 'import sys\n'), ((3137, 3155), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3153, 3155), False, 'import sys\n'), ((3661, 3689), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['cmd'], {}), '(cmd)\n', (3684, 3689), False, 'import subprocess\n'), ((3980, 4008), 'subprocess.list2cmdline', 'subprocess.list2cmdline', (['cmd'], {}), '(cmd)\n', (4003, 4008), False, 'import subprocess\n'), ((4050, 4098), 'logging.log', 'logging.log', (['log_level', '"""%s [%s]"""', 'verb', 'cmd_str'], {}), "(log_level, '%s [%s]', verb, cmd_str)\n", (4061, 4098), False, 'import logging\n'), ((4111, 4178), 'logging.log', 'logging.log', (['log_level', '"""from [%s] %s [%s]"""', 'cwd_str', 'verb', 'cmd_str'], {}), "(log_level, 'from [%s] %s [%s]', cwd_str, verb, cmd_str)\n", (4122, 4178), False, 'import logging\n'), ((4393, 4439), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'cwd': 'cwd', 'env': 'env'}), '(cmd, cwd=cwd, env=env)\n', (4416, 4439), False, 'import subprocess\n'), ((5403, 5530), 'logging.log', 'logging.log', (['log_level', '"""%s [%s.%s] with args %s and kwargs %s"""', 'verb', 'callback.__module__', 'callback.__name__', 'args', 'kwargs'], {}), "(log_level, '%s [%s.%s] with args %s and kwargs %s', verb,\n callback.__module__, callback.__name__, args, kwargs)\n", (5414, 5530), False, 'import logging\n'), ((5602, 5703), 'logging.log', 'logging.log', (['log_level', '"""%s [%s.%s] to %s"""', 'verb', 'callback.__module__', 'callback.__name__', 'message'], {}), "(log_level, '%s [%s.%s] to %s', verb, callback.__module__,\n callback.__name__, message)\n", (5613, 5703), False, 'import logging\n'), ((6470, 6489), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (6487, 6489), False, 'import logging\n'), ((7031, 7057), 'logging.getLogger', 'logging.getLogger', (['modname'], {}), '(modname)\n', (7048, 7057), False, 'import logging\n'), ((7188, 7219), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (7205, 7219), False, 'import logging\n'), ((7664, 7704), 'logging.addLevelName', 'logging.addLevelName', (['num', 'resolved_name'], {}), '(num, resolved_name)\n', (7684, 7704), False, 'import logging\n'), ((7734, 7839), 'numpy.set_printoptions', 'numpy.set_printoptions', ([], {'precision': 'numpy_precision', 'suppress': 'numpy_suppress', 'linewidth': 'numpy_linewidth'}), '(precision=numpy_precision, suppress=numpy_suppress,\n linewidth=numpy_linewidth)\n', (7756, 7839), False, 'import numpy\n'), ((9223, 9263), 'argparse.HelpFormatter.format_help', 'argparse.HelpFormatter.format_help', (['self'], {}), '(self)\n', (9257, 9263), False, 'import argparse\n'), ((12989, 13046), 'logging.Formatter', 'logging.Formatter', (['"""%(levelname)s %(message)s"""', 'None', '"""%"""'], {}), "('%(levelname)s %(message)s', None, '%')\n", (13006, 13046), False, 'import logging\n'), ((14902, 14942), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (14920, 14942), False, 'import resource\n'), ((15895, 15914), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15912, 15914), False, 'import time\n'), ((15976, 15995), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (15993, 15995), False, 'import time\n'), ((16042, 16054), 'gc.collect', 'gc.collect', ([], {}), '()\n', (16052, 16054), False, 'import gc\n'), ((4278, 4322), 'subprocess.check_call', 'subprocess.check_call', (['cmd'], {'cwd': 'cwd', 'env': 'env'}), '(cmd, cwd=cwd, env=env)\n', (4299, 4322), False, 'import subprocess\n'), ((6001, 6022), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (6018, 6022), False, 'import logging\n'), ((6607, 6633), 'logging.getLogger', 'logging.getLogger', (['modname'], {}), '(modname)\n', (6624, 6633), False, 'import logging\n'), ((6711, 6787), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(message)s"""', 'stream': 'stream'}), "(level=logging.INFO, format='%(message)s', stream=stream)\n", (6730, 6787), False, 'import logging\n'), ((6800, 6888), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'level', 'format': '"""%(levelname)s %(message)s"""', 'stream': 'stream'}), "(level=level, format='%(levelname)s %(message)s', stream\n =stream)\n", (6819, 6888), False, 'import logging\n'), ((13050, 13071), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (13067, 13071), False, 'import logging\n'), ((17043, 17065), 'site.getsitepackages', 'site.getsitepackages', ([], {}), '()\n', (17063, 17065), False, 'import site\n'), ((18244, 18250), 'pathlib.Path', 'Path', ([], {}), '()\n', (18248, 18250), False, 'from pathlib import Path\n'), ((9891, 9924), 'shlex.split', 'shlex.split', (['test'], {'comments': '(False)'}), '(test, comments=False)\n', (9902, 9924), False, 'import shlex\n'), ((14621, 14638), 'platform.system', 'platform.system', ([], {}), '()\n', (14636, 14638), False, 'import platform\n'), ((14775, 14792), 'platform.system', 'platform.system', ([], {}), '()\n', (14790, 14792), False, 'import platform\n'), ((15183, 15209), 'os.sysconf', 'os.sysconf', (['"""SC_PAGE_SIZE"""'], {}), "('SC_PAGE_SIZE')\n", (15193, 15209), False, 'import os\n'), ((15212, 15239), 'os.sysconf', 'os.sysconf', (['"""SC_PHYS_PAGES"""'], {}), "('SC_PHYS_PAGES')\n", (15222, 15239), False, 'import os\n')]
|
import unittest
import numpy as np
from collections import namedtuple
from pyrostest import RosTest, with_launch_file, launch_node
from process.bearing import calculate_directions
from sensor_msgs.msg import NavSatFix
from std_msgs.msg import Float64
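
# Lightweight two-field stand-in for a GPS fix; it lets test_distance run
# without constructing full NavSatFix messages.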
fix = namedtuple('fix', ['latitude', 'longitude'])


class TestBearing(unittest.TestCase):
def test_distance(self):
fix1 = fix(33.636700, -84.427863)
fix2 = fix(39.029128, -111.838257)
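        # Expected value of 2,517,000 m is the great-circle distance between
        # the two fixes above, checked to within 1% relative tolerance.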
assert np.isclose(calculate_directions.get_distance(fix1, fix2), 2517000, rtol=.01)


class TestBearingNode(RosTest):
@with_launch_file('buzzmobile', 'test_params.launch')
@launch_node('buzzmobile', 'bearing.py')
def test_bearing_node(self):
with self.mock_pub('/fix', NavSatFix, queue_size=0) as fix_node:
with self.check_topic('/buzzmobile/bearing', Float64) as ct:
# send mock data
fix_node.send(NavSatFix(None, None, 33.636700, -84.427863, None, None, None))
fix_node.send(NavSatFix(None, None, 39.029128, -111.838257, None, None, None))
# check the output from the node
assert np.isclose(ct.message.data, 1.19212)
|
[
"process.bearing.calculate_directions.get_distance",
"numpy.isclose",
"pyrostest.with_launch_file",
"collections.namedtuple",
"pyrostest.launch_node",
"sensor_msgs.msg.NavSatFix"
] |
[((259, 303), 'collections.namedtuple', 'namedtuple', (['"""fix"""', "['latitude', 'longitude']"], {}), "('fix', ['latitude', 'longitude'])\n", (269, 303), False, 'from collections import namedtuple\n'), ((589, 641), 'pyrostest.with_launch_file', 'with_launch_file', (['"""buzzmobile"""', '"""test_params.launch"""'], {}), "('buzzmobile', 'test_params.launch')\n", (605, 641), False, 'from pyrostest import RosTest, with_launch_file, launch_node\n'), ((647, 686), 'pyrostest.launch_node', 'launch_node', (['"""buzzmobile"""', '"""bearing.py"""'], {}), "('buzzmobile', 'bearing.py')\n", (658, 686), False, 'from pyrostest import RosTest, with_launch_file, launch_node\n'), ((484, 529), 'process.bearing.calculate_directions.get_distance', 'calculate_directions.get_distance', (['fix1', 'fix2'], {}), '(fix1, fix2)\n', (517, 529), False, 'from process.bearing import calculate_directions\n'), ((1161, 1197), 'numpy.isclose', 'np.isclose', (['ct.message.data', '(1.19212)'], {}), '(ct.message.data, 1.19212)\n', (1171, 1197), True, 'import numpy as np\n'), ((929, 989), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', (['None', 'None', '(33.6367)', '(-84.427863)', 'None', 'None', 'None'], {}), '(None, None, 33.6367, -84.427863, None, None, None)\n', (938, 989), False, 'from sensor_msgs.msg import NavSatFix\n'), ((1023, 1086), 'sensor_msgs.msg.NavSatFix', 'NavSatFix', (['None', 'None', '(39.029128)', '(-111.838257)', 'None', 'None', 'None'], {}), '(None, None, 39.029128, -111.838257, None, None, None)\n', (1032, 1086), False, 'from sensor_msgs.msg import NavSatFix\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import math
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid.tests.unittests.op_test import OpTest
from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci


def bilinear_interp_mkldnn_np(input,
out_h,
out_w,
out_size=None,
actual_shape=None,
data_layout='NCHW'):
"""bilinear interpolation implement in shape [N, C, H, W]"""
if data_layout == "NHWC":
input = np.transpose(input, (0, 3, 1, 2)) # NHWC => NCHW
if out_size is not None:
out_h = out_size[0]
out_w = out_size[1]
if actual_shape is not None:
out_h = actual_shape[0]
out_w = actual_shape[1]
batch_size, channel, in_h, in_w = input.shape
out = np.zeros((batch_size, channel, out_h, out_w))
for oh in range(out_h):
h0 = int(math.floor((oh + 0.5) * in_h / out_h - 0.5))
h1 = int(math.ceil((oh + 0.5) * in_h / out_h - 0.5))
h0 = max(h0, 0)
h1 = min(h1, in_h - 1)
Wh = (oh + 0.5) * in_h / out_h - 0.5 - h0
for ow in range(out_w):
w0 = int(math.floor((ow + 0.5) * in_w / out_w - 0.5))
w1 = int(math.ceil((ow + 0.5) * in_w / out_w - 0.5))
w0 = max(w0, 0)
w1 = min(w1, in_w - 1)
Ww = (ow + 0.5) * in_w / out_w - 0.5 - w0
input_h0_w0 = input[:, :, h0, w0]
input_h1_w0 = input[:, :, h1, w0]
input_h0_w1 = input[:, :, h0, w1]
input_h1_w1 = input[:, :, h1, w1]
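            # Standard bilinear blend of the four neighbours, weighted by
            # (1 - Wh)(1 - Ww), Wh(1 - Ww), (1 - Wh)Ww and Wh*Ww.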
            out[:, :, oh, ow] = (input_h0_w0 * (1 - Wh) * (1 - Ww) +
                                 input_h1_w0 * Wh * (1 - Ww) +
                                 input_h0_w1 * (1 - Wh) * Ww +
                                 input_h1_w1 * Wh * Ww)
if data_layout == "NHWC":
out = np.transpose(out, (0, 2, 3, 1)) # NCHW => NHWC
return out.astype(input.dtype)
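
# Sanity property of the reference above: when out_h == in_h and
# out_w == in_w, every Wh and Ww works out to exactly 0, so the output
# equals the input; a quick consistency check on the index arithmetic.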


@skip_check_grad_ci(reason="Haven not implement interpolate grad kernel.")
class TestBilinearInterpMKLDNNOp(OpTest):
def init_test_case(self):
pass

    def setUp(self):
self.op_type = "bilinear_interp_v2"
self.interp_method = 'bilinear'
self._cpu_only = True
self.use_mkldnn = True
self.input_shape = [1, 1, 2, 2]
self.data_layout = 'NCHW'
# priority: actual_shape > out_size > scale > out_h & out_w
self.out_h = 1
self.out_w = 1
self.scale = 2.0
self.out_size = None
self.actual_shape = None
self.init_test_case()
input_np = np.random.random(self.input_shape).astype("float32")
if self.data_layout == "NCHW":
in_h = self.input_shape[2]
in_w = self.input_shape[3]
else:
in_h = self.input_shape[1]
in_w = self.input_shape[2]
scale_h = 0
scale_w = 0
if self.scale:
if isinstance(self.scale, float) or isinstance(self.scale, int):
scale_h = float(self.scale)
scale_w = float(self.scale)
if isinstance(self.scale, list) and len(self.scale) == 1:
scale_w = self.scale[0]
scale_h = self.scale[0]
elif isinstance(self.scale, list) and len(self.scale) > 1:
scale_w = self.scale[1]
scale_h = self.scale[0]
if scale_h > 0 and scale_w > 0:
out_h = int(in_h * scale_h)
out_w = int(in_w * scale_w)
else:
out_h = self.out_h
out_w = self.out_w
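        # A positive scale wins over out_h/out_w here; out_size and
        # actual_shape are applied later, inside the reference function and
        # via the 'OutSize' input below.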
output_np = bilinear_interp_mkldnn_np(input_np, out_h, out_w,
self.out_size, self.actual_shape,
self.data_layout)
if isinstance(self.scale, float):
self.scale = [self.scale, self.scale]
self.inputs = {'X': input_np}
if self.out_size is not None:
self.inputs['OutSize'] = self.out_size
if self.actual_shape is not None:
self.inputs['OutSize'] = self.actual_shape
self.attrs = {
'interp_method': self.interp_method,
'out_h': self.out_h,
'out_w': self.out_w,
'scale': self.scale,
'data_layout': self.data_layout,
'use_mkldnn': self.use_mkldnn
}
self.outputs = {'Out': output_np}

    def test_check_output(self):
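        # Dygraph checking is skipped for this oneDNN op test; only the
        # static-graph output is compared against the numpy reference.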
self.check_output(check_dygraph=False)


class TestBilinearInterpOpMKLDNNNHWC(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 2, 32, 16]
self.out_h = 27
self.out_w = 49
self.scale = [2.0, 3.0]
self.data_layout = 'NHWC'


class TestBilinearNeighborInterpMKLDNNCase2(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [3, 3, 9, 6]
self.out_h = 12
self.out_w = 12


class TestBilinearNeighborInterpCase3(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 128
self.scale = [0.1, 0.05]


class TestBilinearNeighborInterpCase4(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = [13.0, 15.0]
self.out_size = np.array([65, 129]).astype("int32")


class TestBilinearNeighborInterpCase5(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 9, 6]
self.out_h = 12
self.out_w = 12
self.out_size = np.array([13, 13]).astype("int32")


class TestBilinearNeighborInterpCase6(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [1, 1, 32, 64]
self.out_h = 64
self.out_w = 32
self.scale = 1.0
self.out_size = np.array([65, 129]).astype("int32")


class TestBilinearNeighborInterpSame(TestBilinearInterpMKLDNNOp):
def init_test_case(self):
self.input_shape = [2, 3, 32, 64]
self.out_h = 32
self.out_w = 64
self.scale = 2.0
self.out_size = np.array([65, 129]).astype("int32")


if __name__ == "__main__":
from paddle import enable_static
enable_static()
unittest.main()
|
[
"unittest.main",
"paddle.fluid.tests.unittests.op_test.skip_check_grad_ci",
"math.ceil",
"paddle.enable_static",
"numpy.zeros",
"numpy.transpose",
"math.floor",
"numpy.random.random",
"numpy.array"
] |
[((2692, 2765), 'paddle.fluid.tests.unittests.op_test.skip_check_grad_ci', 'skip_check_grad_ci', ([], {'reason': '"""Haven not implement interpolate grad kernel."""'}), "(reason='Haven not implement interpolate grad kernel.')\n", (2710, 2765), False, 'from paddle.fluid.tests.unittests.op_test import skip_check_grad_ci\n'), ((1561, 1606), 'numpy.zeros', 'np.zeros', (['(batch_size, channel, out_h, out_w)'], {}), '((batch_size, channel, out_h, out_w))\n', (1569, 1606), True, 'import numpy as np\n'), ((7082, 7097), 'paddle.enable_static', 'enable_static', ([], {}), '()\n', (7095, 7097), False, 'from paddle import enable_static\n'), ((7102, 7117), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7115, 7117), False, 'import unittest\n'), ((1268, 1301), 'numpy.transpose', 'np.transpose', (['input', '(0, 3, 1, 2)'], {}), '(input, (0, 3, 1, 2))\n', (1280, 1301), True, 'import numpy as np\n'), ((2605, 2636), 'numpy.transpose', 'np.transpose', (['out', '(0, 2, 3, 1)'], {}), '(out, (0, 2, 3, 1))\n', (2617, 2636), True, 'import numpy as np\n'), ((1653, 1696), 'math.floor', 'math.floor', (['((oh + 0.5) * in_h / out_h - 0.5)'], {}), '((oh + 0.5) * in_h / out_h - 0.5)\n', (1663, 1696), False, 'import math\n'), ((1715, 1757), 'math.ceil', 'math.ceil', (['((oh + 0.5) * in_h / out_h - 0.5)'], {}), '((oh + 0.5) * in_h / out_h - 0.5)\n', (1724, 1757), False, 'import math\n'), ((1917, 1960), 'math.floor', 'math.floor', (['((ow + 0.5) * in_w / out_w - 0.5)'], {}), '((ow + 0.5) * in_w / out_w - 0.5)\n', (1927, 1960), False, 'import math\n'), ((1983, 2025), 'math.ceil', 'math.ceil', (['((ow + 0.5) * in_w / out_w - 0.5)'], {}), '((ow + 0.5) * in_w / out_w - 0.5)\n', (1992, 2025), False, 'import math\n'), ((3345, 3379), 'numpy.random.random', 'np.random.random', (['self.input_shape'], {}), '(self.input_shape)\n', (3361, 3379), True, 'import numpy as np\n'), ((6180, 6199), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6188, 6199), True, 'import numpy as np\n'), ((6428, 6446), 'numpy.array', 'np.array', (['[13, 13]'], {}), '([13, 13])\n', (6436, 6446), True, 'import numpy as np\n'), ((6702, 6721), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6710, 6721), True, 'import numpy as np\n'), ((6976, 6995), 'numpy.array', 'np.array', (['[65, 129]'], {}), '([65, 129])\n', (6984, 6995), True, 'import numpy as np\n')]
|
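
Each row above pairs a code cell with its extracted API calls: an apis list of fully-qualified names and an extract_api cell of per-call tuples. As an illustrative sketch only, not part of the dataset, the snippet below parses one such cell with ast.literal_eval, assuming the cell is a Python-literal list of tuples shaped like the rows above; the per-field meanings noted in the comments are inferred from inspection of the rows, not from any documentation.

import ast

# A hypothetical single-entry cell, patterned on the rows above; real
# cells are much longer. Inferred fields per tuple: call span,
# fully-qualified API, name as written, (positional-arg reprs, kwarg
# reprs), reconstructed call text, callee-name span, a boolean flag,
# and the import statement that binds the called name.
cell = """[((380, 407), 'warnings.warn', 'warnings.warn',
            (['"old python"'], {}), "warnings.warn('old python')\\n",
            (393, 407), False, 'import warnings\\n')]"""

for entry in ast.literal_eval(cell):
    call_span, qualified_name = entry[0], entry[1]
    import_stmt = entry[-1]
    print(qualified_name, call_span, import_stmt.strip())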